/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>

#include "i915_reg_defs.h"

struct drm_device;
struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
struct intel_gt;

struct intel_uncore_mmio_debug {
	spinlock_t lock; /** lock is also taken in irq contexts. */
	int unclaimed_mmio_check;
	int saved_mmio_check;
	u32 suspend_count;
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_GT,        /* also includes blitter engine */
	FW_DOMAIN_ID_MEDIA,
	FW_DOMAIN_ID_MEDIA_VDBOX0,
	FW_DOMAIN_ID_MEDIA_VDBOX1,
	FW_DOMAIN_ID_MEDIA_VDBOX2,
	FW_DOMAIN_ID_MEDIA_VDBOX3,
	FW_DOMAIN_ID_MEDIA_VDBOX4,
	FW_DOMAIN_ID_MEDIA_VDBOX5,
	FW_DOMAIN_ID_MEDIA_VDBOX6,
	FW_DOMAIN_ID_MEDIA_VDBOX7,
	FW_DOMAIN_ID_MEDIA_VEBOX0,
	FW_DOMAIN_ID_MEDIA_VEBOX1,
	FW_DOMAIN_ID_MEDIA_VEBOX2,
	FW_DOMAIN_ID_MEDIA_VEBOX3,
	FW_DOMAIN_ID_GSC,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER	= BIT(FW_DOMAIN_ID_RENDER),
	FORCEWAKE_GT		= BIT(FW_DOMAIN_ID_GT),
	FORCEWAKE_MEDIA		= BIT(FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_MEDIA_VDBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
	FORCEWAKE_MEDIA_VDBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
	FORCEWAKE_MEDIA_VDBOX2	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
	FORCEWAKE_MEDIA_VDBOX3	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
	FORCEWAKE_MEDIA_VDBOX4	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX4),
	FORCEWAKE_MEDIA_VDBOX5	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX5),
	FORCEWAKE_MEDIA_VDBOX6	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX6),
	FORCEWAKE_MEDIA_VDBOX7	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX7),
	FORCEWAKE_MEDIA_VEBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
	FORCEWAKE_MEDIA_VEBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),
	FORCEWAKE_MEDIA_VEBOX2	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX2),
	FORCEWAKE_MEDIA_VEBOX3	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX3),
	FORCEWAKE_GSC		= BIT(FW_DOMAIN_ID_GSC),

	FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1,
};
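
/*
 * Each domain ID indexes an individual bit, so the FORCEWAKE_* values form
 * a mask and several domains can be requested in one call, e.g.
 * (illustrative):
 *
 *	enum forcewake_domains fw = FORCEWAKE_RENDER | FORCEWAKE_MEDIA;
 */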

struct intel_uncore_fw_get {
	void (*force_wake_get)(struct intel_uncore *uncore,
			       enum forcewake_domains domains);
};

struct intel_uncore_funcs {
	enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
						  i915_reg_t r);
	enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
						   i915_reg_t r);

	u8 (*mmio_readb)(struct intel_uncore *uncore,
			 i915_reg_t r, bool trace);
	u16 (*mmio_readw)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);
	u32 (*mmio_readl)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);
	u64 (*mmio_readq)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct intel_uncore *uncore,
			    i915_reg_t r, u8 val, bool trace);
	void (*mmio_writew)(struct intel_uncore *uncore,
			    i915_reg_t r, u16 val, bool trace);
	void (*mmio_writel)(struct intel_uncore *uncore,
			    i915_reg_t r, u32 val, bool trace);
};

struct intel_forcewake_range {
	u32 start;
	u32 end;

	enum forcewake_domains domains;
};

/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
struct i915_range {
	u32 start;
	u32 end;
};
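
/*
 * A minimal sketch (not the driver's implementation, which lives in
 * intel_uncore.c) of how a register offset can be matched against such a
 * range table; only the inclusive containment test matters here:
 *
 *	static bool offset_in_ranges(const struct i915_range *ranges,
 *				     unsigned int count, u32 offset)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < count; i++)
 *			if (offset >= ranges[i].start &&
 *			    offset <= ranges[i].end)
 *				return true;
 *
 *		return false;
 *	}
 */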

struct intel_uncore {
	void __iomem *regs;

	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_runtime_pm *rpm;

	spinlock_t lock; /** lock is also taken in irq contexts. */

	/*
	 * Do we need to apply an additional offset to reach the beginning
	 * of the basic non-engine GT registers (referred to as "GSI" on
	 * newer platforms, or "GT block" on older platforms)?  If so, we'll
	 * track that here and apply it transparently to registers in the
	 * appropriate range to maintain compatibility with our existing
	 * register definitions and GT code.
	 */
	u32 gsi_offset;

	unsigned int flags;
#define UNCORE_HAS_FORCEWAKE		BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED	BIT(1)
#define UNCORE_HAS_DBG_UNCLAIMED	BIT(2)
#define UNCORE_HAS_FIFO			BIT(3)
#define UNCORE_NEEDS_FLR_ON_FINI	BIT(4)

	const struct intel_forcewake_range *fw_domains_table;
	unsigned int fw_domains_table_entries;

	/*
	 * Shadowed registers are special cases where we can safely write
	 * to the register *without* grabbing forcewake.
	 */
	const struct i915_range *shadowed_reg_table;
	unsigned int shadowed_reg_table_entries;

	struct notifier_block pmic_bus_access_nb;
	const struct intel_uncore_fw_get *fw_get_funcs;
	struct intel_uncore_funcs funcs;

	unsigned int fifo_count;

	enum forcewake_domains fw_domains;
	enum forcewake_domains fw_domains_active;
	enum forcewake_domains fw_domains_timer;
	enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */

	struct intel_uncore_forcewake_domain {
		struct intel_uncore *uncore;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned int wake_count;
		bool active;
		struct timeout timer;
		u32 __iomem *reg_set;
		u32 __iomem *reg_ack;
	} *fw_domain[FW_DOMAIN_ID_COUNT];

	unsigned int user_forcewake_count;

	struct intel_uncore_mmio_debug *debug;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
	for (tmp__ = (mask__); tmp__ ;) \
		for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])

#define for_each_fw_domain(domain__, uncore__, tmp__) \
	for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
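
/*
 * Example (illustrative): counting the domains that are currently held
 * awake; tmp__ is caller-provided scratch storage for walking the mask.
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *	unsigned int tmp, awake = 0;
 *
 *	for_each_fw_domain(domain, uncore, tmp)
 *		if (domain->wake_count)
 *			awake++;
 */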

static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FORCEWAKE;
}

static inline bool
intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_fifo(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FIFO;
}

static inline bool
intel_uncore_needs_flr_on_fini(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_NEEDS_FLR_ON_FINI;
}

static inline bool
intel_uncore_set_flr_on_fini(struct intel_uncore *uncore)
{
	return uncore->flags |= UNCORE_NEEDS_FLR_ON_FINI;
}

void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915);
void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt);
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct drm_device *dev, void *data);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);

void assert_forcewakes_inactive(struct intel_uncore *uncore);
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op);
#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)
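
/*
 * Example (illustrative): querying exactly which domains an access needs,
 * then holding only those around the access.
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(uncore, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	intel_uncore_forcewake_get(uncore, fw);
 *	... access reg ...
 *	intel_uncore_forcewake_put(uncore, fw);
 */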

void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains domains);
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains);

/*
 * Like above but the caller must manage the uncore.lock itself.
 * Must be used with intel_uncore_read_fw() and friends.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);

void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);

int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value);
static inline int
intel_wait_for_register(struct intel_uncore *uncore,
			i915_reg_t reg,
			u32 mask,
			u32 value,
			unsigned int timeout_ms)
{
	return __intel_wait_for_register(uncore, reg, mask, value, 2,
					 timeout_ms, NULL);
}

int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value);
static inline int
intel_wait_for_register_fw(struct intel_uncore *uncore,
			   i915_reg_t reg,
			   u32 mask,
			   u32 value,
			   unsigned int timeout_ms)
{
	return __intel_wait_for_register_fw(uncore, reg, mask, value,
					    2, timeout_ms, NULL);
}
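
/*
 * Example (illustrative; the register and bit names are placeholders, not
 * real definitions): polling for a status bit with a 100ms upper bound.
 *
 *	if (intel_wait_for_register(uncore, EXAMPLE_STATUS_REG,
 *				    EXAMPLE_READY_BIT, EXAMPLE_READY_BIT,
 *				    100))
 *		DRM_ERROR("device never became ready\n");
 */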

#define IS_GSI_REG(reg) ((reg) < 0x40000)

/* register access functions */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
					    i915_reg_t reg) \
{ \
	u32 offset = i915_mmio_reg_offset(reg); \
	if (IS_GSI_REG(offset)) \
		offset += uncore->gsi_offset; \
	return read##s__(uncore->regs + offset); \
}

#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
					   i915_reg_t reg, u##x__ val) \
{ \
	u32 offset = i915_mmio_reg_offset(reg); \
	if (IS_GSI_REG(offset)) \
		offset += uncore->gsi_offset; \
	write##s__(val, uncore->regs + offset); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

#define __uncore_read(name__, x__, s__, trace__) \
static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
					   i915_reg_t reg) \
{ \
	return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
}

#define __uncore_write(name__, x__, s__, trace__) \
static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
					 i915_reg_t reg, u##x__ val) \
{ \
	uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
}

__uncore_read(read8, 8, b, true)
__uncore_read(read16, 16, w, true)
__uncore_read(read, 32, l, true)
__uncore_read(read16_notrace, 16, w, false)
__uncore_read(read_notrace, 32, l, false)

__uncore_write(write8, 8, b, true)
__uncore_write(write16, 16, w, true)
__uncore_write(write, 32, l, true)
__uncore_write(write_notrace, 32, l, false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support intel_uncore_write64,
 * or uncore->funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
__uncore_read(read64, 64, q, true)
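
/*
 * For a 64-bit value exposed as a lo/hi pair of 32-bit registers, see
 * intel_uncore_read64_2x32() below, which rereads the upper half to detect
 * a wrap between the two accesses.
 */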

#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))

#undef __uncore_read
#undef __uncore_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&uncore->lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&uncore->lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
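
/*
 * A minimal sketch of the pattern described above: a burst of untraced
 * writes from atomic context, with forcewake managed explicitly under
 * uncore->lock (the registers shown are placeholders).
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_RENDER);
 *
 *	intel_uncore_write_fw(uncore, EXAMPLE_REG0, val0);
 *	intel_uncore_write_fw(uncore, EXAMPLE_REG1, val1);
 *
 *	intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_RENDER);
 *	spin_unlock_irq(&uncore->lock);
 */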

static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
				   i915_reg_t reg, u32 clear, u32 set)
{
	u32 old, val;

	old = intel_uncore_read(uncore, reg);
	val = (old & ~clear) | set;
	intel_uncore_write(uncore, reg, val);
	return old;
}

static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
				       i915_reg_t reg, u32 clear, u32 set)
{
	u32 old, val;

	old = intel_uncore_read_fw(uncore, reg);
	val = (old & ~clear) | set;
	if (val != old)
		intel_uncore_write_fw(uncore, reg, val);
}
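
/*
 * Example (illustrative; the bit name is a placeholder): setting and then
 * clearing a single bit with a read-modify-write.
 *
 *	intel_uncore_rmw(uncore, reg, 0, EXAMPLE_BIT);	   - set the bit
 *	intel_uncore_rmw(uncore, reg, EXAMPLE_BIT, 0);	   - clear the bit
 */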

static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
			 i915_reg_t lower_reg, i915_reg_t upper_reg)
{
	u32 upper, lower, old_upper, loop = 0;
	enum forcewake_domains fw_domains;
	unsigned long flags;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, lower_reg,
						    FW_REG_READ);

	fw_domains |= intel_uncore_forcewake_for_reg(uncore, upper_reg,
						     FW_REG_READ);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	upper = intel_uncore_read_fw(uncore, upper_reg);
	do {
		old_upper = upper;
		lower = intel_uncore_read_fw(uncore, lower_reg);
		upper = intel_uncore_read_fw(uncore, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return (u64)upper << 32 | lower;
}
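
/*
 * Example (illustrative; the register names are placeholders): reading a
 * 64-bit counter split across lo/hi 32-bit registers without tearing.
 *
 *	u64 ts = intel_uncore_read64_2x32(uncore, EXAMPLE_TS_LO,
 *					  EXAMPLE_TS_HI);
 */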

static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
						i915_reg_t reg, u32 val,
						u32 mask, u32 expected_val)
{
	u32 reg_val;

	intel_uncore_write(uncore, reg, val);
	reg_val = intel_uncore_read(uncore, reg);

	return (reg_val & mask) != expected_val ? -EINVAL : 0;
}

static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
{
	return uncore->regs;
}

/*
 * The raw_reg_{read,write} macros are intended as a micro-optimization for
 * interrupt handlers so that the pointer indirection on uncore->regs can
 * be computed once (and presumably cached in a register) instead of generating
 * extra load instructions for each MMIO access.
 *
 * Given that these macros are only intended for non-GSI interrupt registers
 * (and the goal is to avoid extra instructions generated by the compiler),
 * these macros do not account for uncore->gsi_offset.  Any caller that needs
 * to use these macros on a GSI register is responsible for adding the
 * appropriate GSI offset to the 'base' parameter.
 */
#define raw_reg_read(base, reg) \
	readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
	writel(value, base + i915_mmio_reg_offset(reg))
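
/*
 * Example (illustrative; the register is a placeholder): hoisting the
 * register base once in an interrupt handler. For a GSI register the
 * caller would add uncore->gsi_offset to the base, as noted above.
 *
 *	void __iomem * const regs = intel_uncore_regs(uncore);
 *	u32 iir = raw_reg_read(regs, EXAMPLE_IIR);
 *
 *	if (iir)
 *		raw_reg_write(regs, EXAMPLE_IIR, iir);
 */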

#endif /* !__INTEL_UNCORE_H__ */