1 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
14 #include "dpu_hw_catalog.h"
15 #include "dpu_hw_vbif.h"
/*
 * VBIF register offsets, in bytes, relative to the VBIF block base
 * resolved from the DPU hardware catalog.
 */
18 #define VBIF_VERSION 0x0000
19 #define VBIF_CLK_FORCE_CTRL0 0x0008
20 #define VBIF_CLK_FORCE_CTRL1 0x000C
/* Static QoS priority remap registers. */
21 #define VBIF_QOS_REMAP_00 0x0020
22 #define VBIF_QOS_REMAP_01 0x0024
23 #define VBIF_QOS_REMAP_10 0x0028
24 #define VBIF_QOS_REMAP_11 0x002C
/* Per-client (xin) write-gather enable bits, one bit per xin_id. */
25 #define VBIF_WRITE_GATHER_EN 0x00AC
/*
 * Outstanding-transaction limit configuration: each 32-bit CONF register
 * packs four 8-bit limit fields (see dpu_hw_set/get_limit_conf below).
 */
26 #define VBIF_IN_RD_LIM_CONF0 0x00B0
27 #define VBIF_IN_RD_LIM_CONF1 0x00B4
28 #define VBIF_IN_RD_LIM_CONF2 0x00B8
29 #define VBIF_IN_WR_LIM_CONF0 0x00C0
30 #define VBIF_IN_WR_LIM_CONF1 0x00C4
31 #define VBIF_IN_WR_LIM_CONF2 0x00C8
32 #define VBIF_OUT_RD_LIM_CONF0 0x00D0
33 #define VBIF_OUT_WR_LIM_CONF0 0x00D4
/* AXI AMEMTYPE fields: 8 clients per register (see dpu_hw_set_mem_type). */
34 #define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
35 #define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
/* Per-xin pending/source error status and clear registers. */
36 #define VBIF_XIN_PND_ERR 0x0190
37 #define VBIF_XIN_SRC_ERR 0x0194
38 #define VBIF_XIN_CLR_ERR 0x019C
/* Halt request (CTRL0) and halt acknowledge/status (CTRL1) registers. */
39 #define VBIF_XIN_HALT_CTRL0 0x0200
40 #define VBIF_XIN_HALT_CTRL1 0x0204
/* Dynamic QoS remapper banks (see dpu_hw_set_qos_remap). */
41 #define VBIF_XINL_QOS_RP_REMAP_000 0x0550
42 #define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
/*
 * dpu_hw_clear_errors - latch and clear the VBIF error status registers
 * @vbif: VBIF block handle whose register map is read/written
 * @pnd_errors: optional out-param receiving the pending-error bitmask
 * @src_errors: optional out-param receiving the source-error bitmask
 *
 * Reads both error status registers, then writes the union of the two
 * bitmasks to the clear register so no latched error is left behind.
 */
44 static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
45 u32 *pnd_errors, u32 *src_errors)
47 struct dpu_hw_blk_reg_map *c;
/* Snapshot both status registers before issuing the clear. */
53 pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
54 src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
/*
 * NOTE(review): assumes VBIF_XIN_CLR_ERR is write-1-to-clear for each
 * set bit — confirm against the VBIF hardware programming guide.
 */
61 DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
/*
 * dpu_hw_set_mem_type - program the AXI AMEMTYPE field for one client
 * @vbif: VBIF block handle
 * @xin_id: client port index (0..15) selecting the field to update
 * @value: memory-type value to program into the client's field
 *
 * AMEMTYPE fields are packed 8 per 32-bit register, so xin_id 0-7 land in
 * CONF0 and xin_id 8-15 in CONF1; the update is a read-modify-write of
 * only the addressed field.
 */
64 static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
65 u32 xin_id, u32 value)
67 struct dpu_hw_blk_reg_map *c;
/*
 * Assume 4 bits per bit field, 8 fields per 32-bit register so
 * 16 bit fields maximum across two registers
 */
/* Reject out-of-range clients (hard cap of 16 fields across CONF0/1). */
76 if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
/* xin_id 8-15 -> CONF1, 0-7 -> CONF0. */
83 reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
85 reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
/* Each field is 4 bits wide within its register. */
87 bit_off = (xin_id & 0x7) * 4;
88 reg_val = DPU_REG_READ(c, reg_off);
/*
 * NOTE(review): the comment above says 4-bit fields but the mask is 0x7
 * (3 bits), so the top bit of each field is never cleared or written —
 * confirm against the register spec whether bit 3 is reserved.
 */
89 reg_val &= ~(0x7 << bit_off);
90 reg_val |= (value & 0x7) << bit_off;
91 DPU_REG_WRITE(c, reg_off, reg_val);
/*
 * dpu_hw_set_limit_conf - set the outstanding-transaction limit for a client
 * @vbif: VBIF block handle
 * @xin_id: client port index selecting the 8-bit field to update
 * @rd: true to program the read limit, false for the write limit
 * @limit: transaction limit value for this client
 *
 * Limit fields are packed four per 32-bit CONF register, 8 bits each;
 * the update is a read-modify-write of only the addressed byte.
 */
94 static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
95 u32 xin_id, bool rd, u32 limit)
97 struct dpu_hw_blk_reg_map *c = &vbif->hw;
/* Base register depends on direction; CONF0/1/2 are 4 bytes apart. */
103 reg_off = VBIF_IN_RD_LIM_CONF0;
105 reg_off = VBIF_IN_WR_LIM_CONF0;
107 reg_off += (xin_id / 4) * 4;
108 bit_off = (xin_id % 4) * 8;
109 reg_val = DPU_REG_READ(c, reg_off);
110 reg_val &= ~(0xFF << bit_off);
/*
 * NOTE(review): 'limit' is not masked to 0xFF before OR-ing; a value
 * > 255 would corrupt the neighbouring fields — confirm callers clamp.
 */
111 reg_val |= (limit) << bit_off;
112 DPU_REG_WRITE(c, reg_off, reg_val);
/*
 * dpu_hw_get_limit_conf - read back a client's outstanding-transaction limit
 * @vbif: VBIF block handle
 *
 * Mirrors dpu_hw_set_limit_conf: locates the 8-bit field for the client in
 * the packed CONF registers and returns its current value.
 */
115 static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
118 struct dpu_hw_blk_reg_map *c = &vbif->hw;
/* Same register/byte addressing as the setter. */
125 reg_off = VBIF_IN_RD_LIM_CONF0;
127 reg_off = VBIF_IN_WR_LIM_CONF0;
129 reg_off += (xin_id / 4) * 4;
130 bit_off = (xin_id % 4) * 8;
131 reg_val = DPU_REG_READ(c, reg_off);
/* Extract only the addressed 8-bit field. */
132 limit = (reg_val >> bit_off) & 0xFF;
/*
 * dpu_hw_set_halt_ctrl - request or release a halt for one VBIF client
 * @vbif: VBIF block handle
 * @xin_id: client port index; one bit per client in HALT_CTRL0
 * @enable: true to assert the halt request, false to release it
 *
 * Read-modify-write of the halt request register; the acknowledge is
 * polled separately via dpu_hw_get_halt_ctrl (HALT_CTRL1).
 */
137 static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
138 u32 xin_id, bool enable)
140 struct dpu_hw_blk_reg_map *c = &vbif->hw;
143 reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
/* Set or clear only this client's request bit. */
146 reg_val |= BIT(xin_id);
148 reg_val &= ~BIT(xin_id);
150 DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
/*
 * dpu_hw_get_halt_ctrl - check whether a client's halt has been acknowledged
 * @vbif: VBIF block handle
 *
 * Return: true if the client's bit is set in the halt status register.
 * Note the status is read from HALT_CTRL1 while the request is written to
 * HALT_CTRL0 (see dpu_hw_set_halt_ctrl).
 */
153 static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
156 struct dpu_hw_blk_reg_map *c = &vbif->hw;
159 reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
161 return (reg_val & BIT(xin_id)) ? true : false;
/*
 * dpu_hw_set_qos_remap - program the dynamic QoS remapper for one client
 * @vbif: VBIF block handle
 * @xin_id: client port index (low 3 bits select the field, bit 3 the bank)
 * @level: incoming QoS priority level being remapped
 * @remap_level: priority level the hardware should substitute
 *
 * Each level owns a pair of 32-bit registers (RP and LVL remap), split into
 * two banks for clients 0-7 and 8-15, with 4-bit fields per client.
 */
164 static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
165 u32 xin_id, u32 level, u32 remap_level)
167 struct dpu_hw_blk_reg_map *c;
168 u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
/*
 * Register selection: +4 bytes for the high bank (xin_id 8-15),
 * +8 bytes per priority level.
 */
175 reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
176 reg_shift = (xin_id & 0x7) * 4;
178 reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
179 reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
/*
 * NOTE(review): fields appear to be 4 bits wide (shift is xin*4) but the
 * mask is 0x7 — confirm bit 3 of each field is reserved. Also note both
 * the RP and LVL remappers are written with 'remap_level'; 'level' only
 * selects the register pair — verify that is the intended programming.
 */
181 mask = 0x7 << reg_shift;
184 reg_val |= (remap_level << reg_shift) & mask;
186 reg_val_lvl &= ~mask;
187 reg_val_lvl |= (remap_level << reg_shift) & mask;
189 DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
190 DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
/*
 * dpu_hw_set_write_gather_en - enable write gathering for one VBIF client
 * @vbif: VBIF block handle
 * @xin_id: client port index; one enable bit per client
 *
 * Sets (never clears) the client's bit in the write-gather enable register
 * via read-modify-write. Invalid handles/ids are rejected up front.
 */
193 static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
195 struct dpu_hw_blk_reg_map *c;
198 if (!vbif || xin_id >= MAX_XIN_COUNT)
203 reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN)
204 reg_val |= BIT(xin_id);
205 DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
/*
 * _setup_vbif_ops - populate the VBIF function table
 * @ops: ops structure to fill in
 *
 * Installs the default handlers unconditionally; the QoS remap op is only
 * installed when the catalog advertises DPU_VBIF_QOS_REMAP in the feature
 * capability bitmask (presumably 'cap' — parameter line not visible here).
 */
208 static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
211 ops->set_limit_conf = dpu_hw_set_limit_conf;
212 ops->get_limit_conf = dpu_hw_get_limit_conf;
213 ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
214 ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
/* Optional: only hardware with the QoS remap feature gets this op. */
215 if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
216 ops->set_qos_remap = dpu_hw_set_qos_remap;
217 ops->set_mem_type = dpu_hw_set_mem_type;
218 ops->clear_errors = dpu_hw_clear_errors;
219 ops->set_write_gather_en = dpu_hw_set_write_gather_en;
/*
 * _top_offset - look up a VBIF instance in the hardware catalog
 * @vbif: VBIF instance id to find
 * @m: hardware catalog to search
 * @b: register map to initialize on a match
 *
 * Scans the catalog's VBIF entries for a matching id and fills in the block
 * register map (offset, length, hw version, debug mask) from the catalog.
 *
 * Return: the matching catalog entry, or ERR_PTR(-EINVAL) if no VBIF with
 * this id exists in the catalog.
 */
222 static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
223 const struct dpu_mdss_cfg *m,
225 struct dpu_hw_blk_reg_map *b)
229 for (i = 0; i < m->vbif_count; i++) {
230 if (vbif == m->vbif[i].id) {
232 b->blk_off = m->vbif[i].base;
233 b->length = m->vbif[i].len;
234 b->hwversion = m->hwversion;
235 b->log_mask = DPU_DBG_MASK_VBIF;
/* No catalog entry matched the requested id. */
240 return ERR_PTR(-EINVAL);
/*
 * dpu_hw_vbif_init - allocate and initialize a VBIF block handle
 * @idx: VBIF instance id to initialize
 * @m: hardware catalog describing this DPU
 *
 * Allocates the handle, resolves the instance's register map from the
 * catalog, and wires up the ops table based on the catalog feature bits.
 *
 * Return: the new handle, or an ERR_PTR on allocation/lookup failure.
 * Ownership transfers to the caller, who must release it with
 * dpu_hw_vbif_destroy().
 */
243 struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
245 const struct dpu_mdss_cfg *m)
247 struct dpu_hw_vbif *c;
248 const struct dpu_vbif_cfg *cfg;
250 c = kzalloc(sizeof(*c), GFP_KERNEL);
252 return ERR_PTR(-ENOMEM);
254 cfg = _top_offset(idx, m, addr, &c->hw);
255 if (IS_ERR_OR_NULL(cfg)) {
/*
 * NOTE(review): make sure the kzalloc'd 'c' is freed on this error
 * path — the free is not visible in this excerpt.
 */
257 return ERR_PTR(-EINVAL);
265 _setup_vbif_ops(&c->ops, c->cap->features);
267 /* no need to register sub-range in dpu dbg, dump entire vbif io base */
272 void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)