GNU Linux-libre 4.19.264-gnu1
drivers/soc/qcom/rpmh.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS                 msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name)        \
        struct rpmh_request name = {                    \
                .msg = {                                \
                        .state = s,                     \
                        .cmds = name.cmd,               \
                        .num_cmds = 0,                  \
                        .wait_for_compl = true,         \
                },                                      \
                .cmd = { { 0 } },                       \
                .completion = q,                        \
                .dev = dev,                             \
                .needs_free = false,                    \
        }

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req - the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
        u32 addr;
        u32 sleep_val;
        u32 wake_val;
        struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
        struct list_head list;
        int count;
        struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
        struct rsc_drv *drv = dev_get_drvdata(dev->parent);

        return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg, int r)
{
        struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
                                                    msg);
        struct completion *compl = rpm_msg->completion;
        bool free = rpm_msg->needs_free;

        rpm_msg->err = r;

        if (r)
                dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
                        rpm_msg->msg.cmds[0].addr, r);

        if (!compl)
                goto exit;

        /* Signal the blocking thread we are done */
        complete(compl);

exit:
        if (free)
                kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
        struct cache_req *p, *req = NULL;

        list_for_each_entry(p, &ctrlr->cache, list) {
                if (p->addr == addr) {
                        req = p;
                        break;
                }
        }

        return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
                                           enum rpmh_state state,
                                           struct tcs_cmd *cmd)
{
        struct cache_req *req;
        unsigned long flags;
        u32 old_sleep_val, old_wake_val;

        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        req = __find_req(ctrlr, cmd->addr);
        if (req)
                goto existing;

        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                req = ERR_PTR(-ENOMEM);
                goto unlock;
        }

        req->addr = cmd->addr;
        req->sleep_val = req->wake_val = UINT_MAX;
        list_add_tail(&req->list, &ctrlr->cache);

existing:
        old_sleep_val = req->sleep_val;
        old_wake_val = req->wake_val;

        switch (state) {
        case RPMH_ACTIVE_ONLY_STATE:
        case RPMH_WAKE_ONLY_STATE:
                req->wake_val = cmd->data;
                break;
        case RPMH_SLEEP_STATE:
                req->sleep_val = cmd->data;
                break;
        }

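        /*
         * Only mark the controller dirty if this request changed a cached
         * vote and both the sleep and wake values for this address have
         * been set at least once (UINT_MAX is the "unset" sentinel); until
         * then there is nothing new worth flushing.
         */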
        ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
                         req->wake_val != old_wake_val) &&
                         req->sleep_val != UINT_MAX &&
                         req->wake_val != UINT_MAX;

unlock:
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

        return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
                        struct rpmh_request *rpm_msg)
{
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        int ret = -EINVAL;
        struct cache_req *req;
        int i;

        rpm_msg->msg.state = state;

        /* Cache the request in our store and link the payload */
        for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
                req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
                if (IS_ERR(req))
                        return PTR_ERR(req);
        }

        if (state == RPMH_ACTIVE_ONLY_STATE) {
                WARN_ON(irqs_disabled());
                ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
        } else {
                ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
                                &rpm_msg->msg);
                /* Clean up our call by spoofing tx_done */
                rpmh_tx_done(&rpm_msg->msg, ret);
        }

        return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
                const struct tcs_cmd *cmd, u32 n)
{
        if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
                return -EINVAL;

        memcpy(req->cmd, cmd, n * sizeof(*cmd));

        req->msg.state = state;
        req->msg.cmds = req->cmd;
        req->msg.num_cmds = n;

        return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands without blocking
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in the payload
 *
 * Write a set of RPMH commands; the order of the commands is maintained
 * and they are sent as a single shot. The call returns without waiting
 * for the response. An illustrative usage sketch follows the function
 * body.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
                     const struct tcs_cmd *cmd, u32 n)
{
        struct rpmh_request *rpm_msg;
        int ret;

        rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
        if (!rpm_msg)
                return -ENOMEM;
        rpm_msg->needs_free = true;

        ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
        if (ret) {
                kfree(rpm_msg);
                return ret;
        }

        return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);
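
/*
 * Illustrative usage sketch (not compiled as part of this driver): queueing
 * a fire-and-forget active-only vote from a client device that is a child
 * of the RSC. The resource address and data value are hypothetical.
 *
 *      struct tcs_cmd cmd = {
 *              .addr = 0x30000,        // hypothetical resource address
 *              .data = 0x1,            // hypothetical vote value
 *      };
 *      int ret;
 *
 *      ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *      if (ret)
 *              dev_err(dev, "failed to queue RPMH vote: %d\n", ret);
 */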

/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts. An illustrative usage
 * sketch follows the function body.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
               const struct tcs_cmd *cmd, u32 n)
{
        DECLARE_COMPLETION_ONSTACK(compl);
        DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
        int ret;

        if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
                return -EINVAL;

        memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
        rpm_msg.msg.num_cmds = n;

        ret = __rpmh_write(dev, state, &rpm_msg);
        if (ret)
                return ret;

        ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
        WARN_ON(!ret);
        return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);
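
/*
 * Illustrative usage sketch (not compiled as part of this driver): a
 * blocking active-only vote with rpmh_write(). The address and value are
 * hypothetical; the call sleeps until the controller acknowledges the
 * request or the timeout expires.
 *
 *      struct tcs_cmd cmd = {
 *              .addr = 0x30000,        // hypothetical resource address
 *              .data = 0x1,            // hypothetical vote value
 *      };
 *      int ret;
 *
 *      ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *      if (ret)
 *              dev_err(dev, "RPMH write failed: %d\n", ret);
 */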

static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
        unsigned long flags;

        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        list_add_tail(&req->list, &ctrlr->batch_cache);
        ctrlr->dirty = true;
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
        struct batch_cache_req *req;
        const struct rpmh_request *rpm_msg;
        unsigned long flags;
        int ret = 0;
        int i;

        /* Send Sleep/Wake requests to the controller, expect no response */
        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        list_for_each_entry(req, &ctrlr->batch_cache, list) {
                for (i = 0; i < req->count; i++) {
                        rpm_msg = req->rpm_msgs + i;
                        ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
                                                       &rpm_msg->msg);
                        if (ret)
                                break;
                }
        }
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

        return ret;
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write multiple batches of RPMH commands. If the request state is
 * ACTIVE_ONLY, the batches are treated as completion requests and sent to
 * the controller immediately; the function then waits until all the
 * commands are complete. For SLEEP or WAKE_ONLY requests, the batches are
 * cached and written to the controller when rpmh_flush() is called, and no
 * ack is expected. An illustrative usage sketch follows the function body.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
                     const struct tcs_cmd *cmd, u32 *n)
{
        struct batch_cache_req *req;
        struct rpmh_request *rpm_msgs;
        struct completion *compls;
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        unsigned long time_left;
        int count = 0;
        int ret, i;
        void *ptr;

        if (!cmd || !n)
                return -EINVAL;

        while (n[count] > 0)
                count++;
        if (!count)
                return -EINVAL;

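        /*
         * One allocation holds the batch_cache_req header, the rpm_msgs[]
         * flexible array (one entry per batch) and the completion array;
         * compls points just past the last rpm_msgs[] entry.
         */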
        ptr = kzalloc(sizeof(*req) +
                      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
                      GFP_ATOMIC);
        if (!ptr)
                return -ENOMEM;

        req = ptr;
        compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

        req->count = count;
        rpm_msgs = req->rpm_msgs;

        for (i = 0; i < count; i++) {
                __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
                cmd += n[i];
        }

        if (state != RPMH_ACTIVE_ONLY_STATE) {
                cache_batch(ctrlr, req);
                return 0;
        }

        for (i = 0; i < count; i++) {
                struct completion *compl = &compls[i];

                init_completion(compl);
                rpm_msgs[i].completion = compl;
                ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
                if (ret) {
                        pr_err("Error(%d) sending RPMH message addr=%#x\n",
                               ret, rpm_msgs[i].msg.cmds[0].addr);
                        break;
                }
        }

        time_left = RPMH_TIMEOUT_MS;
        while (i--) {
                time_left = wait_for_completion_timeout(&compls[i], time_left);
                if (!time_left) {
                        /*
                         * Better hope they never finish because they'll signal
                         * the completion that we're going to free once
                         * we've returned from this function.
                         */
                        WARN_ON(1);
                        ret = -ETIMEDOUT;
                        goto exit;
                }
        }

exit:
        kfree(ptr);

        return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);
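
/*
 * Illustrative usage sketch (not compiled as part of this driver): sending
 * two batches of active-only commands with rpmh_write_batch(). Addresses
 * and values are hypothetical; @n is the zero-terminated array of per-batch
 * command counts.
 *
 *      struct tcs_cmd cmds[] = {
 *              { .addr = 0x30000, .data = 0x1 },
 *              { .addr = 0x30010, .data = 0x2 },
 *              { .addr = 0x30020, .data = 0x3 },
 *      };
 *      u32 n[] = { 2, 1, 0 };  // batch 0: cmds[0..1], batch 1: cmds[2]
 *      int ret;
 *
 *      ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
 */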

static int is_req_valid(struct cache_req *req)
{
        return (req->sleep_val != UINT_MAX &&
                req->wake_val != UINT_MAX &&
                req->sleep_val != req->wake_val);
}

static int send_single(const struct device *dev, enum rpmh_state state,
                       u32 addr, u32 data)
{
        DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);

        /* Wake sets are always complete and sleep sets are not */
        rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
        rpm_msg.cmd[0].addr = addr;
        rpm_msg.cmd[0].data = data;
        rpm_msg.msg.num_cmds = 1;

        return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush: Flushes the buffered sleep and wake sets to TCSes
 *
 * @dev: The device making the request
 *
 * Return: -EBUSY if the controller is busy, probably waiting on a response
 * to an RPMH request sent earlier.
 *
 * This function is always called from the sleep code from the last CPU
 * that is powering down the entire system. Since no other RPMH API would be
 * executing at this time, it is safe to run lockless.
 */
int rpmh_flush(const struct device *dev)
{
        struct cache_req *p;
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        int ret;

        if (!ctrlr->dirty) {
                pr_debug("Skipping flush, TCS has latest data.\n");
                return 0;
        }

        /* Invalidate the TCSes first to avoid stale data */
        do {
                ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
        } while (ret == -EAGAIN);
        if (ret)
                return ret;

        /* First flush the cached batch requests */
        ret = flush_batch(ctrlr);
        if (ret)
                return ret;

        /*
         * Nobody else should be calling this function other than system PM,
         * hence we can run without locks.
         */
        list_for_each_entry(p, &ctrlr->cache, list) {
                if (!is_req_valid(p)) {
                        pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x\n",
                                 __func__, p->addr, p->sleep_val, p->wake_val);
                        continue;
                }
                ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
                if (ret)
                        return ret;
                ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
                                  p->addr, p->wake_val);
                if (ret)
                        return ret;
        }

        ctrlr->dirty = false;

        return 0;
}
EXPORT_SYMBOL(rpmh_flush);
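
/*
 * Illustrative usage sketch (not compiled as part of this driver): as noted
 * above, rpmh_flush() is expected to be called from the platform sleep code
 * on the last CPU going down, before the RSC loses context. The callback
 * name below is hypothetical.
 *
 *      static int soc_sleep_enter(const struct device *rpmh_dev)
 *      {
 *              int ret = rpmh_flush(rpmh_dev);
 *
 *              if (ret)
 *                      return ret;     // stay awake, flush failed
 *
 *              return 0;
 *      }
 */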

/**
 * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in batch_cache.
 */
int rpmh_invalidate(const struct device *dev)
{
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        struct batch_cache_req *req, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
                kfree(req);
        INIT_LIST_HEAD(&ctrlr->batch_cache);
        ctrlr->dirty = true;
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

        return 0;
}
EXPORT_SYMBOL(rpmh_invalidate);
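
/*
 * Illustrative usage sketch (not compiled as part of this driver): a client
 * that wants to replace its previously queued sleep/wake batches would
 * typically drop the old ones and then queue fresh ones. The sleep_cmds/
 * wake_cmds arrays and their count arrays are hypothetical.
 *
 *      rpmh_invalidate(dev);
 *      rpmh_write_batch(dev, RPMH_SLEEP_STATE, sleep_cmds, sleep_n);
 *      rpmh_write_batch(dev, RPMH_WAKE_ONLY_STATE, wake_cmds, wake_n);
 */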