GNU Linux-libre 4.19.286-gnu1
[releases.git] / drivers / staging / greybus / svc.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * SVC Greybus driver.
4  *
5  * Copyright 2015 Google Inc.
6  * Copyright 2015 Linaro Ltd.
7  */
8
9 #include <linux/debugfs.h>
10 #include <linux/workqueue.h>
11
12 #include "greybus.h"
13
14 #define SVC_INTF_EJECT_TIMEOUT          9000
15 #define SVC_INTF_ACTIVATE_TIMEOUT       6000
16 #define SVC_INTF_RESUME_TIMEOUT         3000
17
/*
 * Wrapper used to hand an incoming SVC request off to a workqueue so it
 * can be processed outside the connection's receive context.
 */
struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;	/* held until the work item runs */
};
22
23
24 static int gb_svc_queue_deferred_request(struct gb_operation *operation);
25
/* sysfs: report the Endo ID recorded from the SVC hello request. */
static ssize_t endo_id_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);
34
/* sysfs: report the AP interface ID recorded from the SVC hello request. */
static ssize_t ap_intf_id_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);
43
44 // FIXME
45 // This is a hack, we need to do this "right" and clean the interface up
46 // properly, not just forcibly yank the thing out of the system and hope for the
47 // best.  But for now, people want their modules to come out without having to
48 // throw the thing to the ground or get out a screwdriver.
49 static ssize_t intf_eject_store(struct device *dev,
50                                 struct device_attribute *attr, const char *buf,
51                                 size_t len)
52 {
53         struct gb_svc *svc = to_gb_svc(dev);
54         unsigned short intf_id;
55         int ret;
56
57         ret = kstrtou16(buf, 10, &intf_id);
58         if (ret < 0)
59                 return ret;
60
61         dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
62
63         ret = gb_svc_intf_eject(svc, intf_id);
64         if (ret < 0)
65                 return ret;
66
67         return len;
68 }
69 static DEVICE_ATTR_WO(intf_eject);
70
71 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
72                              char *buf)
73 {
74         struct gb_svc *svc = to_gb_svc(dev);
75
76         return sprintf(buf, "%s\n",
77                        gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
78 }
79
80 static ssize_t watchdog_store(struct device *dev,
81                               struct device_attribute *attr, const char *buf,
82                               size_t len)
83 {
84         struct gb_svc *svc = to_gb_svc(dev);
85         int retval;
86         bool user_request;
87
88         retval = strtobool(buf, &user_request);
89         if (retval)
90                 return retval;
91
92         if (user_request)
93                 retval = gb_svc_watchdog_enable(svc);
94         else
95                 retval = gb_svc_watchdog_disable(svc);
96         if (retval)
97                 return retval;
98         return len;
99 }
100 static DEVICE_ATTR_RW(watchdog);
101
102 static ssize_t watchdog_action_show(struct device *dev,
103                                     struct device_attribute *attr, char *buf)
104 {
105         struct gb_svc *svc = to_gb_svc(dev);
106
107         if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
108                 return sprintf(buf, "panic\n");
109         else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
110                 return sprintf(buf, "reset\n");
111
112         return -EINVAL;
113 }
114
115 static ssize_t watchdog_action_store(struct device *dev,
116                                      struct device_attribute *attr,
117                                      const char *buf, size_t len)
118 {
119         struct gb_svc *svc = to_gb_svc(dev);
120
121         if (sysfs_streq(buf, "panic"))
122                 svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
123         else if (sysfs_streq(buf, "reset"))
124                 svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
125         else
126                 return -EINVAL;
127
128         return len;
129 }
130 static DEVICE_ATTR_RW(watchdog_action);
131
/*
 * Query the SVC for the number of power rails it can monitor.
 * On success the count is stored in @value and 0 is returned;
 * otherwise the gb_operation_sync() error is propagated.
 */
static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
{
	struct gb_svc_pwrmon_rail_count_get_response response;
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
		return ret;
	}

	/* Single-byte field: no endianness conversion needed. */
	*value = response.rail_count;

	return 0;
}
149
/*
 * Fetch the rail-name table from the SVC into the caller-supplied
 * @response buffer of @bufsize bytes.  Returns 0 on success, a
 * transport error from gb_operation_sync(), or -EREMOTEIO when the
 * SVC itself reports a non-success status in the response.
 */
static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
		struct gb_svc_pwrmon_rail_names_get_response *response,
		size_t bufsize)
{
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
				response, bufsize);
	if (ret) {
		dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
		return ret;
	}

	if (response->status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev,
			"SVC error while getting rail names: %u\n",
			response->status);
		return -EREMOTEIO;
	}

	return 0;
}
173
/*
 * Read one power-monitor sample for @rail_id/@measurement_type.
 * The little-endian measurement is converted and stored in @value.
 * SVC-reported failures are mapped to -EINVAL, -ENOMSG or -EREMOTEIO.
 */
static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
				    u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_sample_get_request request;
	struct gb_svc_pwrmon_sample_get_response response;
	int ret;

	/* Both request fields are u8, so no cpu_to_le conversion needed. */
	request.rail_id = rail_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting rail power sample (%d %d): %d\n",
			rail_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}
210
/*
 * Read one power-monitor sample for a whole interface (@intf_id) rather
 * than a single rail; otherwise mirrors gb_svc_pwrmon_sample_get():
 * converts the little-endian measurement into @value and maps
 * SVC-reported failures to -EINVAL, -ENOMSG or -EREMOTEIO.
 */
int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
				  u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_intf_sample_get_request request;
	struct gb_svc_pwrmon_intf_sample_get_response response;
	int ret;

	/* Both request fields are u8, so no cpu_to_le conversion needed. */
	request.intf_id = intf_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting intf power sample (%d %d): %d\n",
			intf_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}
248
/* sysfs attributes exposed by the SVC device. */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	&dev_attr_watchdog_action.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
258
259 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
260 {
261         struct gb_svc_intf_device_id_request request;
262
263         request.intf_id = intf_id;
264         request.device_id = device_id;
265
266         return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
267                                  &request, sizeof(request), NULL, 0);
268 }
269
/*
 * Ask the SVC to physically eject interface @intf_id.
 * Returns 0 on success or a negative errno.
 */
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in svc is long so we need to
	 * increase the timeout so the operation will not return too soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}
292
293 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
294 {
295         struct gb_svc_intf_vsys_request request;
296         struct gb_svc_intf_vsys_response response;
297         int type, ret;
298
299         request.intf_id = intf_id;
300
301         if (enable)
302                 type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
303         else
304                 type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
305
306         ret = gb_operation_sync(svc->connection, type,
307                         &request, sizeof(request),
308                         &response, sizeof(response));
309         if (ret < 0)
310                 return ret;
311         if (response.result_code != GB_SVC_INTF_VSYS_OK)
312                 return -EREMOTEIO;
313         return 0;
314 }
315
316 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
317 {
318         struct gb_svc_intf_refclk_request request;
319         struct gb_svc_intf_refclk_response response;
320         int type, ret;
321
322         request.intf_id = intf_id;
323
324         if (enable)
325                 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
326         else
327                 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
328
329         ret = gb_operation_sync(svc->connection, type,
330                         &request, sizeof(request),
331                         &response, sizeof(response));
332         if (ret < 0)
333                 return ret;
334         if (response.result_code != GB_SVC_INTF_REFCLK_OK)
335                 return -EREMOTEIO;
336         return 0;
337 }
338
339 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
340 {
341         struct gb_svc_intf_unipro_request request;
342         struct gb_svc_intf_unipro_response response;
343         int type, ret;
344
345         request.intf_id = intf_id;
346
347         if (enable)
348                 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
349         else
350                 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
351
352         ret = gb_operation_sync(svc->connection, type,
353                         &request, sizeof(request),
354                         &response, sizeof(response));
355         if (ret < 0)
356                 return ret;
357         if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
358                 return -EREMOTEIO;
359         return 0;
360 }
361
/*
 * Activate interface @intf_id.  On success the detected interface type
 * is stored in @intf_type.  Uses an extended timeout since activation
 * involves module enumeration on the SVC side.
 */
int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
{
	struct gb_svc_intf_activate_request request;
	struct gb_svc_intf_activate_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
			GB_SVC_TYPE_INTF_ACTIVATE,
			&request, sizeof(request),
			&response, sizeof(response),
			SVC_INTF_ACTIVATE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to activate interface %u: %u\n",
				intf_id, response.status);
		return -EREMOTEIO;
	}

	*intf_type = response.intf_type;

	return 0;
}
387
/*
 * Resume a suspended interface.  Distinguishes transport failures
 * (negative errno from the operation core) from SVC-reported failures
 * (-EREMOTEIO when response.status is not GB_SVC_OP_SUCCESS).
 */
int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_resume_request request;
	struct gb_svc_intf_resume_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_RESUME,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_RESUME_TIMEOUT);
	if (ret < 0) {
		dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
			intf_id, ret);
		return ret;
	}

	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to resume interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	return 0;
}
415
/*
 * Read DME attribute @attr (selector index @selector) from the peer on
 * interface @intf_id.  Multi-byte request fields are converted to
 * little-endian; on success the attribute value is converted back and
 * stored in @value (which may be NULL if the caller only wants the
 * success/failure result).
 */
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
				intf_id, attr, selector, ret);
		return ret;
	}

	/* Non-zero result_code means a UniPro-level failure. */
	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
				intf_id, attr, selector, result);
		return -EREMOTEIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}
449
/*
 * Write @value to DME attribute @attr (selector index @selector) on the
 * peer of interface @intf_id.  Counterpart of gb_svc_dme_peer_get();
 * returns 0, a transport errno, or -EREMOTEIO on a UniPro-level error.
 */
int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
				intf_id, attr, selector, value, ret);
		return ret;
	}

	/* Non-zero result_code means a UniPro-level failure. */
	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
				intf_id, attr, selector, value, result);
		return -EREMOTEIO;
	}

	return 0;
}
481
482 int gb_svc_connection_create(struct gb_svc *svc,
483                                 u8 intf1_id, u16 cport1_id,
484                                 u8 intf2_id, u16 cport2_id,
485                                 u8 cport_flags)
486 {
487         struct gb_svc_conn_create_request request;
488
489         request.intf1_id = intf1_id;
490         request.cport1_id = cpu_to_le16(cport1_id);
491         request.intf2_id = intf2_id;
492         request.cport2_id = cpu_to_le16(cport2_id);
493         request.tc = 0;         /* TC0 */
494         request.flags = cport_flags;
495
496         return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
497                                  &request, sizeof(request), NULL, 0);
498 }
499
/*
 * Tear down the connection between (@intf1_id, @cport1_id) and
 * (@intf2_id, @cport2_id).  Failures are logged but not propagated
 * since this is typically called on cleanup paths.
 */
void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
				intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}
519
520 /* Creates bi-directional routes between the devices */
521 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
522                                u8 intf2_id, u8 dev2_id)
523 {
524         struct gb_svc_route_create_request request;
525
526         request.intf1_id = intf1_id;
527         request.dev1_id = dev1_id;
528         request.intf2_id = intf2_id;
529         request.dev2_id = dev2_id;
530
531         return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
532                                  &request, sizeof(request), NULL, 0);
533 }
534
/*
 * Destroys bi-directional routes between the devices.
 * Failures are logged but not propagated (cleanup-path helper).
 */
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
				intf1_id, intf2_id, ret);
	}
}
551
/*
 * Configure the UniPro link power mode for interface @intf_id.  All
 * link parameters are passed through verbatim; @local/@remote L2 timer
 * configurations are optional (NULL leaves them zeroed).
 *
 * NOTE(review): response.result_code is read without le16_to_cpu()
 * although the local variable is u16 and other responses in this file
 * convert their 16-bit fields.  If the wire field is __le16 this
 * comparison misbehaves on big-endian hosts — confirm against
 * greybus_protocols.h before changing.
 */
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 tx_amplitude, u8 tx_hs_equalizer,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks,
			       struct gb_svc_l2_timer_cfg *local,
			       struct gb_svc_l2_timer_cfg *remote)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	/* Zero first: the timer blocks stay cleared when not supplied. */
	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.tx_amplitude = tx_amplitude;
	request.tx_hs_equalizer = tx_hs_equalizer;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);
	if (local)
		request.local_l2timerdata = *local;
	if (remote)
		request.remote_l2timerdata = *remote;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
		dev_err(&svc->dev, "set power mode = %d\n", result_code);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
599
/*
 * Put the UniPro link of interface @intf_id into hibernate mode by
 * issuing a set-power-mode request with hibernate TX/RX modes.
 *
 * NOTE(review): as in gb_svc_intf_set_power_mode(), result_code is read
 * without le16_to_cpu() — confirm the wire field's type before relying
 * on this on big-endian hosts.
 */
int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
	request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
	request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0) {
		dev_err(&svc->dev,
			"failed to send set power mode operation to interface %u: %d\n",
			intf_id, ret);
		return ret;
	}

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_OK) {
		dev_err(&svc->dev,
			"failed to hibernate the link for interface %u: %u\n",
			intf_id, result_code);
		return -EIO;
	}

	return 0;
}
634
/*
 * Ping the SVC to verify it is responsive.  Uses twice the default
 * operation timeout to tolerate a busy SVC.
 */
int gb_svc_ping(struct gb_svc *svc)
{
	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
					 NULL, 0, NULL, 0,
					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
}
641
/*
 * Handle an incoming SVC version request: validate the payload, reject
 * unsupported major versions, record the negotiated protocol version
 * and echo it back in the response.
 */
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
				op->request->payload_size,
				sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
				request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}
676
/*
 * debugfs read handler for a rail's "voltage_now" file: fetch one
 * voltage sample from the SVC and return it as a decimal string.
 */
static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	/* The rail descriptor was stashed in i_private at file creation. */
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_VOL, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get voltage sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}
700
/*
 * debugfs read handler for a rail's "current_now" file: fetch one
 * current sample from the SVC and return it as a decimal string.
 */
static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	/* The rail descriptor was stashed in i_private at file creation. */
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_CURR, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get current sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}
724
/*
 * debugfs read handler for a rail's "power_now" file: fetch one power
 * sample from the SVC and return it as a decimal string.
 */
static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
				      size_t len, loff_t *offset)
{
	/* The rail descriptor was stashed in i_private at file creation. */
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_PWR, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get power sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}
747
/* Read-only file_operations for the per-rail pwrmon debugfs files. */
static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read		= pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read		= pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read		= pwr_debugfs_power_read,
};
759
/*
 * Build the pwrmon debugfs tree: one directory per rail containing
 * voltage_now/current_now/power_now files.  Best-effort — any failure
 * tears down what was created and returns silently, leaving
 * svc->pwrmon_rails NULL.
 */
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	/* Sanity-bound the SVC-supplied count before sizing allocations. */
	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		/* rail is passed as i_private for the read handlers. */
		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", 0444, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", 0444, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", 0444, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	/* The name buffer is only needed while creating the files. */
	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}
824
/* Create the SVC's debugfs directory and populate the pwrmon subtree. */
static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}
831
/* Remove the SVC debugfs tree and free the per-rail descriptors. */
static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}
838
839 static int gb_svc_hello(struct gb_operation *op)
840 {
841         struct gb_connection *connection = op->connection;
842         struct gb_svc *svc = gb_connection_get_data(connection);
843         struct gb_svc_hello_request *hello_request;
844         int ret;
845
846         if (op->request->payload_size < sizeof(*hello_request)) {
847                 dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
848                                 op->request->payload_size,
849                                 sizeof(*hello_request));
850                 return -EINVAL;
851         }
852
853         hello_request = op->request->payload;
854         svc->endo_id = le16_to_cpu(hello_request->endo_id);
855         svc->ap_intf_id = hello_request->interface_id;
856
857         ret = device_add(&svc->dev);
858         if (ret) {
859                 dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
860                 return ret;
861         }
862
863         ret = gb_svc_watchdog_create(svc);
864         if (ret) {
865                 dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
866                 goto err_unregister_device;
867         }
868
869         gb_svc_debugfs_init(svc);
870
871         return gb_svc_queue_deferred_request(op);
872
873 err_unregister_device:
874         gb_svc_watchdog_destroy(svc);
875         device_del(&svc->dev);
876         return ret;
877 }
878
879 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
880                                                         u8 intf_id)
881 {
882         struct gb_host_device *hd = svc->hd;
883         struct gb_module *module;
884         size_t num_interfaces;
885         u8 module_id;
886
887         list_for_each_entry(module, &hd->modules, hd_node) {
888                 module_id = module->module_id;
889                 num_interfaces = module->num_interfaces;
890
891                 if (intf_id >= module_id &&
892                                 intf_id < module_id + num_interfaces) {
893                         return module->interfaces[intf_id - module_id];
894                 }
895         }
896
897         return NULL;
898 }
899
900 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
901 {
902         struct gb_host_device *hd = svc->hd;
903         struct gb_module *module;
904
905         list_for_each_entry(module, &hd->modules, hd_node) {
906                 if (module->module_id == module_id)
907                         return module;
908         }
909
910         return NULL;
911 }
912
/*
 * Deferred SVC_HELLO processing, run from the SVC workqueue after
 * gb_svc_hello() has registered the SVC device.
 */
static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					 GB_SVC_UNIPRO_HS_SERIES_A,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 GB_SVC_SMALL_AMPLITUDE,
					 GB_SVC_NO_DE_EMPHASIS,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 0, 0,
					 NULL, NULL);

	/* Non-fatal: warn and carry on with the existing link settings. */
	if (ret)
		dev_warn(&svc->dev,
			"power mode change failed on AP to switch link: %d\n",
			ret);
}
944
945 static void gb_svc_process_module_inserted(struct gb_operation *operation)
946 {
947         struct gb_svc_module_inserted_request *request;
948         struct gb_connection *connection = operation->connection;
949         struct gb_svc *svc = gb_connection_get_data(connection);
950         struct gb_host_device *hd = svc->hd;
951         struct gb_module *module;
952         size_t num_interfaces;
953         u8 module_id;
954         u16 flags;
955         int ret;
956
957         /* The request message size has already been verified. */
958         request = operation->request->payload;
959         module_id = request->primary_intf_id;
960         num_interfaces = request->intf_count;
961         flags = le16_to_cpu(request->flags);
962
963         dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
964                         __func__, module_id, num_interfaces, flags);
965
966         if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
967                 dev_warn(&svc->dev, "no primary interface detected on module %u\n",
968                                 module_id);
969         }
970
971         module = gb_svc_module_lookup(svc, module_id);
972         if (module) {
973                 dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
974                                 module_id);
975                 return;
976         }
977
978         module = gb_module_create(hd, module_id, num_interfaces);
979         if (!module) {
980                 dev_err(&svc->dev, "failed to create module\n");
981                 return;
982         }
983
984         ret = gb_module_add(module);
985         if (ret) {
986                 gb_module_put(module);
987                 return;
988         }
989
990         list_add(&module->hd_node, &hd->modules);
991 }
992
993 static void gb_svc_process_module_removed(struct gb_operation *operation)
994 {
995         struct gb_svc_module_removed_request *request;
996         struct gb_connection *connection = operation->connection;
997         struct gb_svc *svc = gb_connection_get_data(connection);
998         struct gb_module *module;
999         u8 module_id;
1000
1001         /* The request message size has already been verified. */
1002         request = operation->request->payload;
1003         module_id = request->primary_intf_id;
1004
1005         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
1006
1007         module = gb_svc_module_lookup(svc, module_id);
1008         if (!module) {
1009                 dev_warn(&svc->dev, "unexpected module-removed event %u\n",
1010                                 module_id);
1011                 return;
1012         }
1013
1014         module->disconnected = true;
1015
1016         gb_module_del(module);
1017         list_del(&module->hd_node);
1018         gb_module_put(module);
1019 }
1020
1021 static void gb_svc_process_intf_oops(struct gb_operation *operation)
1022 {
1023         struct gb_svc_intf_oops_request *request;
1024         struct gb_connection *connection = operation->connection;
1025         struct gb_svc *svc = gb_connection_get_data(connection);
1026         struct gb_interface *intf;
1027         u8 intf_id;
1028         u8 reason;
1029
1030         /* The request message size has already been verified. */
1031         request = operation->request->payload;
1032         intf_id = request->intf_id;
1033         reason = request->reason;
1034
1035         intf = gb_svc_interface_lookup(svc, intf_id);
1036         if (!intf) {
1037                 dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
1038                          intf_id);
1039                 return;
1040         }
1041
1042         dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
1043                  intf_id, reason);
1044
1045         mutex_lock(&intf->mutex);
1046         intf->disconnected = true;
1047         gb_interface_disable(intf);
1048         gb_interface_deactivate(intf);
1049         mutex_unlock(&intf->mutex);
1050 }
1051
1052 static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
1053 {
1054         struct gb_svc_intf_mailbox_event_request *request;
1055         struct gb_connection *connection = operation->connection;
1056         struct gb_svc *svc = gb_connection_get_data(connection);
1057         struct gb_interface *intf;
1058         u8 intf_id;
1059         u16 result_code;
1060         u32 mailbox;
1061
1062         /* The request message size has already been verified. */
1063         request = operation->request->payload;
1064         intf_id = request->intf_id;
1065         result_code = le16_to_cpu(request->result_code);
1066         mailbox = le32_to_cpu(request->mailbox);
1067
1068         dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
1069                         __func__, intf_id, result_code, mailbox);
1070
1071         intf = gb_svc_interface_lookup(svc, intf_id);
1072         if (!intf) {
1073                 dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
1074                 return;
1075         }
1076
1077         gb_interface_mailbox_event(intf, result_code, mailbox);
1078 }
1079
1080 static void gb_svc_process_deferred_request(struct work_struct *work)
1081 {
1082         struct gb_svc_deferred_request *dr;
1083         struct gb_operation *operation;
1084         struct gb_svc *svc;
1085         u8 type;
1086
1087         dr = container_of(work, struct gb_svc_deferred_request, work);
1088         operation = dr->operation;
1089         svc = gb_connection_get_data(operation->connection);
1090         type = operation->request->header->type;
1091
1092         switch (type) {
1093         case GB_SVC_TYPE_SVC_HELLO:
1094                 gb_svc_process_hello_deferred(operation);
1095                 break;
1096         case GB_SVC_TYPE_MODULE_INSERTED:
1097                 gb_svc_process_module_inserted(operation);
1098                 break;
1099         case GB_SVC_TYPE_MODULE_REMOVED:
1100                 gb_svc_process_module_removed(operation);
1101                 break;
1102         case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
1103                 gb_svc_process_intf_mailbox_event(operation);
1104                 break;
1105         case GB_SVC_TYPE_INTF_OOPS:
1106                 gb_svc_process_intf_oops(operation);
1107                 break;
1108         default:
1109                 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
1110         }
1111
1112         gb_operation_put(operation);
1113         kfree(dr);
1114 }
1115
1116 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
1117 {
1118         struct gb_svc *svc = gb_connection_get_data(operation->connection);
1119         struct gb_svc_deferred_request *dr;
1120
1121         dr = kmalloc(sizeof(*dr), GFP_KERNEL);
1122         if (!dr)
1123                 return -ENOMEM;
1124
1125         gb_operation_get(operation);
1126
1127         dr->operation = operation;
1128         INIT_WORK(&dr->work, gb_svc_process_deferred_request);
1129
1130         queue_work(svc->wq, &dr->work);
1131
1132         return 0;
1133 }
1134
1135 static int gb_svc_intf_reset_recv(struct gb_operation *op)
1136 {
1137         struct gb_svc *svc = gb_connection_get_data(op->connection);
1138         struct gb_message *request = op->request;
1139         struct gb_svc_intf_reset_request *reset;
1140
1141         if (request->payload_size < sizeof(*reset)) {
1142                 dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
1143                                 request->payload_size, sizeof(*reset));
1144                 return -EINVAL;
1145         }
1146         reset = request->payload;
1147
1148         /* FIXME Reset the interface here */
1149
1150         return 0;
1151 }
1152
1153 static int gb_svc_module_inserted_recv(struct gb_operation *op)
1154 {
1155         struct gb_svc *svc = gb_connection_get_data(op->connection);
1156         struct gb_svc_module_inserted_request *request;
1157
1158         if (op->request->payload_size < sizeof(*request)) {
1159                 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
1160                                 op->request->payload_size, sizeof(*request));
1161                 return -EINVAL;
1162         }
1163
1164         request = op->request->payload;
1165
1166         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1167                         request->primary_intf_id);
1168
1169         return gb_svc_queue_deferred_request(op);
1170 }
1171
1172 static int gb_svc_module_removed_recv(struct gb_operation *op)
1173 {
1174         struct gb_svc *svc = gb_connection_get_data(op->connection);
1175         struct gb_svc_module_removed_request *request;
1176
1177         if (op->request->payload_size < sizeof(*request)) {
1178                 dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
1179                                 op->request->payload_size, sizeof(*request));
1180                 return -EINVAL;
1181         }
1182
1183         request = op->request->payload;
1184
1185         dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1186                         request->primary_intf_id);
1187
1188         return gb_svc_queue_deferred_request(op);
1189 }
1190
1191 static int gb_svc_intf_oops_recv(struct gb_operation *op)
1192 {
1193         struct gb_svc *svc = gb_connection_get_data(op->connection);
1194         struct gb_svc_intf_oops_request *request;
1195
1196         if (op->request->payload_size < sizeof(*request)) {
1197                 dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
1198                          op->request->payload_size, sizeof(*request));
1199                 return -EINVAL;
1200         }
1201
1202         return gb_svc_queue_deferred_request(op);
1203 }
1204
1205 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
1206 {
1207         struct gb_svc *svc = gb_connection_get_data(op->connection);
1208         struct gb_svc_intf_mailbox_event_request *request;
1209
1210         if (op->request->payload_size < sizeof(*request)) {
1211                 dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
1212                                 op->request->payload_size, sizeof(*request));
1213                 return -EINVAL;
1214         }
1215
1216         request = op->request->payload;
1217
1218         dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1219
1220         return gb_svc_queue_deferred_request(op);
1221 }
1222
1223 static int gb_svc_request_handler(struct gb_operation *op)
1224 {
1225         struct gb_connection *connection = op->connection;
1226         struct gb_svc *svc = gb_connection_get_data(connection);
1227         u8 type = op->type;
1228         int ret = 0;
1229
1230         /*
1231          * SVC requests need to follow a specific order (at least initially) and
1232          * below code takes care of enforcing that. The expected order is:
1233          * - PROTOCOL_VERSION
1234          * - SVC_HELLO
1235          * - Any other request, but the earlier two.
1236          *
1237          * Incoming requests are guaranteed to be serialized and so we don't
1238          * need to protect 'state' for any races.
1239          */
1240         switch (type) {
1241         case GB_SVC_TYPE_PROTOCOL_VERSION:
1242                 if (svc->state != GB_SVC_STATE_RESET)
1243                         ret = -EINVAL;
1244                 break;
1245         case GB_SVC_TYPE_SVC_HELLO:
1246                 if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
1247                         ret = -EINVAL;
1248                 break;
1249         default:
1250                 if (svc->state != GB_SVC_STATE_SVC_HELLO)
1251                         ret = -EINVAL;
1252                 break;
1253         }
1254
1255         if (ret) {
1256                 dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
1257                                 type, svc->state);
1258                 return ret;
1259         }
1260
1261         switch (type) {
1262         case GB_SVC_TYPE_PROTOCOL_VERSION:
1263                 ret = gb_svc_version_request(op);
1264                 if (!ret)
1265                         svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
1266                 return ret;
1267         case GB_SVC_TYPE_SVC_HELLO:
1268                 ret = gb_svc_hello(op);
1269                 if (!ret)
1270                         svc->state = GB_SVC_STATE_SVC_HELLO;
1271                 return ret;
1272         case GB_SVC_TYPE_INTF_RESET:
1273                 return gb_svc_intf_reset_recv(op);
1274         case GB_SVC_TYPE_MODULE_INSERTED:
1275                 return gb_svc_module_inserted_recv(op);
1276         case GB_SVC_TYPE_MODULE_REMOVED:
1277                 return gb_svc_module_removed_recv(op);
1278         case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
1279                 return gb_svc_intf_mailbox_event_recv(op);
1280         case GB_SVC_TYPE_INTF_OOPS:
1281                 return gb_svc_intf_oops_recv(op);
1282         default:
1283                 dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
1284                 return -EINVAL;
1285         }
1286 }
1287
/*
 * Device release callback, invoked when the last reference to the SVC
 * device is dropped. Frees everything gb_svc_create() allocated; the
 * connection is destroyed first, the svc struct itself last.
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}
1298
/* Device type for SVC devices; release() frees the backing gb_svc. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
1303
1304 struct gb_svc *gb_svc_create(struct gb_host_device *hd)
1305 {
1306         struct gb_svc *svc;
1307
1308         svc = kzalloc(sizeof(*svc), GFP_KERNEL);
1309         if (!svc)
1310                 return NULL;
1311
1312         svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
1313         if (!svc->wq) {
1314                 kfree(svc);
1315                 return NULL;
1316         }
1317
1318         svc->dev.parent = &hd->dev;
1319         svc->dev.bus = &greybus_bus_type;
1320         svc->dev.type = &greybus_svc_type;
1321         svc->dev.groups = svc_groups;
1322         svc->dev.dma_mask = svc->dev.parent->dma_mask;
1323         device_initialize(&svc->dev);
1324
1325         dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
1326
1327         ida_init(&svc->device_id_map);
1328         svc->state = GB_SVC_STATE_RESET;
1329         svc->hd = hd;
1330
1331         svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
1332                                                 gb_svc_request_handler);
1333         if (IS_ERR(svc->connection)) {
1334                 dev_err(&svc->dev, "failed to create connection: %ld\n",
1335                                 PTR_ERR(svc->connection));
1336                 goto err_put_device;
1337         }
1338
1339         gb_connection_set_data(svc->connection, svc);
1340
1341         return svc;
1342
1343 err_put_device:
1344         put_device(&svc->dev);
1345         return NULL;
1346 }
1347
1348 int gb_svc_add(struct gb_svc *svc)
1349 {
1350         int ret;
1351
1352         /*
1353          * The SVC protocol is currently driven by the SVC, so the SVC device
1354          * is added from the connection request handler when enough
1355          * information has been received.
1356          */
1357         ret = gb_connection_enable(svc->connection);
1358         if (ret)
1359                 return ret;
1360
1361         return 0;
1362 }
1363
1364 static void gb_svc_remove_modules(struct gb_svc *svc)
1365 {
1366         struct gb_host_device *hd = svc->hd;
1367         struct gb_module *module, *tmp;
1368
1369         list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
1370                 gb_module_del(module);
1371                 list_del(&module->hd_node);
1372                 gb_module_put(module);
1373         }
1374 }
1375
/*
 * Tear down the SVC: stop accepting requests, unregister the device if
 * it was registered, drain deferred work, remove all modules and
 * finally disable the connection completely. The order here matters.
 */
void gb_svc_del(struct gb_svc *svc)
{
	/* Stop receiving new requests while tearing down. */
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	/* Let any already-queued deferred requests finish first. */
	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}
1395
/* Drop a reference to the SVC device; gb_svc_release() runs on the last put. */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}