GNU Linux-libre 4.19.286-gnu1
drivers/hv/vmbus_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
        struct list_head node;
        struct hv_vmbus_device_id id;
};

static struct acpi_device  *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;

static int hyperv_report_reg(void)
{
        return !sysctl_record_panic_msg || !hv_panic_page;
}
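
/*
 * Note on the gating logic above (editorial illustration, not upstream
 * documentation): hyperv_report_reg() is non-zero exactly when the kmsg
 * dumper will NOT report panic data later, i.e. when message recording is
 * disabled via the sysctl or no panic page could be allocated:
 *
 *   sysctl_record_panic_msg   hv_panic_page   hyperv_report_reg()
 *                         1        non-NULL                     0
 *                         1            NULL                     1
 *                         0        anything                     1
 *
 * Only in the non-zero cases do the die/panic notifiers below fall back to
 * the crash-MSR based hyperv_report_panic() path.
 */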

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
                              void *args)
{
        struct pt_regs *regs;

        vmbus_initiate_unload(true);

        /*
         * Hyper-V should be notified only once about a panic.  If we will be
         * doing hyperv_report_panic_msg() later with kmsg data, don't do
         * the notification here.
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
            && hyperv_report_reg()) {
                regs = current_pt_regs();
                hyperv_report_panic(regs, val, false);
        }
        return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
                            void *args)
{
        struct die_args *die = (struct die_args *)args;
        struct pt_regs *regs = die->regs;

        /*
         * Hyper-V should be notified only once about a panic.  If we will be
         * doing hyperv_report_panic_msg() later with kmsg data, don't do
         * the notification here.
         */
        if (hyperv_report_reg())
                hyperv_report_panic(regs, val, true);
        return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
        .notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
        .notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_SEMAPHORE(hyperv_mmio_lock);

static int vmbus_exists(void)
{
        if (hv_acpi_dev == NULL)
                return -ENODEV;

        return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
        int i;
        for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
                sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}

static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid % 32;
}
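
/*
 * Worked example (editorial; the value is illustrative): a channel with
 * offermsg.monitorid == 75 lands in trigger group 75 / 32 == 2 at bit
 * offset 75 % 32 == 11, i.e. bit 11 of trigger_group[2] in the monitor
 * page indicates a pending signal for that channel.
 */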

static u32 channel_pending(const struct vmbus_channel *channel,
                           const struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);

        return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
                           const struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);

        return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);
        return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
                              struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        char alias_name[VMBUS_ALIAS_LEN + 1];

        print_alias_name(hv_dev, alias_name);
        return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;

        return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
                                    struct device_attribute *dev_attr,
                                    char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
                                          struct device_attribute *dev_attr,
                                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
                                 struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
                                        struct device_attribute *dev_attr,
                                        char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
                                       struct device_attribute *dev_attr,
                                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
        unsigned long flags;
        int buf_size = PAGE_SIZE, n_written, tot_written;
        struct list_head *cur;

        if (!channel)
                return -ENODEV;

        tot_written = snprintf(buf, buf_size, "%u:%u\n",
                channel->offermsg.child_relid, channel->target_cpu);

        spin_lock_irqsave(&channel->lock, flags);

        list_for_each(cur, &channel->sc_list) {
                if (tot_written >= buf_size - 1)
                        break;

                cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
                n_written = scnprintf(buf + tot_written,
                                     buf_size - tot_written,
                                     "%u:%u\n",
                                     cur_sc->offermsg.child_relid,
                                     cur_sc->target_cpu);
                tot_written += n_written;
        }

        spin_unlock_irqrestore(&channel->lock, flags);

        return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
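
/*
 * Example output (editorial; the relid:cpu pairs are illustrative):
 * reading channel_vp_mapping for a device whose primary channel has
 * relid 15 on CPU 0 and two sub-channels on CPUs 1 and 2 might yield:
 *
 *   15:0
 *   16:1
 *   17:2
 */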

static ssize_t vendor_show(struct device *dev,
                           struct device_attribute *dev_attr,
                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
                           struct device_attribute *dev_attr,
                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_state.attr,
        &dev_attr_monitor_id.attr,
        &dev_attr_class_id.attr,
        &dev_attr_device_id.attr,
        &dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
        &dev_attr_numa_node.attr,
#endif
        &dev_attr_server_monitor_pending.attr,
        &dev_attr_client_monitor_pending.attr,
        &dev_attr_server_monitor_latency.attr,
        &dev_attr_client_monitor_latency.attr,
        &dev_attr_server_monitor_conn_id.attr,
        &dev_attr_client_monitor_conn_id.attr,
        &dev_attr_out_intr_mask.attr,
        &dev_attr_out_read_index.attr,
        &dev_attr_out_write_index.attr,
        &dev_attr_out_read_bytes_avail.attr,
        &dev_attr_out_write_bytes_avail.attr,
        &dev_attr_in_intr_mask.attr,
        &dev_attr_in_read_index.attr,
        &dev_attr_in_write_index.attr,
        &dev_attr_in_read_bytes_avail.attr,
        &dev_attr_in_write_bytes_avail.attr,
        &dev_attr_channel_vp_mapping.attr,
        &dev_attr_vendor.attr,
        &dev_attr_device.attr,
        NULL,
};

/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
                                         struct attribute *attr, int idx)
{
        struct device *dev = kobj_to_dev(kobj);
        const struct hv_device *hv_dev = device_to_hv_device(dev);

        /* Hide the monitor attributes if the monitor mechanism is not used. */
        if (!hv_dev->channel->offermsg.monitor_allocated &&
            (attr == &dev_attr_monitor_id.attr ||
             attr == &dev_attr_server_monitor_pending.attr ||
             attr == &dev_attr_client_monitor_pending.attr ||
             attr == &dev_attr_server_monitor_latency.attr ||
             attr == &dev_attr_client_monitor_latency.attr ||
             attr == &dev_attr_server_monitor_conn_id.attr ||
             attr == &dev_attr_client_monitor_conn_id.attr))
                return 0;

        return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
        .attrs = vmbus_dev_attrs,
        .is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. udev will then look at its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
        struct hv_device *dev = device_to_hv_device(device);
        int ret;
        char alias_name[VMBUS_ALIAS_LEN + 1];

        print_alias_name(dev, alias_name);
        ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
        return ret;
}
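
/*
 * Editorial example: for the synthetic network class GUID
 * f8615163-df3e-46c5-913f-f2d2f965ed0e, print_alias_name() emits the raw
 * little-endian GUID bytes as hex, so the uevent carries something like
 *
 *   MODALIAS=vmbus:635161f83edfc546913ff2d2f965ed0e
 *
 * which userspace matches against the vmbus: aliases each driver exports
 * via MODULE_DEVICE_TABLE(vmbus, ...).
 */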

static const uuid_le null_guid;

static inline bool is_null_guid(const uuid_le *guid)
{
        if (uuid_le_cmp(*guid, null_guid))
                return false;
        return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
                                        const uuid_le *guid)
{
        const struct hv_vmbus_device_id *id = NULL;
        struct vmbus_dynid *dynid;

        /* Look at the dynamic ids first, before the static ones */
        spin_lock(&drv->dynids.lock);
        list_for_each_entry(dynid, &drv->dynids.list, node) {
                if (!uuid_le_cmp(dynid->id.guid, *guid)) {
                        id = &dynid->id;
                        break;
                }
        }
        spin_unlock(&drv->dynids.lock);

        if (id)
                return id;

        id = drv->id_table;
        if (id == NULL)
                return NULL; /* empty device table */

        for (; !is_null_guid(&id->guid); id++)
                if (!uuid_le_cmp(id->guid, *guid))
                        return id;

        return NULL;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
{
        struct vmbus_dynid *dynid;

        dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
        if (!dynid)
                return -ENOMEM;

        dynid->id.guid = *guid;

        spin_lock(&drv->dynids.lock);
        list_add_tail(&dynid->node, &drv->dynids.list);
        spin_unlock(&drv->dynids.lock);

        return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
        struct vmbus_dynid *dynid, *n;

        spin_lock(&drv->dynids.lock);
        list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
                list_del(&dynid->node);
                kfree(dynid);
        }
        spin_unlock(&drv->dynids.lock);
}

/*
 * store_new_id - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
                            size_t count)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        uuid_le guid;
        ssize_t retval;

        retval = uuid_le_to_bin(buf, &guid);
        if (retval)
                return retval;

        if (hv_vmbus_get_id(drv, &guid))
                return -EEXIST;

        retval = vmbus_add_dynid(drv, &guid);
        if (retval)
                return retval;
        return count;
}
static DRIVER_ATTR_WO(new_id);
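
/*
 * Editorial usage example (the driver name is illustrative; this pattern
 * is the one documented for uio_hv_generic): a GUID written to the
 * driver's new_id file is parsed by uuid_le_to_bin() and, if not already
 * known, added to the dynamic id list and re-probed:
 *
 *   echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
 *       > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
 */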

/*
 * store_remove_id - remove a VMBus device ID from this driver
 *
 * Removes a dynamic VMBus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
                               size_t count)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct vmbus_dynid *dynid, *n;
        uuid_le guid;
        ssize_t retval;

        retval = uuid_le_to_bin(buf, &guid);
        if (retval)
                return retval;

        retval = -ENODEV;
        spin_lock(&drv->dynids.lock);
        list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
                struct hv_vmbus_device_id *id = &dynid->id;

                if (!uuid_le_cmp(id->guid, guid)) {
                        list_del(&dynid->node);
                        kfree(dynid);
                        retval = count;
                        break;
                }
        }
        spin_unlock(&drv->dynids.lock);

        return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] = {
        &driver_attr_new_id.attr,
        &driver_attr_remove_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);


/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct hv_device *hv_dev = device_to_hv_device(device);

        /* The hv_sock driver handles all hv_sock offers. */
        if (is_hvsock_channel(hv_dev->channel))
                return drv->hvsock;

        if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
                return 1;

        return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
        int ret = 0;
        struct hv_driver *drv =
                        drv_to_hv_drv(child_device->driver);
        struct hv_device *dev = device_to_hv_device(child_device);
        const struct hv_vmbus_device_id *dev_id;

        dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
        if (drv->probe) {
                ret = drv->probe(dev, dev_id);
                if (ret != 0)
                        pr_err("probe failed for device %s (%d)\n",
                               dev_name(child_device), ret);

        } else {
                pr_err("probe not set for driver %s\n",
                       dev_name(child_device));
                ret = -ENODEV;
        }
        return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        if (child_device->driver) {
                drv = drv_to_hv_drv(child_device->driver);
                if (drv->remove)
                        drv->remove(dev);
        }

        return 0;
}


/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);


        /* The device may not be attached yet */
        if (!child_device->driver)
                return;

        drv = drv_to_hv_drv(child_device->driver);

        if (drv->shutdown)
                drv->shutdown(dev);
}


/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
        struct hv_device *hv_dev = device_to_hv_device(device);
        struct vmbus_channel *channel = hv_dev->channel;

        mutex_lock(&vmbus_connection.channel_mutex);
        hv_process_channel_removal(channel->offermsg.child_relid);
        mutex_unlock(&vmbus_connection.channel_mutex);
        kfree(hv_dev);

}

/* The one and only one */
static struct bus_type  hv_bus = {
        .name =         "vmbus",
        .match =        vmbus_match,
        .shutdown =     vmbus_shutdown,
        .remove =       vmbus_remove,
        .probe =        vmbus_probe,
        .uevent =       vmbus_uevent,
        .dev_groups =   vmbus_dev_groups,
        .drv_groups =   vmbus_drv_groups,
};

struct onmessage_work_context {
        struct work_struct work;
        struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
        struct onmessage_work_context *ctx;

        /* Do not process messages if we're in DISCONNECTED state */
        if (vmbus_connection.conn_state == DISCONNECTED)
                return;

        ctx = container_of(work, struct onmessage_work_context,
                           work);
        vmbus_onmessage(&ctx->msg);
        kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg,
                                        struct hv_per_cpu_context *hv_cpu)
{
        struct clock_event_device *dev = hv_cpu->clk_evt;

        if (dev->event_handler)
                dev->event_handler(dev);

        vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
}

void vmbus_on_msg_dpc(unsigned long data)
{
        struct hv_per_cpu_context *hv_cpu = (void *)data;
        void *page_addr = hv_cpu->synic_message_page;
        struct hv_message *msg = (struct hv_message *)page_addr +
                                  VMBUS_MESSAGE_SINT;
        struct vmbus_channel_message_header *hdr;
        const struct vmbus_channel_message_table_entry *entry;
        struct onmessage_work_context *ctx;
        u32 message_type = msg->header.message_type;

        if (message_type == HVMSG_NONE)
                /* no msg */
                return;

        hdr = (struct vmbus_channel_message_header *)msg->u.payload;

        trace_vmbus_on_msg_dpc(hdr);

        if (hdr->msgtype >= CHANNELMSG_COUNT) {
                WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
                goto msg_handled;
        }

        entry = &channel_message_table[hdr->msgtype];

        if (!entry->message_handler)
                goto msg_handled;

        if (entry->handler_type == VMHT_BLOCKING) {
                ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
                if (ctx == NULL)
                        return;

                INIT_WORK(&ctx->work, vmbus_onmessage_work);
                memcpy(&ctx->msg, msg, sizeof(*msg));

                /*
                 * The host can generate a rescind message while we
                 * may still be handling the original offer. We deal with
                 * this condition by ensuring the processing is done on the
                 * same CPU.
                 */
                switch (hdr->msgtype) {
                case CHANNELMSG_RESCIND_CHANNELOFFER:
                        /*
                         * If we are handling the rescind message;
                         * schedule the work on the global work queue.
                         */
                        schedule_work_on(vmbus_connection.connect_cpu,
                                         &ctx->work);
                        break;

                case CHANNELMSG_OFFERCHANNEL:
                        atomic_inc(&vmbus_connection.offer_in_progress);
                        queue_work_on(vmbus_connection.connect_cpu,
                                      vmbus_connection.work_queue,
                                      &ctx->work);
                        break;

                default:
                        queue_work(vmbus_connection.work_queue, &ctx->work);
                }
        } else
                entry->message_handler(hdr);

msg_handled:
        vmbus_signal_eom(msg, message_type);
}


/*
 * Direct callback for channels using other deferred processing
 */
static void vmbus_channel_isr(struct vmbus_channel *channel)
{
        void (*callback_fn)(void *);

        callback_fn = READ_ONCE(channel->onchannel_callback);
        if (likely(callback_fn != NULL))
                (*callback_fn)(channel->channel_callback_context);
}

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
        unsigned long *recv_int_page;
        u32 maxbits, relid;

        if (vmbus_proto_version < VERSION_WIN8) {
                maxbits = MAX_NUM_CHANNELS_SUPPORTED;
                recv_int_page = vmbus_connection.recv_int_page;
        } else {
                /*
                 * When the host is win8 and beyond, the event page
                 * can be directly checked to get the id of the channel
                 * that has the interrupt pending.
                 */
                void *page_addr = hv_cpu->synic_event_page;
                union hv_synic_event_flags *event
                        = (union hv_synic_event_flags *)page_addr +
                                                 VMBUS_MESSAGE_SINT;

                maxbits = HV_EVENT_FLAGS_COUNT;
                recv_int_page = event->flags;
        }

        if (unlikely(!recv_int_page))
                return;

        for_each_set_bit(relid, recv_int_page, maxbits) {
                struct vmbus_channel *channel;

                if (!sync_test_and_clear_bit(relid, recv_int_page))
                        continue;

                /* Special case - vmbus channel protocol msg */
                if (relid == 0)
                        continue;

                rcu_read_lock();

                /* Find channel based on relid */
                list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
                        if (channel->offermsg.child_relid != relid)
                                continue;

                        if (channel->rescind)
                                continue;

                        trace_vmbus_chan_sched(channel);

                        ++channel->interrupts;

                        switch (channel->callback_mode) {
                        case HV_CALL_ISR:
                                vmbus_channel_isr(channel);
                                break;

                        case HV_CALL_BATCHED:
                                hv_begin_read(&channel->inbound);
                                /* fallthrough */
                        case HV_CALL_DIRECT:
                                tasklet_schedule(&channel->callback_event);
                        }
                }

                rcu_read_unlock();
        }
}
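
/*
 * Editorial walk-through (the relid value is illustrative): on a post-Win8
 * host, if the host sets bit 21 of this CPU's SynIC event page, the loop
 * above claims the bit with sync_test_and_clear_bit(21, ...), looks up the
 * channel whose offermsg.child_relid == 21 on this CPU's chan_list, and
 * either runs its ISR callback inline or schedules its callback tasklet,
 * depending on channel->callback_mode.
 */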

static void vmbus_isr(void)
{
        struct hv_per_cpu_context *hv_cpu
                = this_cpu_ptr(hv_context.cpu_context);
        void *page_addr = hv_cpu->synic_event_page;
        struct hv_message *msg;
        union hv_synic_event_flags *event;
        bool handled = false;

        if (unlikely(page_addr == NULL))
                return;

        event = (union hv_synic_event_flags *)page_addr +
                                         VMBUS_MESSAGE_SINT;
        /*
         * Check for events before checking for messages. This is the order
         * in which events and messages are checked in Windows guests on
         * Hyper-V, and the Windows team suggested we do the same.
         */

        if ((vmbus_proto_version == VERSION_WS2008) ||
                (vmbus_proto_version == VERSION_WIN7)) {

                /* Since we are a child, we only need to check bit 0 */
                if (sync_test_and_clear_bit(0, event->flags))
                        handled = true;
        } else {
                /*
                 * Our host is win8 or above. The signaling mechanism
                 * has changed and we can directly look at the event page.
                 * If bit n is set then we have an interrupt on the channel
                 * whose id is n.
                 */
                handled = true;
        }

        if (handled)
                vmbus_chan_sched(hv_cpu);

        page_addr = hv_cpu->synic_message_page;
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

        /* Check if there are actual msgs to be processed */
        if (msg->header.message_type != HVMSG_NONE) {
                if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
                        hv_process_timer_expiration(msg, hv_cpu);
                else
                        tasklet_schedule(&hv_cpu->msg_dpc);
        }

        add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR);
}

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
                         enum kmsg_dump_reason reason)
{
        size_t bytes_written;
        phys_addr_t panic_pa;

        /* We are only interested in panics. */
        if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
                return;

        panic_pa = virt_to_phys(hv_panic_page);

        /*
         * Write dump contents to the page. No need to synchronize; panic should
         * be single-threaded.
         */
        kmsg_dump_get_buffer(dumper, true, hv_panic_page, PAGE_SIZE,
                             &bytes_written);
        if (bytes_written)
                hyperv_report_panic_msg(panic_pa, bytes_written);
}

static struct kmsg_dumper hv_kmsg_dumper = {
        .dump = hv_kmsg_dump,
};

static struct ctl_table_header *hv_ctl_table_hdr;
static int zero;
static int one = 1;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
        {
                .procname       = "hyperv_record_panic_msg",
                .data           = &sysctl_record_panic_msg,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &zero,
                .extra2         = &one
        },
        {}
};

static struct ctl_table hv_root_table[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = hv_ctl_table
        },
        {}
};
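
/*
 * Editorial usage example: with the tables above registered, the knob
 * appears as /proc/sys/kernel/hyperv_record_panic_msg and is clamped to
 * the range [0, 1] by proc_dointvec_minmax, so from a shell:
 *
 *   echo 0 > /proc/sys/kernel/hyperv_record_panic_msg   # disable reporting
 *   echo 1 > /proc/sys/kernel/hyperv_record_panic_msg   # re-enable (default)
 */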

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *      - initialize the vmbus driver context
 *      - invoke the vmbus hv main init routine
 *      - retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
        int ret;

        /* Hypervisor initialization: set up the hypercall page, etc. */
        ret = hv_init();
        if (ret != 0) {
                pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
                return ret;
        }

        ret = bus_register(&hv_bus);
        if (ret)
                return ret;

        hv_setup_vmbus_irq(vmbus_isr);

        ret = hv_synic_alloc();
        if (ret)
                goto err_alloc;
        /*
         * Initialize the per-cpu interrupt state and
         * connect to the host.
         */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
                                hv_synic_init, hv_synic_cleanup);
        if (ret < 0)
                goto err_alloc;
        hyperv_cpuhp_online = ret;

        ret = vmbus_connect();
        if (ret)
                goto err_connect;

        /*
         * Only register if the crash MSRs are available
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                u64 hyperv_crash_ctl;
                /*
                 * Sysctl registration is not fatal, since by default
                 * reporting is enabled.
                 */
                hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
                if (!hv_ctl_table_hdr)
                        pr_err("Hyper-V: sysctl table register error");

                /*
                 * Register for panic kmsg callback only if the right
                 * capability is supported by the hypervisor.
                 */
                hv_get_crash_ctl(hyperv_crash_ctl);
                if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
                        hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
                        if (hv_panic_page) {
                                ret = kmsg_dump_register(&hv_kmsg_dumper);
                                if (ret) {
                                        pr_err("Hyper-V: kmsg dump register "
                                                "error 0x%x\n", ret);
                                        free_page(
                                            (unsigned long)hv_panic_page);
                                        hv_panic_page = NULL;
                                }
                        } else
                                pr_err("Hyper-V: panic message page memory "
                                        "allocation failed");
                }

                register_die_notifier(&hyperv_die_block);
        }

        /*
         * Always register the panic notifier because we need to unload
         * the VMbus channel connection to prevent any VMbus
         * activity after the VM panics.
         */
        atomic_notifier_chain_register(&panic_notifier_list,
                               &hyperv_panic_block);

        vmbus_request_offers();

        return 0;

err_connect:
        cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
        hv_synic_free();
        hv_remove_vmbus_irq();

        bus_unregister(&hv_bus);
        unregister_sysctl_table(hv_ctl_table_hdr);
        hv_ctl_table_hdr = NULL;
        return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
        int ret;

        pr_info("registering driver %s\n", hv_driver->name);

        ret = vmbus_exists();
        if (ret < 0)
                return ret;

        hv_driver->driver.name = hv_driver->name;
        hv_driver->driver.owner = owner;
        hv_driver->driver.mod_name = mod_name;
        hv_driver->driver.bus = &hv_bus;

        spin_lock_init(&hv_driver->dynids.lock);
        INIT_LIST_HEAD(&hv_driver->dynids.list);

        ret = driver_register(&hv_driver->driver);

        return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
        pr_info("unregistering driver %s\n", hv_driver->name);

        if (!vmbus_exists()) {
                driver_unregister(&hv_driver->driver);
                vmbus_free_dynids(hv_driver);
        }
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);


/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
        struct vmbus_channel *channel
                = container_of(kobj, struct vmbus_channel, kobj);

        kfree_rcu(channel, rcu);
}

struct vmbus_chan_attribute {
        struct attribute attr;
        ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
        ssize_t (*store)(struct vmbus_channel *chan,
                         const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
        struct vmbus_chan_attribute chan_attr_##_name \
                = __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
        struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
        struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
        struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)

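/*
 * Editorial expansion example: VMBUS_CHAN_ATTR_RO(out_mask) below expands
 * (via __ATTR_RO) to roughly
 *
 *   struct vmbus_chan_attribute chan_attr_out_mask = {
 *           .attr = { .name = "out_mask", .mode = 0444 },
 *           .show = out_mask_show,
 *   };
 *
 * so each attribute pairs a sysfs file name with its _name##_show handler.
 */
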
1397 static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
1398                                     struct attribute *attr, char *buf)
1399 {
1400         const struct vmbus_chan_attribute *attribute
1401                 = container_of(attr, struct vmbus_chan_attribute, attr);
1402         const struct vmbus_channel *chan
1403                 = container_of(kobj, struct vmbus_channel, kobj);
1404
1405         if (!attribute->show)
1406                 return -EIO;
1407
1408         if (chan->state != CHANNEL_OPENED_STATE)
1409                 return -EINVAL;
1410
1411         return attribute->show(chan, buf);
1412 }
1413
1414 static const struct sysfs_ops vmbus_chan_sysfs_ops = {
1415         .show = vmbus_chan_attr_show,
1416 };
1417
1418 static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
1419 {
1420         const struct hv_ring_buffer_info *rbi = &channel->outbound;
1421
1422         return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
1423 }
1424 static VMBUS_CHAN_ATTR_RO(out_mask);
1425
1426 static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
1427 {
1428         const struct hv_ring_buffer_info *rbi = &channel->inbound;
1429
1430         return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
1431 }
1432 static VMBUS_CHAN_ATTR_RO(in_mask);
1433
1434 static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
1435 {
1436         const struct hv_ring_buffer_info *rbi = &channel->inbound;
1437
1438         return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
1439 }
1440 static VMBUS_CHAN_ATTR_RO(read_avail);
1441
1442 static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
1443 {
1444         const struct hv_ring_buffer_info *rbi = &channel->outbound;
1445
1446         return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
1447 }
1448 static VMBUS_CHAN_ATTR_RO(write_avail);
1449
1450 static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
1451 {
1452         return sprintf(buf, "%u\n", channel->target_cpu);
1453 }
1454 static VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
1455
1456 static ssize_t channel_pending_show(const struct vmbus_channel *channel,
1457                                     char *buf)
1458 {
1459         return sprintf(buf, "%d\n",
1460                        channel_pending(channel,
1461                                        vmbus_connection.monitor_pages[1]));
1462 }
1463 static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
1464
1465 static ssize_t channel_latency_show(const struct vmbus_channel *channel,
1466                                     char *buf)
1467 {
1468         return sprintf(buf, "%d\n",
1469                        channel_latency(channel,
1470                                        vmbus_connection.monitor_pages[1]));
1471 }
1472 static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
1473
1474 static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
1475 {
1476         return sprintf(buf, "%llu\n", channel->interrupts);
1477 }
1478 static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
1479
1480 static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
1481 {
1482         return sprintf(buf, "%llu\n", channel->sig_events);
1483 }
1484 static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
1485
1486 static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel,
1487                                           char *buf)
1488 {
1489         return sprintf(buf, "%u\n", channel->offermsg.monitorid);
1490 }
1491 static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);
1492
1493 static ssize_t subchannel_id_show(const struct vmbus_channel *channel,
1494                                   char *buf)
1495 {
1496         return sprintf(buf, "%u\n",
1497                        channel->offermsg.offer.sub_channel_index);
1498 }
1499 static VMBUS_CHAN_ATTR_RO(subchannel_id);
1500
1501 static struct attribute *vmbus_chan_attrs[] = {
1502         &chan_attr_out_mask.attr,
1503         &chan_attr_in_mask.attr,
1504         &chan_attr_read_avail.attr,
1505         &chan_attr_write_avail.attr,
1506         &chan_attr_cpu.attr,
1507         &chan_attr_pending.attr,
1508         &chan_attr_latency.attr,
1509         &chan_attr_interrupts.attr,
1510         &chan_attr_events.attr,
1511         &chan_attr_monitor_id.attr,
1512         &chan_attr_subchannel_id.attr,
1513         NULL
1514 };
1515
1516 /*
1517  * Channel-level attribute_group callback function. Returns the permission for
1518  * each attribute, and returns 0 if an attribute is not visible.
1519  */
1520 static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
1521                                           struct attribute *attr, int idx)
1522 {
1523         const struct vmbus_channel *channel =
1524                 container_of(kobj, struct vmbus_channel, kobj);
1525
1526         /* Hide the monitor attributes if the monitor mechanism is not used. */
1527         if (!channel->offermsg.monitor_allocated &&
1528             (attr == &chan_attr_pending.attr ||
1529              attr == &chan_attr_latency.attr ||
1530              attr == &chan_attr_monitor_id.attr))
1531                 return 0;
1532
1533         return attr->mode;
1534 }

static struct attribute_group vmbus_chan_group = {
	.attrs = vmbus_chan_attrs,
	.is_visible = vmbus_chan_attr_is_visible
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
};

/*
 * vmbus_add_channel_kobj - set up a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret) {
		kobject_put(kobj);
		return ret;
	}

	ret = sysfs_create_group(kobj, &vmbus_chan_group);

	if (ret) {
		/*
		 * The calling functions' error handling paths will clean up
		 * the empty channel directory.
		 */
		kobject_put(kobj);
		dev_err(device, "Unable to set up channel sysfs files\n");
		return ret;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}
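
/*
 * Resulting sysfs layout, as a sketch (the device directory name is the
 * host-assigned instance GUID, see vmbus_device_register() below):
 *
 *   /sys/bus/vmbus/devices/<instance-guid>/channels/<relid>/
 *       cpu  events  in_mask  interrupts  latency  monitor_id
 *       out_mask  pending  read_avail  subchannel_id  write_avail
 *
 * (pending, latency and monitor_id appear only for monitored channels.)
 */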

/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}

/*
 * vmbus_device_create - Creates a new child device object for the vmbus;
 * the object is registered separately via vmbus_device_register().
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}
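
/*
 * Sketch of the intended pairing with vmbus_device_register(); the real
 * caller lives in the channel offer path (channel_mgmt.c), and the error
 * handling here is abbreviated:
 *
 *	struct hv_device *dev;
 *
 *	dev = vmbus_device_create(&type_guid, &instance_guid, newchannel);
 *	if (!dev)
 *		return;
 *	if (vmbus_device_register(dev))
 *		... tear down the offer ...
 */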

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     child_device_obj->channel->offermsg.offer.if_instance.b);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		put_device(&child_device_obj->device);
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}

/*
 * VMBus is an ACPI-enumerated device. Get the information we
 * need from the DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
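
/*
 * Worked example of the merge logic above, with hypothetical addresses:
 * if the list already holds [0xf8000000, 0xffffffff] and a new descriptor
 * covers [0xf0000000, 0xf7ffffff], then (*old_res)->start == new_res->end + 1
 * holds, so the two collapse into a single [0xf0000000, 0xffffffff] entry
 * instead of becoming siblings.
 */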

static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB.  The length seems to
	 * be underreported, particularly in a Generation 1 VM.  So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */

	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}
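
/*
 * Example of the shrinking loop above: on an EFI (Generation 2) boot with
 * lfb_size below 8 MiB, the attempted claims are 0x800000, 0x400000,
 * 0x200000 and finally 0x100000 bytes, stopping at the first
 * __request_region() that succeeds, or giving up below 1 MiB.
 */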

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start, end;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	down(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region.  Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {

		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			end = start + size - 1;

			/* Skip the whole fb_mmio region if not fb_overlap_ok */
			if (!fb_overlap_ok && fb_mmio &&
			    (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
			     ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
				continue;

			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	up(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
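
/*
 * Minimal usage sketch for a hypothetical VMBus driver; "hdev" stands for
 * the struct hv_device * handed to the driver's probe routine.  min = 0 and
 * max = -1 place no constraint on the guest physical address:
 *
 *	struct resource *res;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&res, hdev, 0, -1, 0x100000,
 *				  PAGE_SIZE, false);
 *	if (ret)
 *		return ret;
 *	... ioremap and use res->start .. res->end, then ...
 *	vmbus_free_mmio(res->start, resource_size(res));
 */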

/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	down(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	up(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
					vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus ACPI device (on Gen1 or Gen2
	 * firmware) is the VMOD that has the MMIO ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};

static void hv_kexec_handler(void)
{
	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload(false);
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
	hyperv_cleanup();
}

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload(true);
	/*
	 * In the crash handler we can't schedule synic cleanup for all CPUs,
	 * so do the cleanup for the current CPU only. This should be
	 * sufficient for kdump.
	 */
	hv_synic_cleanup(smp_processor_id());
	hyperv_cleanup();
}

static int __init hv_acpi_init(void)
{
	int ret, t;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}

static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
	vmbus_free_channels();

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		kmsg_dump_unregister(&hv_kmsg_dumper);
		unregister_die_notifier(&hyperv_die_block);
	}

	/*
	 * The panic notifier is always registered, hence we should
	 * also unconditionally unregister it here as well.
	 */
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &hyperv_panic_block);

	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	bus_unregister(&hv_bus);

	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}

MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);