GNU Linux-libre 4.19.286-gnu1
[releases.git] / drivers / hv / hv_balloon.c
/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */

/*
 * Protocol versions. The low word is the minor version, the high word
 * the major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)

enum {
        DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
        DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
        DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

        DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
        DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
        DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

        DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
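
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): how the macros above round-trip a version.
 * DYNMEM_MAKE_VERSION(2, 0) encodes to 0x00020000, and the accessors
 * recover major 2, minor 0.
 */
static inline void dm_version_example(void)
{
        __u32 ver = DYNMEM_PROTOCOL_VERSION_WIN10;      /* 0x00020000 */

        pr_info("DM protocol %u.%u\n",
                DYNMEM_MAJOR_VERSION(ver), DYNMEM_MINOR_VERSION(ver));
}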

/*
 * Message Types
 */

enum dm_message_type {
        /*
         * Version 0.3
         */
        DM_ERROR                        = 0,
        DM_VERSION_REQUEST              = 1,
        DM_VERSION_RESPONSE             = 2,
        DM_CAPABILITIES_REPORT          = 3,
        DM_CAPABILITIES_RESPONSE        = 4,
        DM_STATUS_REPORT                = 5,
        DM_BALLOON_REQUEST              = 6,
        DM_BALLOON_RESPONSE             = 7,
        DM_UNBALLOON_REQUEST            = 8,
        DM_UNBALLOON_RESPONSE           = 9,
        DM_MEM_HOT_ADD_REQUEST          = 10,
        DM_MEM_HOT_ADD_RESPONSE         = 11,
        DM_VERSION_03_MAX               = 11,
        /*
         * Version 1.0
         */
        DM_INFO_MESSAGE                 = 12,
        DM_VERSION_1_MAX                = 12
};

/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
        struct {
                __u16 minor_version;
                __u16 major_version;
        };
        __u32 version;
} __packed;

union dm_caps {
        struct {
                __u64 balloon:1;
                __u64 hot_add:1;
                /*
                 * To support guests that may have alignment
                 * limitations on hot-add, the guest can specify
                 * its alignment requirements; a value of n
                 * represents an alignment of 2^n in megabytes.
                 */
                __u64 hot_add_alignment:4;
                __u64 reservedz:58;
        } cap_bits;
        __u64 caps;
} __packed;
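
/*
 * Illustrative sketch (not part of the driver; the helper name and chosen
 * alignment are assumptions): per the comment above, hot_add_alignment == n
 * means 2^n MB, so a guest that can only hot add on 128 MB boundaries would
 * report n = 7 (2^7 MB == 128 MB).
 */
static inline void dm_caps_example(union dm_caps *caps)
{
        caps->caps = 0;
        caps->cap_bits.balloon = 1;
        caps->cap_bits.hot_add = 1;
        caps->cap_bits.hot_add_alignment = 7;   /* 128 MB (assumed) */
}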

union dm_mem_page_range {
        struct {
                /*
                 * The PFN number of the first page in the range.
                 * 40 bits is the architectural limit of a PFN
                 * number for AMD64.
                 */
                __u64 start_page:40;
                /*
                 * The number of pages in the range.
                 */
                __u64 page_cnt:24;
        } finfo;
        __u64 page_range;
} __packed;
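
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the union above packs a 40-bit start PFN and a 24-bit page
 * count into a single __u64, so a whole range travels as one quadword.
 */
static inline __u64 dm_make_page_range(unsigned long start_pfn,
                                       unsigned long page_cnt)
{
        union dm_mem_page_range range;

        range.finfo.start_page = start_pfn;     /* low 40 bits */
        range.finfo.page_cnt = page_cnt;        /* next 24 bits */
        return range.page_range;
}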

/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
        __u16 type;
        __u16 size;
        __u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
        struct dm_header hdr;
        __u8 data[]; /* enclosed message */
} __packed;

/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts a version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
        struct dm_header hdr;
        union dm_version version;
        __u32 is_last_attempt:1;
        __u32 reservedz:31;
} __packed;

/*
 * Version response message. Sent from the host to the guest to
 * indicate whether the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
        struct dm_header hdr;
        union dm_caps caps;
        __u64 min_page_cnt;
        __u64 max_page_number;
} __packed;
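
/*
 * Illustrative sketch (not part of the driver; the helper name and the
 * field values are assumptions, not what this driver actually sends): how a
 * guest might fill in the capabilities report. The trans_id would be
 * assigned just before sending.
 */
static inline void dm_caps_report_example(struct dm_capabilities *cap_msg)
{
        memset(cap_msg, 0, sizeof(*cap_msg));
        cap_msg->hdr.type = DM_CAPABILITIES_REPORT;
        cap_msg->hdr.size = sizeof(*cap_msg);
        cap_msg->caps.cap_bits.balloon = 1;
        cap_msg->caps.cap_bits.hot_add = 1;
        cap_msg->min_page_cnt = 0;
        cap_msg->max_page_number = -1;  /* no limit (assumed) */
}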

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *                 in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *          calculated as File Cache Page Fault Count - Page Read Count.
 *          This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
        struct dm_header hdr;
        __u64 num_avail;
        __u64 num_committed;
        __u64 page_file_size;
        __u64 zero_free;
        __u32 page_file_writes;
        __u32 io_diff;
} __packed;

/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
        struct dm_header hdr;
        __u32 num_pages;
        __u32 reservedz;
} __packed;

/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
        struct dm_header hdr;
        __u32 reservedz;
        __u32 more_pages:1;
        __u32 range_count:31;
        union dm_mem_page_range range_array[];
} __packed;
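
/*
 * Worked example (assuming a 4 KiB PAGE_SIZE): the fixed part of
 * struct dm_balloon_response is 16 bytes (the 8-byte header plus two 32-bit
 * words) and each dm_mem_page_range is 8 bytes, so one page-sized response
 * holds at most (4096 - 16) / 8 = 510 ranges. alloc_balloon_pages() below
 * stops filling the response once hdr.size would exceed PAGE_SIZE for
 * exactly this reason.
 */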

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
        struct dm_header hdr;
        __u32 more_pages:1;
        __u32 reservedz:31;
        __u32 range_count;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
        struct dm_header hdr;
} __packed;

/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */

struct dm_hot_add {
        struct dm_header hdr;
        union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation: 1 for success, 0 for failure.
 *
 */

struct dm_hot_add_response {
        struct dm_header hdr;
        __u32 page_count;
        __u32 result;
} __packed;

/*
 * Types of information sent from the host to the guest.
 */

enum dm_info_type {
        INFO_TYPE_MAX_PAGE_CNT = 0,
        MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */

struct dm_info_header {
        enum dm_info_type type;
        __u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
        struct dm_header hdr;
        __u32 reserved;
        __u32 info_size;
        __u8  info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long covered_start_pfn;
        unsigned long covered_end_pfn;
        unsigned long ha_end_pfn;
        unsigned long end_pfn;
        /*
         * A list of gaps.
         */
        struct list_head gap_list;
};
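
/*
 * Illustrative example (all numbers assumed): if the host asks us to hot add
 * pfns 0x100000:0x120000 and we have so far added two 128M (32768-pfn)
 * chunks of it, then start_pfn = 0x100000, end_pfn = 0x120000 and
 * ha_end_pfn = 0x110000. covered_start_pfn:covered_end_pfn marks the subset
 * of those pfns that can actually be brought online; any holes the host
 * never backed inside that range are recorded on gap_list.
 */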

struct hv_hotadd_gap {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

struct balloon_state {
        __u32 num_pages;
        struct work_struct wrk;
};

struct hot_add_wrk {
        union dm_mem_page_range ha_page_range;
        union dm_mem_page_range ha_region_range;
        struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
        DM_INITIALIZING = 0,
        DM_INITIALIZED,
        DM_BALLOON_UP,
        DM_BALLOON_DOWN,
        DM_HOT_ADD,
        DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M     512
#define HA_CHUNK (32 * 1024)
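
/*
 * HA_CHUNK is expressed in pfns: 32768 pfns * 4 KiB per page = 128M, the
 * granularity we hot add in. Below is an illustrative sketch (a hypothetical
 * helper, not part of the driver) of the round-up-to-chunk idiom that
 * pfn_covered() and hot_add_req() open-code:
 */
static inline unsigned long ha_round_up(unsigned long pfn_cnt)
{
        unsigned long size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;

        if (pfn_cnt % HA_CHUNK)
                size += HA_CHUNK;       /* cover the partial tail chunk */
        return size;
}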

struct hv_dynmem_device {
        struct hv_device *dev;
        enum hv_dm_state state;
        struct completion host_event;
        struct completion config_event;

        /*
         * Number of pages we have currently ballooned out.
         */
        unsigned int num_pages_ballooned;
        unsigned int num_pages_onlined;
        unsigned int num_pages_added;

        /*
         * State to manage the ballooning (up) operation.
         */
        struct balloon_state balloon_wrk;

        /*
         * State to execute the "hot-add" operation.
         */
        struct hot_add_wrk ha_wrk;

        /*
         * This state tracks if the host has specified a hot-add
         * region.
         */
        bool host_specified_ha_region;

        /*
         * State to synchronize hot-add.
         */
        struct completion ol_waitevent;
        bool ha_waiting;
        /*
         * This thread handles hot-add
         * requests from the host as well as notifying
         * the host with regards to memory pressure in
         * the guest.
         */
        struct task_struct *thread;

        /*
         * Protects ha_region_list, the num_pages_onlined counter, and
         * the individual regions on ha_region_list.
         */
        spinlock_t ha_lock;

        /*
         * A list of hot-add regions.
         */
        struct list_head ha_region_list;

        /*
         * We start with the highest version we can support
         * and downgrade based on the host; we save here the
         * next version to try.
         */
        __u32 next_version;

        /*
         * The negotiated version agreed by the host.
         */
        __u32 version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
                                     unsigned long pfn)
{
        struct hv_hotadd_gap *gap;

        /* The page is not backed. */
        if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
                return false;

        /* Check for gaps. */
        list_for_each_entry(gap, &has->gap_list, list) {
                if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
                        return false;
        }

        return true;
}

static unsigned long hv_page_offline_check(unsigned long start_pfn,
                                           unsigned long nr_pages)
{
        unsigned long pfn = start_pfn, count = 0;
        struct hv_hotadd_state *has;
        bool found;

        while (pfn < start_pfn + nr_pages) {
                /*
                 * Search for a HAS that covers the pfn and, when we find one,
                 * count how many consecutive PFNs are covered.
                 */
                found = false;
                list_for_each_entry(has, &dm_device.ha_region_list, list) {
                        while ((pfn >= has->start_pfn) &&
                               (pfn < has->end_pfn) &&
                               (pfn < start_pfn + nr_pages)) {
                                found = true;
                                if (has_pfn_is_backed(has, pfn))
                                        count++;
                                pfn++;
                        }
                }

                /*
                 * This PFN is not in any HAS (e.g. we're offlining a region
                 * which was present at boot), no need to account for it. Go
                 * to the next one.
                 */
                if (!found)
                        pfn++;
        }

        return count;
}

static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                              void *v)
{
        struct memory_notify *mem = (struct memory_notify *)v;
        unsigned long flags, pfn_count;

        switch (val) {
        case MEM_ONLINE:
        case MEM_CANCEL_ONLINE:
                if (dm_device.ha_waiting) {
                        dm_device.ha_waiting = false;
                        complete(&dm_device.ol_waitevent);
                }
                break;

        case MEM_OFFLINE:
                spin_lock_irqsave(&dm_device.ha_lock, flags);
                pfn_count = hv_page_offline_check(mem->start_pfn,
                                                  mem->nr_pages);
                if (pfn_count <= dm_device.num_pages_onlined) {
                        dm_device.num_pages_onlined -= pfn_count;
                } else {
                        /*
                         * We're offlining more pages than we managed to online.
                         * This is unexpected. In any case don't let
                         * num_pages_onlined wrap around zero.
                         */
                        WARN_ON_ONCE(1);
                        dm_device.num_pages_onlined = 0;
                }
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                break;
        case MEM_GOING_ONLINE:
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
        .notifier_call = hv_memory_notifier,
        .priority = 0
};

/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
        if (!has_pfn_is_backed(has, page_to_pfn(pg)))
                return;

        /* This frame is currently backed; online the page. */
        __online_page_set_limits(pg);
        __online_page_increment_counters(pg);
        __online_page_free(pg);

        WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
        dm_device.num_pages_onlined++;
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
                                unsigned long start_pfn, unsigned long size)
{
        int i;

        pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
        for (i = 0; i < size; i++)
                hv_page_online_one(has, pfn_to_page(start_pfn + i));
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
                                unsigned long pfn_count,
                                struct hv_hotadd_state *has)
{
        int ret = 0;
        int i, nid;
        unsigned long start_pfn;
        unsigned long processed_pfn;
        unsigned long total_pfn = pfn_count;
        unsigned long flags;

        for (i = 0; i < (size/HA_CHUNK); i++) {
                start_pfn = start + (i * HA_CHUNK);

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                has->ha_end_pfn += HA_CHUNK;

                if (total_pfn > HA_CHUNK) {
                        processed_pfn = HA_CHUNK;
                        total_pfn -= HA_CHUNK;
                } else {
                        processed_pfn = total_pfn;
                        total_pfn = 0;
                }

                has->covered_end_pfn += processed_pfn;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);

                init_completion(&dm_device.ol_waitevent);
                dm_device.ha_waiting = !memhp_auto_online;

                nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
                ret = add_memory(nid, PFN_PHYS((start_pfn)),
                                (HA_CHUNK << PAGE_SHIFT));

                if (ret) {
                        pr_err("hot_add memory failed error is %d\n", ret);
                        if (ret == -EEXIST) {
                                /*
                                 * -EEXIST indicates that the failure is not
                                 * transient. This is the case where the
                                 * guest's physical address map precludes hot
                                 * adding memory. Stop all further memory
                                 * hot-add.
                                 */
                                do_hot_add = false;
                        }
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                        has->ha_end_pfn -= HA_CHUNK;
                        has->covered_end_pfn -= processed_pfn;
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        break;
                }

                /*
                 * Wait for the memory block to be onlined when memory onlining
                 * is done outside of the kernel (memhp_auto_online). Since the
                 * hot add has succeeded, it is ok to proceed even if the pages
                 * in the hot added region have not been "onlined" within the
                 * allowed time.
                 */
                if (dm_device.ha_waiting)
                        wait_for_completion_timeout(&dm_device.ol_waitevent,
                                                    5*HZ);
                post_status(&dm_device);
        }
}

static void hv_online_page(struct page *pg)
{
        struct hv_hotadd_state *has;
        unsigned long flags;
        unsigned long pfn = page_to_pfn(pg);

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /* The page belongs to a different HAS. */
                if ((pfn < has->start_pfn) || (pfn >= has->end_pfn))
                        continue;

                hv_page_online_one(has, pg);
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}

static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
        struct hv_hotadd_state *has;
        struct hv_hotadd_gap *gap;
        unsigned long residual, new_inc;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                /*
                 * If the current start pfn is not where the covered_end
                 * is, create a gap and update covered_end_pfn.
                 */
                if (has->covered_end_pfn != start_pfn) {
                        gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
                        if (!gap) {
                                ret = -ENOMEM;
                                break;
                        }

                        INIT_LIST_HEAD(&gap->list);
                        gap->start_pfn = has->covered_end_pfn;
                        gap->end_pfn = start_pfn;
                        list_add_tail(&gap->list, &has->gap_list);

                        has->covered_end_pfn = start_pfn;
                }

                /*
                 * If the current hot-add request extends beyond
                 * our current limit, extend it.
                 */
                if ((start_pfn + pfn_cnt) > has->end_pfn) {
                        residual = (start_pfn + pfn_cnt - has->end_pfn);
                        /*
                         * Extend the region by multiples of HA_CHUNK.
                         */
                        new_inc = (residual / HA_CHUNK) * HA_CHUNK;
                        if (residual % HA_CHUNK)
                                new_inc += HA_CHUNK;

                        has->end_pfn += new_inc;
                }

                ret = 1;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return ret;
}

static unsigned long handle_pg_range(unsigned long pg_start,
                                        unsigned long pg_count)
{
        unsigned long start_pfn = pg_start;
        unsigned long pfn_cnt = pg_count;
        unsigned long size;
        struct hv_hotadd_state *has;
        unsigned long pgs_ol = 0;
        unsigned long old_covered_state;
        unsigned long res = 0, flags;

        pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
                pg_start);

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                old_covered_state = has->covered_end_pfn;

                if (start_pfn < has->ha_end_pfn) {
                        /*
                         * This is the case where we are backing pages
                         * in an already hot added region. Bring
                         * these pages online first.
                         */
                        pgs_ol = has->ha_end_pfn - start_pfn;
                        if (pgs_ol > pfn_cnt)
                                pgs_ol = pfn_cnt;

                        has->covered_end_pfn += pgs_ol;
                        pfn_cnt -= pgs_ol;
                        /*
                         * Check if the corresponding memory block is already
                         * online. It is possible to observe struct pages still
                         * being uninitialized here so check the section
                         * instead. In case the section is online we need to
                         * bring the rest of the pfns (which were not backed
                         * previously) online too.
                         */
                        if (start_pfn > has->start_pfn &&
                            online_section_nr(pfn_to_section_nr(start_pfn)))
                                hv_bring_pgs_online(has, start_pfn, pgs_ol);

                }

                if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
                        /*
                         * We have some residual hot add range
                         * that needs to be hot added; hot add
                         * it now. Hot add a multiple of
                         * HA_CHUNK that fully covers the pages
                         * we have.
                         */
                        size = (has->end_pfn - has->ha_end_pfn);
                        if (pfn_cnt <= size) {
                                size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
                                if (pfn_cnt % HA_CHUNK)
                                        size += HA_CHUNK;
                        } else {
                                pfn_cnt = size;
                        }
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                }
                /*
                 * If we managed to online any pages that were given to us,
                 * we declare success.
                 */
                res = has->covered_end_pfn - old_covered_state;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return res;
}

static unsigned long process_hot_add(unsigned long pg_start,
                                        unsigned long pfn_cnt,
                                        unsigned long rg_start,
                                        unsigned long rg_size)
{
        struct hv_hotadd_state *ha_region = NULL;
        int covered;
        unsigned long flags;

        if (pfn_cnt == 0)
                return 0;

        if (!dm_device.host_specified_ha_region) {
                covered = pfn_covered(pg_start, pfn_cnt);
                if (covered < 0)
                        return 0;

                if (covered)
                        goto do_pg_range;
        }

        /*
         * If the host has specified a hot-add range, deal with it first.
         */

        if (rg_size != 0) {
                ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
                if (!ha_region)
                        return 0;

                INIT_LIST_HEAD(&ha_region->list);
                INIT_LIST_HEAD(&ha_region->gap_list);

                ha_region->start_pfn = rg_start;
                ha_region->ha_end_pfn = rg_start;
                ha_region->covered_start_pfn = pg_start;
                ha_region->covered_end_pfn = pg_start;
                ha_region->end_pfn = rg_start + rg_size;

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                list_add_tail(&ha_region->list, &dm_device.ha_region_list);
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
        }

do_pg_range:
        /*
         * Process the specified page range, bringing the
         * pages online if possible.
         */
        return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
        struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
        unsigned long pg_start, pfn_cnt;
        unsigned long rg_start, rg_sz;
#endif
        struct hv_dynmem_device *dm = &dm_device;

        memset(&resp, 0, sizeof(struct dm_hot_add_response));
        resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
        resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
        pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
        pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

        rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
        rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

        if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
                unsigned long region_size;
                unsigned long region_start;

                /*
                 * The host has not specified the hot-add region.
                 * Based on the hot-add page range being specified,
                 * compute a hot-add region that can cover the pages
                 * that need to be hot-added while ensuring the alignment
                 * and size requirements of Linux as it relates to hot-add.
                 */
                region_start = pg_start;
                region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
                if (pfn_cnt % HA_CHUNK)
                        region_size += HA_CHUNK;

                region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

                rg_start = region_start;
                rg_sz = region_size;
        }

        if (do_hot_add)
                resp.page_count = process_hot_add(pg_start, pfn_cnt,
                                                rg_start, rg_sz);

        dm->num_pages_added += resp.page_count;
#endif
        /*
         * The result field of the response structure has the
         * following semantics:
         *
         * 1. If all or some pages hot-added: Guest should return success.
         *
         * 2. If no pages could be hot-added:
         *
         * If the guest returns success, then the host
         * will not attempt any further hot-add operations. This
         * signifies a permanent failure.
         *
         * If the guest returns failure, then this failure will be
         * treated as a transient failure and the host may retry the
         * hot-add operation after some delay.
         */
        if (resp.page_count > 0)
                resp.result = 1;
        else if (!do_hot_add)
                resp.result = 1;
        else
                resp.result = 0;

        if (!do_hot_add || (resp.page_count == 0))
                pr_err("Memory hot add failed\n");

        dm->state = DM_INITIALIZED;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        vmbus_sendpacket(dm->dev->channel, &resp,
                        sizeof(struct dm_hot_add_response),
                        (unsigned long)NULL,
                        VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
        struct dm_info_header *info_hdr;

        info_hdr = (struct dm_info_header *)msg->info;

        switch (info_hdr->type) {
        case INFO_TYPE_MAX_PAGE_CNT:
                if (info_hdr->data_size == sizeof(__u64)) {
                        __u64 *max_page_count = (__u64 *)&info_hdr[1];

                        pr_info("Max. dynamic memory size: %llu MB\n",
                                (*max_page_count) >> (20 - PAGE_SHIFT));
                }

                break;
        default:
                pr_warn("Received Unknown type: %d\n", info_hdr->type);
        }
}

static unsigned long compute_balloon_floor(void)
{
        unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
        /* Simple continuous piecewise linear function:
         *  max MiB -> min MiB  gradient
         *       0         0
         *      16        16
         *      32        24
         *     128        72    (1/2)
         *     512       168    (1/4)
         *    2048       360    (1/8)
         *    8192       744    (1/16)
         *   32768      1512    (1/32)
         */
        if (totalram_pages < MB2PAGES(128))
                min_pages = MB2PAGES(8) + (totalram_pages >> 1);
        else if (totalram_pages < MB2PAGES(512))
                min_pages = MB2PAGES(40) + (totalram_pages >> 2);
        else if (totalram_pages < MB2PAGES(2048))
                min_pages = MB2PAGES(104) + (totalram_pages >> 3);
        else if (totalram_pages < MB2PAGES(8192))
                min_pages = MB2PAGES(232) + (totalram_pages >> 4);
        else
                min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
        return min_pages;
}
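
/*
 * Worked example for the table above (assuming 4 KiB pages): a guest with
 * 4096 MiB of RAM falls in the 2048..8192 MiB band, so the floor is
 * MB2PAGES(232) + totalram_pages / 16 = 232 MiB + 256 MiB = 488 MiB worth
 * of pages, matching the 1/16 gradient row.
 */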

/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
        struct dm_status status;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;

        if (pressure_report_delay > 0) {
                --pressure_report_delay;
                return;
        }

        if (!time_after(now, (last_post_time + HZ)))
                return;

        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
        status.hdr.size = sizeof(struct dm_status);
        status.hdr.trans_id = atomic_inc_return(&trans_id);

        /*
         * The host expects the guest to report free and committed memory.
         * Furthermore, the host expects the pressure information to include
         * the ballooned out pages. For a given amount of memory that we are
         * managing we need to compute a floor below which we should not
         * balloon. Compute this and add it to the pressure report.
         * We also need to report all offline pages (num_pages_added -
         * num_pages_onlined) as committed to the host, otherwise it can try
         * asking us to balloon them out.
         */
        status.num_avail = si_mem_available();
        status.num_committed = vm_memory_committed() +
                dm->num_pages_ballooned +
                (dm->num_pages_added > dm->num_pages_onlined ?
                 dm->num_pages_added - dm->num_pages_onlined : 0) +
                compute_balloon_floor();

        trace_balloon_status(status.num_avail, status.num_committed,
                             vm_memory_committed(), dm->num_pages_ballooned,
                             dm->num_pages_added, dm->num_pages_onlined);
        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
         * after we picked our transaction ID.
         */
        if (status.hdr.trans_id != atomic_read(&trans_id))
                return;

        /*
         * If the last post time that we sampled has changed,
         * we have raced, don't post the status.
         */
        if (last_post != last_post_time)
                return;

        last_post_time = jiffies;
        vmbus_sendpacket(dm->dev->channel, &status,
                                sizeof(struct dm_status),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
}

static void free_balloon_pages(struct hv_dynmem_device *dm,
                         union dm_mem_page_range *range_array)
{
        int num_pages = range_array->finfo.page_cnt;
        __u64 start_frame = range_array->finfo.start_page;
        struct page *pg;
        int i;

        for (i = 0; i < num_pages; i++) {
                pg = pfn_to_page(i + start_frame);
                __free_page(pg);
                dm->num_pages_ballooned--;
        }
}

static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
                                        unsigned int num_pages,
                                        struct dm_balloon_response *bl_resp,
                                        int alloc_unit)
{
        unsigned int i = 0;
        struct page *pg;

        for (i = 0; i < num_pages / alloc_unit; i++) {
                if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
                        PAGE_SIZE)
                        return i * alloc_unit;

                /*
                 * We execute this code in a thread context. Furthermore,
                 * we don't want the kernel to try too hard.
                 */
                pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
                                __GFP_NOMEMALLOC | __GFP_NOWARN,
                                get_order(alloc_unit << PAGE_SHIFT));

                if (!pg)
                        return i * alloc_unit;

                dm->num_pages_ballooned += alloc_unit;

                /*
                 * If we allocated 2M pages, split them so we
                 * can free them in any order we get.
                 */
                if (alloc_unit != 1)
                        split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

                bl_resp->range_count++;
                bl_resp->range_array[i].finfo.start_page =
                        page_to_pfn(pg);
                bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
                bl_resp->hdr.size += sizeof(union dm_mem_page_range);
        }

        return i * alloc_unit;
}
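
/*
 * Design note, with a small example: a 2M allocation (alloc_unit == 512)
 * comes back as one higher-order page; the split_page() call above turns it
 * into 512 independent order-0 pages, so free_balloon_pages() can later
 * return any subset of them in whatever order the host unballoons.
 */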

static void balloon_up(struct work_struct *dummy)
{
        unsigned int num_pages = dm_device.balloon_wrk.num_pages;
        unsigned int num_ballooned = 0;
        struct dm_balloon_response *bl_resp;
        int alloc_unit;
        int ret;
        bool done = false;
        int i;
        long avail_pages;
        unsigned long floor;

        /*
         * We will attempt 2M allocations. However, if we fail to
         * allocate 2M chunks, we will go back to 4k allocations.
         */
        alloc_unit = 512;

        avail_pages = si_mem_available();
        floor = compute_balloon_floor();

        /* Refuse to balloon below the floor. */
        if (avail_pages < num_pages || avail_pages - num_pages < floor) {
                pr_info("Balloon request will be partially fulfilled. %s\n",
                        avail_pages < num_pages ? "Not enough memory." :
                        "Balloon floor reached.");

                num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
        }

        while (!done) {
                bl_resp = (struct dm_balloon_response *)send_buffer;
                memset(send_buffer, 0, PAGE_SIZE);
                bl_resp->hdr.type = DM_BALLOON_RESPONSE;
                bl_resp->hdr.size = sizeof(struct dm_balloon_response);
                bl_resp->more_pages = 1;

                num_pages -= num_ballooned;
                num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
                                                    bl_resp, alloc_unit);

                if (alloc_unit != 1 && num_ballooned == 0) {
                        alloc_unit = 1;
                        continue;
                }

                if (num_ballooned == 0 || num_ballooned == num_pages) {
                        pr_debug("Ballooned %u out of %u requested pages.\n",
                                num_pages, dm_device.balloon_wrk.num_pages);

                        bl_resp->more_pages = 0;
                        done = true;
                        dm_device.state = DM_INITIALIZED;
                }

                /*
                 * We are pushing a lot of data through the channel;
                 * deal with transient failures caused by the lack of
                 * space in the ring buffer.
                 */
                do {
                        bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
                        ret = vmbus_sendpacket(dm_device.dev->channel,
                                                bl_resp,
                                                bl_resp->hdr.size,
                                                (unsigned long)NULL,
                                                VM_PKT_DATA_INBAND, 0);

                        if (ret == -EAGAIN)
                                msleep(20);
                        post_status(&dm_device);
                } while (ret == -EAGAIN);

                if (ret) {
                        /*
                         * Free up the memory we allocated.
                         */
                        pr_err("Balloon response failed\n");

                        for (i = 0; i < bl_resp->range_count; i++)
                                free_balloon_pages(&dm_device,
                                                 &bl_resp->range_array[i]);

                        done = true;
                }
        }
}

static void balloon_down(struct hv_dynmem_device *dm,
                        struct dm_unballoon_request *req)
{
        union dm_mem_page_range *range_array = req->range_array;
        int range_count = req->range_count;
        struct dm_unballoon_response resp;
        int i;
        unsigned int prev_pages_ballooned = dm->num_pages_ballooned;

        for (i = 0; i < range_count; i++) {
                free_balloon_pages(dm, &range_array[i]);
                complete(&dm_device.config_event);
        }

        pr_debug("Freed %u ballooned pages.\n",
                prev_pages_ballooned - dm->num_pages_ballooned);

        if (req->more_pages == 1)
                return;

        memset(&resp, 0, sizeof(struct dm_unballoon_response));
        resp.hdr.type = DM_UNBALLOON_RESPONSE;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        resp.hdr.size = sizeof(struct dm_unballoon_response);

        vmbus_sendpacket(dm_device.dev->channel, &resp,
                                sizeof(struct dm_unballoon_response),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
        struct hv_dynmem_device *dm = dm_dev;

        while (!kthread_should_stop()) {
                wait_for_completion_interruptible_timeout(
                                                &dm_device.config_event, 1*HZ);
                /*
                 * The host expects us to post information on the memory
                 * pressure every second.
                 */
                reinit_completion(&dm_device.config_event);
                post_status(dm);
        }

        return 0;
}

static void version_resp(struct hv_dynmem_device *dm,
                        struct dm_version_response *vresp)
{
        struct dm_version_request version_req;
        int ret;

        if (vresp->is_accepted) {
                /*
                 * We are done; wake up the
                 * context waiting for version
                 * negotiation.
                 */
                complete(&dm->host_event);
                return;
        }
        /*
         * If there are more versions to try, continue
         * with negotiations; if not, shut down the service
         * since we are not able to negotiate a suitable
         * version number with the host.
         */
        if (dm->next_version == 0)
                goto version_error;

        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = dm->next_version;
        dm->version = version_req.version.version;

        /*
         * Set the next version to try in case the current version fails.
         * The Win7 protocol ought to be the last one to try.
         */
        switch (version_req.version.version) {
        case DYNMEM_PROTOCOL_VERSION_WIN8:
                dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
                version_req.is_last_attempt = 0;
                break;
        default:
                dm->next_version = 0;
                version_req.is_last_attempt = 1;
        }

        ret = vmbus_sendpacket(dm->dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        if (ret)
                goto version_error;

        return;

version_error:
        dm->state = DM_INIT_ERROR;
        complete(&dm->host_event);
}
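
/*
 * The resulting downgrade ladder: balloon_probe() first requests WIN10
 * (2.0); on rejection version_resp() retries with next_version (WIN8, 1.0)
 * and finally WIN7 (0.3), which goes out with is_last_attempt == 1.
 */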

static void cap_resp(struct hv_dynmem_device *dm,
                        struct dm_capabilities_resp_msg *cap_resp)
{
        if (!cap_resp->is_accepted) {
                pr_err("Capabilities not accepted by host\n");
                dm->state = DM_INIT_ERROR;
        }
        complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
        struct hv_device *dev = context;
        u32 recvlen;
        u64 requestid;
        struct dm_message *dm_msg;
        struct dm_header *dm_hdr;
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct dm_balloon *bal_msg;
        struct dm_hot_add *ha_msg;
        union dm_mem_page_range *ha_pg_range;
        union dm_mem_page_range *ha_region;

        memset(recv_buffer, 0, sizeof(recv_buffer));
        vmbus_recvpacket(dev->channel, recv_buffer,
                         PAGE_SIZE, &recvlen, &requestid);

        if (recvlen > 0) {
                dm_msg = (struct dm_message *)recv_buffer;
                dm_hdr = &dm_msg->hdr;

                switch (dm_hdr->type) {
                case DM_VERSION_RESPONSE:
                        version_resp(dm,
                                 (struct dm_version_response *)dm_msg);
                        break;

                case DM_CAPABILITIES_RESPONSE:
                        cap_resp(dm,
                                 (struct dm_capabilities_resp_msg *)dm_msg);
                        break;

                case DM_BALLOON_REQUEST:
                        if (dm->state == DM_BALLOON_UP)
                                pr_warn("Currently ballooning\n");
                        bal_msg = (struct dm_balloon *)recv_buffer;
                        dm->state = DM_BALLOON_UP;
                        dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
                        schedule_work(&dm_device.balloon_wrk.wrk);
                        break;

                case DM_UNBALLOON_REQUEST:
                        dm->state = DM_BALLOON_DOWN;
                        balloon_down(dm,
                                 (struct dm_unballoon_request *)recv_buffer);
                        break;

                case DM_MEM_HOT_ADD_REQUEST:
                        if (dm->state == DM_HOT_ADD)
                                pr_warn("Currently hot-adding\n");
                        dm->state = DM_HOT_ADD;
                        ha_msg = (struct dm_hot_add *)recv_buffer;
                        if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
                                /*
                                 * This is a normal hot-add request specifying
                                 * hot-add memory.
                                 */
                                dm->host_specified_ha_region = false;
                                ha_pg_range = &ha_msg->range;
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range.page_range = 0;
                        } else {
                                /*
                                 * Host is specifying that we first hot-add
                                 * a region and then partially populate this
                                 * region.
                                 */
                                dm->host_specified_ha_region = true;
                                ha_pg_range = &ha_msg->range;
                                ha_region = &ha_pg_range[1];
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range = *ha_region;
                        }
                        schedule_work(&dm_device.ha_wrk.wrk);
                        break;

                case DM_INFO_MESSAGE:
                        process_info(dm, (struct dm_info_msg *)dm_msg);
                        break;

                default:
                        pr_warn_ratelimited("Unhandled message: type: %d\n",
                                            dm_hdr->type);
                }
        }
}
1557
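/*
 * Driver probe: open the channel, start the status-reporting thread,
 * then perform the two-step startup handshake with the host (version
 * negotiation followed by a capabilities report). Each step waits up
 * to five seconds for the host's response.
 */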
static int balloon_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        int ret;
        unsigned long t;
        struct dm_version_request version_req;
        struct dm_capabilities cap_msg;

#ifdef CONFIG_MEMORY_HOTPLUG
        do_hot_add = hot_add;
#else
        do_hot_add = false;
#endif

        /*
         * First allocate a send buffer.
         */
        send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!send_buffer)
                return -ENOMEM;

        ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
                        balloon_onchannelcallback, dev);

        if (ret)
                goto probe_error0;

        dm_device.dev = dev;
        dm_device.state = DM_INITIALIZING;
        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
        init_completion(&dm_device.host_event);
        init_completion(&dm_device.config_event);
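        /*
         * ha_region_list tracks the memory regions the host has asked
         * us to hot-add; ha_lock serializes access to it between the
         * hot-add work item and the page-onlining callbacks.
         */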
        INIT_LIST_HEAD(&dm_device.ha_region_list);
        spin_lock_init(&dm_device.ha_lock);
        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
        dm_device.host_specified_ha_region = false;

        dm_device.thread =
                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
        if (IS_ERR(dm_device.thread)) {
                ret = PTR_ERR(dm_device.thread);
                goto probe_error1;
        }

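        /*
         * Intercept page onlining via hv_online_page() so that pages
         * hot-added on behalf of the host are handled by this driver,
         * and register for memory hotplug notifications.
         */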
#ifdef CONFIG_MEMORY_HOTPLUG
        set_online_page_callback(&hv_online_page);
        register_memory_notifier(&hv_memory_nb);
#endif

        hv_set_drvdata(dev, &dm_device);
        /*
         * Initiate the handshake with the host and negotiate a version
         * that the host can support. We start with the highest version
         * number and fall back if the host cannot support it; the
         * retry with dm_device.next_version happens in version_resp().
         */
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
        version_req.is_last_attempt = 0;
        dm_device.version = version_req.version.version;

        ret = vmbus_sendpacket(dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If we could not negotiate a compatible version with the host,
         * fail the probe function. This is a protocol failure, not a
         * timeout, so report it as such.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -EPROTO;
                goto probe_error2;
        }

        pr_info("Using Dynamic Memory protocol version %u.%u\n",
                DYNMEM_MAJOR_VERSION(dm_device.version),
                DYNMEM_MINOR_VERSION(dm_device.version));

        /*
         * Now submit our capabilities to the host.
         */
        memset(&cap_msg, 0, sizeof(struct dm_capabilities));
        cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
        cap_msg.hdr.size = sizeof(struct dm_capabilities);
        cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

        cap_msg.caps.cap_bits.balloon = 1;
        cap_msg.caps.cap_bits.hot_add = 1;

        /*
         * Specify our alignment requirement for memory hot-add: the
         * encoded value is log2 of the alignment in megabytes, so 7
         * requests 128 MB alignment.
         */
        cap_msg.caps.cap_bits.hot_add_alignment = 7;

        /*
         * Currently the host does not use these values and we set
         * them to what is done in the Windows driver.
         */
        cap_msg.min_page_cnt = 0;
        cap_msg.max_page_number = -1;

        ret = vmbus_sendpacket(dev->channel, &cap_msg,
                                sizeof(struct dm_capabilities),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If the host does not accept our capabilities,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -EPROTO;
                goto probe_error2;
        }

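        /*
         * Seed last_post_time so the periodic status reports from the
         * worker thread are rate limited from the start (post_status()
         * posts at most once per second).
         */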
        dm_device.state = DM_INITIALIZED;
        last_post_time = jiffies;

        return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        unregister_memory_notifier(&hv_memory_nb);
#endif
        kthread_stop(dm_device.thread);

probe_error1:
        vmbus_close(dev->channel);
probe_error0:
        kfree(send_buffer);
        return ret;
}

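/*
 * Driver remove: flush any in-flight balloon/hot-add work before
 * tearing down the channel and worker thread, then release the
 * hot-add region bookkeeping under ha_lock.
 */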
static int balloon_remove(struct hv_device *dev)
{
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct hv_hotadd_state *has, *tmp;
        struct hv_hotadd_gap *gap, *tmp_gap;
        unsigned long flags;

        if (dm->num_pages_ballooned != 0)
                pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

        cancel_work_sync(&dm->balloon_wrk.wrk);
        cancel_work_sync(&dm->ha_wrk.wrk);

        vmbus_close(dev->channel);
        kthread_stop(dm->thread);
        kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        unregister_memory_notifier(&hv_memory_nb);
#endif
        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
                list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
                        list_del(&gap->list);
                        kfree(gap);
                }
                list_del(&has->list);
                kfree(has);
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Dynamic Memory Class ID */
        /* 525074DC-8985-46e2-8057-A307DC18A502 */
        { HV_DM_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

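/*
 * Prefer asynchronous probing: probe() can block for up to two
 * five-second host timeouts during the handshake, and doing that
 * synchronously would stall boot.
 */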
static struct hv_driver balloon_drv = {
        .name = "hv_balloon",
        .id_table = id_table,
        .probe = balloon_probe,
        .remove = balloon_remove,
        .driver = {
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
};

static int __init init_balloon_drv(void)
{
        return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");