GNU Linux-libre 4.19.286-gnu1
[releases.git] / drivers / virt / vboxguest / vboxguest_core.c
1 /* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
2 /*
3  * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
4  *
5  * Copyright (C) 2007-2016 Oracle Corporation
6  */
7
8 #include <linux/device.h>
9 #include <linux/mm.h>
10 #include <linux/sched.h>
11 #include <linux/sizes.h>
12 #include <linux/slab.h>
13 #include <linux/vbox_err.h>
14 #include <linux/vbox_utils.h>
15 #include <linux/vmalloc.h>
16 #include "vboxguest_core.h"
17 #include "vboxguest_version.h"
18
19 /* Get the pointer to the first HGCM parameter. */
20 #define VBG_IOCTL_HGCM_CALL_PARMS(a) \
21         ((struct vmmdev_hgcm_function_parameter *)( \
22                 (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
23 /* Get the pointer to the first HGCM parameter in a 32-bit request. */
24 #define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
25         ((struct vmmdev_hgcm_function_parameter32 *)( \
26                 (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
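/*
 * Illustrative layout note (added commentary, not from the original source):
 * both macros assume the ioctl buffer holds a struct vbg_ioctl_hgcm_call
 * header followed immediately by the parameter array, roughly:
 *
 *	struct vbg_ioctl_hgcm_call hdr;                  offset 0
 *	struct vmmdev_hgcm_function_parameter parm[N];   offset sizeof(hdr)
 *
 * so adding sizeof(struct vbg_ioctl_hgcm_call) to the buffer start yields
 * the first parameter for both the native and the 32-bit layout.
 */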
27
28 #define GUEST_MAPPINGS_TRIES    5
29
30 /**
31  * Reserves memory in which the VMM can relocate any guest mappings
32  * that are floating around.
33  *
34  * This operation is a little bit tricky since the VMM might not accept
35  * just any address because of address clashes between the three contexts
36  * it operates in, so we try several times.
37  *
38  * Failure to reserve the guest mappings is ignored.
39  *
40  * @gdev:               The Guest extension device.
41  */
42 static void vbg_guest_mappings_init(struct vbg_dev *gdev)
43 {
44         struct vmmdev_hypervisorinfo *req;
45         void *guest_mappings[GUEST_MAPPINGS_TRIES];
46         struct page **pages = NULL;
47         u32 size, hypervisor_size;
48         int i, rc;
49
50         /* Query the required space. */
51         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
52         if (!req)
53                 return;
54
55         req->hypervisor_start = 0;
56         req->hypervisor_size = 0;
57         rc = vbg_req_perform(gdev, req);
58         if (rc < 0)
59                 goto out;
60
61         /*
62          * The VMM will report back if there is nothing it wants to map, like
63          * for instance in VT-x and AMD-V mode.
64          */
65         if (req->hypervisor_size == 0)
66                 goto out;
67
68         hypervisor_size = req->hypervisor_size;
69         /* Add SZ_4M so that we can align the vmap to 4MiB as the host requires. */
70         size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
71
72         pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
73         if (!pages)
74                 goto out;
75
76         gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
77         if (!gdev->guest_mappings_dummy_page)
78                 goto out;
79
80         for (i = 0; i < (size >> PAGE_SHIFT); i++)
81                 pages[i] = gdev->guest_mappings_dummy_page;
82
83         /*
84          * Try several times, the VMM might not accept some addresses because
85          * of address clashes between the three contexts.
86          */
87         for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
88                 guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
89                                          VM_MAP, PAGE_KERNEL_RO);
90                 if (!guest_mappings[i])
91                         break;
92
93                 req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
94                 req->header.rc = VERR_INTERNAL_ERROR;
95                 req->hypervisor_size = hypervisor_size;
96                 req->hypervisor_start =
97                         (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
98
99                 rc = vbg_req_perform(gdev, req);
100                 if (rc >= 0) {
101                         gdev->guest_mappings = guest_mappings[i];
102                         break;
103                 }
104         }
105
106         /* Free vmap's from failed attempts. */
107         while (--i >= 0)
108                 vunmap(guest_mappings[i]);
109
110         /* On failure free the dummy-page backing the vmap */
111         if (!gdev->guest_mappings) {
112                 __free_page(gdev->guest_mappings_dummy_page);
113                 gdev->guest_mappings_dummy_page = NULL;
114         }
115
116 out:
117         vbg_req_free(req, sizeof(*req));
118         kfree(pages);
119 }
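/*
 * Sizing sketch for the function above (illustrative, assuming 4 KiB pages):
 * for a reported hypervisor_size of 2 MiB the reservation becomes
 * PAGE_ALIGN(2 MiB) + SZ_4M = 6 MiB of vmap backed by the single dummy page,
 * which guarantees that PTR_ALIGN(mapping, SZ_4M) still leaves at least
 * hypervisor_size bytes inside the mapping for the host to relocate into.
 */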
120
121 /**
122  * Undo what vbg_guest_mappings_init did.
123  *
124  * @gdev:               The Guest extension device.
125  */
126 static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
127 {
128         struct vmmdev_hypervisorinfo *req;
129         int rc;
130
131         if (!gdev->guest_mappings)
132                 return;
133
134         /*
135          * Tell the host that we're going to free the memory we reserved for
136          * it, then free it up. (Leak the memory if anything goes wrong here.)
137          */
138         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
139         if (!req)
140                 return;
141
142         req->hypervisor_start = 0;
143         req->hypervisor_size = 0;
144
145         rc = vbg_req_perform(gdev, req);
146
147         vbg_req_free(req, sizeof(*req));
148
149         if (rc < 0) {
150                 vbg_err("%s error: %d\n", __func__, rc);
151                 return;
152         }
153
154         vunmap(gdev->guest_mappings);
155         gdev->guest_mappings = NULL;
156
157         __free_page(gdev->guest_mappings_dummy_page);
158         gdev->guest_mappings_dummy_page = NULL;
159 }
160
161 /**
162  * Report the guest information to the host.
163  * Return: 0 or negative errno value.
164  * @gdev:               The Guest extension device.
165  */
166 static int vbg_report_guest_info(struct vbg_dev *gdev)
167 {
168         /*
169          * Allocate and fill in the two guest info reports.
170          */
171         struct vmmdev_guest_info *req1 = NULL;
172         struct vmmdev_guest_info2 *req2 = NULL;
173         int rc, ret = -ENOMEM;
174
175         req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
176         req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
177         if (!req1 || !req2)
178                 goto out_free;
179
180         req1->interface_version = VMMDEV_VERSION;
181         req1->os_type = VMMDEV_OSTYPE_LINUX26;
182 #if __BITS_PER_LONG == 64
183         req1->os_type |= VMMDEV_OSTYPE_X64;
184 #endif
185
186         req2->additions_major = VBG_VERSION_MAJOR;
187         req2->additions_minor = VBG_VERSION_MINOR;
188         req2->additions_build = VBG_VERSION_BUILD;
189         req2->additions_revision = VBG_SVN_REV;
190         /* (no features defined yet) */
191         req2->additions_features = 0;
192         strlcpy(req2->name, VBG_VERSION_STRING,
193                 sizeof(req2->name));
194
195         /*
196          * There are two protocols here:
197          *      1. INFO2 + INFO1. Supported by >=3.2.51.
198          *      2. INFO1 and optionally INFO2. The old protocol.
199          *
200          * We try protocol 1 first (INFO2, then INFO1).  INFO2 fails with
201          * VERR_NOT_SUPPORTED on hosts that predate it (message ordering requirement).
202          */
203         rc = vbg_req_perform(gdev, req2);
204         if (rc >= 0) {
205                 rc = vbg_req_perform(gdev, req1);
206         } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
207                 rc = vbg_req_perform(gdev, req1);
208                 if (rc >= 0) {
209                         rc = vbg_req_perform(gdev, req2);
210                         if (rc == VERR_NOT_IMPLEMENTED)
211                                 rc = VINF_SUCCESS;
212                 }
213         }
214         ret = vbg_status_code_to_errno(rc);
215
216 out_free:
217         vbg_req_free(req2, sizeof(*req2));
218         vbg_req_free(req1, sizeof(*req1));
219         return ret;
220 }
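/*
 * Ordering sketch (illustrative): on hosts >= 3.2.51 the sequence is INFO2
 * followed by INFO1 (protocol 1); older hosts reject INFO2 with
 * VERR_NOT_SUPPORTED / VERR_NOT_IMPLEMENTED and the driver falls back to
 * INFO1 followed by a best-effort INFO2 (protocol 2).
 */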
221
222 /**
223  * Report the guest driver status to the host.
224  * Return: 0 or negative errno value.
225  * @gdev:               The Guest extension device.
226  * @active:             Flag whether the driver is now active or not.
227  */
228 static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
229 {
230         struct vmmdev_guest_status *req;
231         int rc;
232
233         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
234         if (!req)
235                 return -ENOMEM;
236
237         req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
238         if (active)
239                 req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
240         else
241                 req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
242         req->flags = 0;
243
244         rc = vbg_req_perform(gdev, req);
245         if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
246                 rc = VINF_SUCCESS;
247
248         vbg_req_free(req, sizeof(*req));
249
250         return vbg_status_code_to_errno(rc);
251 }
252
253 /**
254  * Inflate the balloon by one chunk. The caller owns the balloon mutex.
255  * Return: 0 or negative errno value.
256  * @gdev:               The Guest extension device.
257  * @chunk_idx:          Index of the chunk.
258  */
259 static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
260 {
261         struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
262         struct page **pages;
263         int i, rc, ret;
264
265         pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
266                               sizeof(*pages),
267                               GFP_KERNEL | __GFP_NOWARN);
268         if (!pages)
269                 return -ENOMEM;
270
271         req->header.size = sizeof(*req);
272         req->inflate = true;
273         req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
274
275         for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
276                 pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
277                 if (!pages[i]) {
278                         ret = -ENOMEM;
279                         goto out_error;
280                 }
281
282                 req->phys_page[i] = page_to_phys(pages[i]);
283         }
284
285         rc = vbg_req_perform(gdev, req);
286         if (rc < 0) {
287                 vbg_err("%s error, rc: %d\n", __func__, rc);
288                 ret = vbg_status_code_to_errno(rc);
289                 goto out_error;
290         }
291
292         gdev->mem_balloon.pages[chunk_idx] = pages;
293
294         return 0;
295
296 out_error:
297         while (--i >= 0)
298                 __free_page(pages[i]);
299         kfree(pages);
300
301         return ret;
302 }
303
304 /**
305  * Deflate the balloon by one chunk. The caller owns the balloon mutex.
306  * Return: 0 or negative errno value.
307  * @gdev:               The Guest extension device.
308  * @chunk_idx:          Index of the chunk.
309  */
310 static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
311 {
312         struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
313         struct page **pages = gdev->mem_balloon.pages[chunk_idx];
314         int i, rc;
315
316         req->header.size = sizeof(*req);
317         req->inflate = false;
318         req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
319
320         for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
321                 req->phys_page[i] = page_to_phys(pages[i]);
322
323         rc = vbg_req_perform(gdev, req);
324         if (rc < 0) {
325                 vbg_err("%s error, rc: %d\n", __func__, rc);
326                 return vbg_status_code_to_errno(rc);
327         }
328
329         for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
330                 __free_page(pages[i]);
331         kfree(pages);
332         gdev->mem_balloon.pages[chunk_idx] = NULL;
333
334         return 0;
335 }
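/*
 * Balloon chunk sketch (illustrative; assumes the usual 1 MiB chunk of
 * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES 4 KiB pages): inflating chunk n
 * allocates that many pages, passes their physical addresses to the host
 * in req->phys_page[] and parks the page-pointer array in
 * gdev->mem_balloon.pages[n]; deflating chunk n replays the same physical
 * addresses with req->inflate = false and only then frees the pages.
 */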
336
337 /**
338  * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
339  * the host wants the balloon to be and adjust accordingly.
340  */
341 static void vbg_balloon_work(struct work_struct *work)
342 {
343         struct vbg_dev *gdev =
344                 container_of(work, struct vbg_dev, mem_balloon.work);
345         struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
346         u32 i, chunks;
347         int rc, ret;
348
349         /*
350          * Setting this bit means that we request the value from the host and
351          * change the guest memory balloon according to the returned value.
352          */
353         req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
354         rc = vbg_req_perform(gdev, req);
355         if (rc < 0) {
356                 vbg_err("%s error, rc: %d\n", __func__, rc);
357                 return;
358         }
359
360         /*
361          * The host always returns the same maximum amount of chunks, so
362          * we do this once.
363          */
364         if (!gdev->mem_balloon.max_chunks) {
365                 gdev->mem_balloon.pages =
366                         devm_kcalloc(gdev->dev, req->phys_mem_chunks,
367                                      sizeof(struct page **), GFP_KERNEL);
368                 if (!gdev->mem_balloon.pages)
369                         return;
370
371                 gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
372         }
373
374         chunks = req->balloon_chunks;
375         if (chunks > gdev->mem_balloon.max_chunks) {
376                 vbg_err("%s: illegal balloon size %u (max=%u)\n",
377                         __func__, chunks, gdev->mem_balloon.max_chunks);
378                 return;
379         }
380
381         if (chunks > gdev->mem_balloon.chunks) {
382                 /* inflate */
383                 for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
384                         ret = vbg_balloon_inflate(gdev, i);
385                         if (ret < 0)
386                                 return;
387
388                         gdev->mem_balloon.chunks++;
389                 }
390         } else {
391                 /* deflate */
392                 for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
393                         ret = vbg_balloon_deflate(gdev, i);
394                         if (ret < 0)
395                                 return;
396
397                         gdev->mem_balloon.chunks--;
398                 }
399         }
400 }
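/*
 * Example walk-through (illustrative): if the host requests 8 chunks while
 * mem_balloon.chunks is 5, the loop above inflates chunks 5, 6 and 7; if it
 * later requests 2, chunks 7 down to 2 are deflated one by one, stopping
 * early (balloon partially adjusted) on the first inflate/deflate error.
 */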
401
402 /**
403  * Callback for heartbeat timer.
404  */
405 static void vbg_heartbeat_timer(struct timer_list *t)
406 {
407         struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
408
409         vbg_req_perform(gdev, gdev->guest_heartbeat_req);
410         mod_timer(&gdev->heartbeat_timer,
411                   msecs_to_jiffies(gdev->heartbeat_interval_ms));
412 }
413
414 /**
415  * Configure the host to check the guest's heartbeat
416  * and get the heartbeat interval from the host.
417  * Return: 0 or negative errno value.
418  * @gdev:               The Guest extension device.
419  * @enabled:            Set true to enable guest heartbeat checks on host.
420  */
421 static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
422 {
423         struct vmmdev_heartbeat *req;
424         int rc;
425
426         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
427         if (!req)
428                 return -ENOMEM;
429
430         req->enabled = enabled;
431         req->interval_ns = 0;
432         rc = vbg_req_perform(gdev, req);
433         do_div(req->interval_ns, 1000000); /* ns -> ms */
434         gdev->heartbeat_interval_ms = req->interval_ns;
435         vbg_req_free(req, sizeof(*req));
436
437         return vbg_status_code_to_errno(rc);
438 }
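/*
 * Unit-conversion example (illustrative): the host reports interval_ns in
 * nanoseconds and do_div() divides it in place, so a host interval of
 * 2000000000 ns ends up as heartbeat_interval_ms = 2000, which is what the
 * heartbeat timer path later uses as its re-arm period.
 */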
439
440 /**
441  * Initializes the heartbeat timer. This feature may be disabled by the host.
442  * Return: 0 or negative errno value.
443  * @gdev:               The Guest extension device.
444  */
445 static int vbg_heartbeat_init(struct vbg_dev *gdev)
446 {
447         int ret;
448
449         /* Make sure that heartbeat checking is disabled if we fail. */
450         ret = vbg_heartbeat_host_config(gdev, false);
451         if (ret < 0)
452                 return ret;
453
454         ret = vbg_heartbeat_host_config(gdev, true);
455         if (ret < 0)
456                 return ret;
457
458         gdev->guest_heartbeat_req = vbg_req_alloc(
459                                         sizeof(*gdev->guest_heartbeat_req),
460                                         VMMDEVREQ_GUEST_HEARTBEAT);
461         if (!gdev->guest_heartbeat_req)
462                 return -ENOMEM;
463
464         vbg_info("%s: Setting up heartbeat to trigger every %u milliseconds\n",
465                  __func__, gdev->heartbeat_interval_ms);
466         mod_timer(&gdev->heartbeat_timer, 0);
467
468         return 0;
469 }
470
471 /**
472  * Clean up heartbeat code: stop the heartbeat timer and disable host heartbeat checking.
473  * @gdev:               The Guest extension device.
474  */
475 static void vbg_heartbeat_exit(struct vbg_dev *gdev)
476 {
477         del_timer_sync(&gdev->heartbeat_timer);
478         vbg_heartbeat_host_config(gdev, false);
479         vbg_req_free(gdev->guest_heartbeat_req,
480                      sizeof(*gdev->guest_heartbeat_req));
481 }
482
483 /**
484  * Applies a change to the bit usage tracker.
485  * Return: true if the mask changed, false if not.
486  * @tracker:            The bit usage tracker.
487  * @changed:            The bits to change.
488  * @previous:           The previous value of the bits.
489  */
490 static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
491                                 u32 changed, u32 previous)
492 {
493         bool global_change = false;
494
495         while (changed) {
496                 u32 bit = ffs(changed) - 1;
497                 u32 bitmask = BIT(bit);
498
499                 if (bitmask & previous) {
500                         tracker->per_bit_usage[bit] -= 1;
501                         if (tracker->per_bit_usage[bit] == 0) {
502                                 global_change = true;
503                                 tracker->mask &= ~bitmask;
504                         }
505                 } else {
506                         tracker->per_bit_usage[bit] += 1;
507                         if (tracker->per_bit_usage[bit] == 1) {
508                                 global_change = true;
509                                 tracker->mask |= bitmask;
510                         }
511                 }
512
513                 changed &= ~bitmask;
514         }
515
516         return global_change;
517 }
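/*
 * Usage-tracker sketch (illustrative): if two sessions both subscribe to
 * VMMDEV_EVENT_HGCM, the first change bumps per_bit_usage[bit] from 0 to 1
 * and sets the bit in tracker->mask; the second only bumps the count to 2
 * and leaves the mask untouched, so the aggregate mask (and therefore what
 * callers program into the host) only changes on 0 <-> 1 transitions.
 */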
518
519 /**
520  * Init and termination worker for resetting the event filter on the host.
521  * Return: 0 or negative errno value.
522  * @gdev:                  The Guest extension device.
523  * @fixed_events:          Fixed events (init time).
524  */
525 static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
526                                        u32 fixed_events)
527 {
528         struct vmmdev_mask *req;
529         int rc;
530
531         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
532         if (!req)
533                 return -ENOMEM;
534
535         req->not_mask = U32_MAX & ~fixed_events;
536         req->or_mask = fixed_events;
537         rc = vbg_req_perform(gdev, req);
538         if (rc < 0)
539                 vbg_err("%s error, rc: %d\n", __func__, rc);
540
541         vbg_req_free(req, sizeof(*req));
542         return vbg_status_code_to_errno(rc);
543 }
544
545 /**
546  * Changes the event filter mask for the given session.
547  *
548  * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
549  * do session cleanup. Takes the session spinlock.
550  *
551  * Return: 0 or negative errno value.
552  * @gdev:                       The Guest extension device.
553  * @session:                    The session.
554  * @or_mask:                    The events to add.
555  * @not_mask:                   The events to remove.
556  * @session_termination:        Set if we're called by the session cleanup code.
557  *                              This tweaks the error handling so we perform
558  *                              proper session cleanup even if the host
559  *                              misbehaves.
560  */
561 static int vbg_set_session_event_filter(struct vbg_dev *gdev,
562                                         struct vbg_session *session,
563                                         u32 or_mask, u32 not_mask,
564                                         bool session_termination)
565 {
566         struct vmmdev_mask *req;
567         u32 changed, previous;
568         int rc, ret = 0;
569
570         /* Allocate a request buffer before taking the mutex */
571         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
572         if (!req) {
573                 if (!session_termination)
574                         return -ENOMEM;
575                 /* Ignore allocation failure, we must do session cleanup. */
576         }
577
578         mutex_lock(&gdev->session_mutex);
579
580         /* Apply the changes to the session mask. */
581         previous = session->event_filter;
582         session->event_filter |= or_mask;
583         session->event_filter &= ~not_mask;
584
585         /* If anything actually changed, update the global usage counters. */
586         changed = previous ^ session->event_filter;
587         if (!changed)
588                 goto out;
589
590         vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
591         or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
592
593         if (gdev->event_filter_host == or_mask || !req)
594                 goto out;
595
596         gdev->event_filter_host = or_mask;
597         req->or_mask = or_mask;
598         req->not_mask = ~or_mask;
599         rc = vbg_req_perform(gdev, req);
600         if (rc < 0) {
601                 ret = vbg_status_code_to_errno(rc);
602
603                 /* Failed, roll back (unless it's session termination time). */
604                 gdev->event_filter_host = U32_MAX;
605                 if (session_termination)
606                         goto out;
607
608                 vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
609                                     session->event_filter);
610                 session->event_filter = previous;
611         }
612
613 out:
614         mutex_unlock(&gdev->session_mutex);
615         vbg_req_free(req, sizeof(*req));
616
617         return ret;
618 }
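/*
 * Mask-semantics sketch (illustrative): a session that wants to start
 * receiving VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST passes that bit in or_mask
 * with not_mask = 0; passing the same bit in not_mask drops the
 * subscription again. A bit set in both masks ends up cleared, because the
 * or_mask is applied before the not_mask above.
 */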
619
620 /**
621  * Init and termination worker for setting the guest capabilities to zero on the host.
622  * Return: 0 or negative errno value.
623  * @gdev:               The Guest extension device.
624  */
625 static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
626 {
627         struct vmmdev_mask *req;
628         int rc;
629
630         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
631         if (!req)
632                 return -ENOMEM;
633
634         req->not_mask = U32_MAX;
635         req->or_mask = 0;
636         rc = vbg_req_perform(gdev, req);
637         if (rc < 0)
638                 vbg_err("%s error, rc: %d\n", __func__, rc);
639
640         vbg_req_free(req, sizeof(*req));
641         return vbg_status_code_to_errno(rc);
642 }
643
644 /**
645  * Sets the guest capabilities for a session. Takes the session mutex.
646  * Return: 0 or negative errno value.
647  * @gdev:                       The Guest extension device.
648  * @session:                    The session.
649  * @or_mask:                    The capabilities to add.
650  * @not_mask:                   The capabilities to remove.
651  * @session_termination:        Set if we're called by the session cleanup code.
652  *                              This tweaks the error handling so we perform
653  *                              proper session cleanup even if the host
654  *                              misbehaves.
655  */
656 static int vbg_set_session_capabilities(struct vbg_dev *gdev,
657                                         struct vbg_session *session,
658                                         u32 or_mask, u32 not_mask,
659                                         bool session_termination)
660 {
661         struct vmmdev_mask *req;
662         u32 changed, previous;
663         int rc, ret = 0;
664
665         /* Allocate a request buffer before taking the mutex */
666         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
667         if (!req) {
668                 if (!session_termination)
669                         return -ENOMEM;
670                 /* Ignore allocation failure, we must do session cleanup. */
671         }
672
673         mutex_lock(&gdev->session_mutex);
674
675         /* Apply the changes to the session mask. */
676         previous = session->guest_caps;
677         session->guest_caps |= or_mask;
678         session->guest_caps &= ~not_mask;
679
680         /* If anything actually changed, update the global usage counters. */
681         changed = previous ^ session->guest_caps;
682         if (!changed)
683                 goto out;
684
685         vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
686         or_mask = gdev->guest_caps_tracker.mask;
687
688         if (gdev->guest_caps_host == or_mask || !req)
689                 goto out;
690
691         gdev->guest_caps_host = or_mask;
692         req->or_mask = or_mask;
693         req->not_mask = ~or_mask;
694         rc = vbg_req_perform(gdev, req);
695         if (rc < 0) {
696                 ret = vbg_status_code_to_errno(rc);
697
698                 /* Failed, roll back (unless it's session termination time). */
699                 gdev->guest_caps_host = U32_MAX;
700                 if (session_termination)
701                         goto out;
702
703                 vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
704                                     session->guest_caps);
705                 session->guest_caps = previous;
706         }
707
708 out:
709         mutex_unlock(&gdev->session_mutex);
710         vbg_req_free(req, sizeof(*req));
711
712         return ret;
713 }
714
715 /**
716  * vbg_query_host_version - get the host feature mask and version information.
717  * Return: 0 or negative errno value.
718  * @gdev:               The Guest extension device.
719  */
720 static int vbg_query_host_version(struct vbg_dev *gdev)
721 {
722         struct vmmdev_host_version *req;
723         int rc, ret;
724
725         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
726         if (!req)
727                 return -ENOMEM;
728
729         rc = vbg_req_perform(gdev, req);
730         ret = vbg_status_code_to_errno(rc);
731         if (ret) {
732                 vbg_err("%s error: %d\n", __func__, rc);
733                 goto out;
734         }
735
736         snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
737                  req->major, req->minor, req->build, req->revision);
738         gdev->host_features = req->features;
739
740         vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
741                  gdev->host_features);
742
743         if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
744                 vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
745                 ret = -ENODEV;
746         }
747
748 out:
749         vbg_req_free(req, sizeof(*req));
750         return ret;
751 }
752
753 /**
754  * Initializes the VBoxGuest device extension when the
755  * device driver is loaded.
756  *
757  * The native code locates the VMMDev on the PCI bus and retrieves
758  * the MMIO and I/O port ranges; this function will take care of
759  * mapping the MMIO memory (if present). Upon successful return
760  * the native code should set up the interrupt handler.
761  *
762  * Return: 0 or negative errno value.
763  *
764  * @gdev:               The Guest extension device.
765  * @fixed_events:       Events that will be enabled upon init and no client
766  *                      will ever be allowed to mask.
767  */
768 int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
769 {
770         int ret = -ENOMEM;
771
772         gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
773         gdev->event_filter_host = U32_MAX;      /* forces a report */
774         gdev->guest_caps_host = U32_MAX;        /* forces a report */
775
776         init_waitqueue_head(&gdev->event_wq);
777         init_waitqueue_head(&gdev->hgcm_wq);
778         spin_lock_init(&gdev->event_spinlock);
779         mutex_init(&gdev->session_mutex);
780         mutex_init(&gdev->cancel_req_mutex);
781         timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
782         INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
783
784         gdev->mem_balloon.get_req =
785                 vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
786                               VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
787         gdev->mem_balloon.change_req =
788                 vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
789                               VMMDEVREQ_CHANGE_MEMBALLOON);
790         gdev->cancel_req =
791                 vbg_req_alloc(sizeof(*(gdev->cancel_req)),
792                               VMMDEVREQ_HGCM_CANCEL2);
793         gdev->ack_events_req =
794                 vbg_req_alloc(sizeof(*gdev->ack_events_req),
795                               VMMDEVREQ_ACKNOWLEDGE_EVENTS);
796         gdev->mouse_status_req =
797                 vbg_req_alloc(sizeof(*gdev->mouse_status_req),
798                               VMMDEVREQ_GET_MOUSE_STATUS);
799
800         if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
801             !gdev->cancel_req || !gdev->ack_events_req ||
802             !gdev->mouse_status_req)
803                 goto err_free_reqs;
804
805         ret = vbg_query_host_version(gdev);
806         if (ret)
807                 goto err_free_reqs;
808
809         ret = vbg_report_guest_info(gdev);
810         if (ret) {
811                 vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
812                 goto err_free_reqs;
813         }
814
815         ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
816         if (ret) {
817                 vbg_err("vboxguest: Error setting fixed event filter: %d\n",
818                         ret);
819                 goto err_free_reqs;
820         }
821
822         ret = vbg_reset_host_capabilities(gdev);
823         if (ret) {
824                 vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
825                         ret);
826                 goto err_free_reqs;
827         }
828
829         ret = vbg_core_set_mouse_status(gdev, 0);
830         if (ret) {
831                 vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
832                 goto err_free_reqs;
833         }
834
835         /* These may fail without requiring the driver init to fail. */
836         vbg_guest_mappings_init(gdev);
837         vbg_heartbeat_init(gdev);
838
839         /* All Done! */
840         ret = vbg_report_driver_status(gdev, true);
841         if (ret < 0)
842                 vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
843
844         return 0;
845
846 err_free_reqs:
847         vbg_req_free(gdev->mouse_status_req,
848                      sizeof(*gdev->mouse_status_req));
849         vbg_req_free(gdev->ack_events_req,
850                      sizeof(*gdev->ack_events_req));
851         vbg_req_free(gdev->cancel_req,
852                      sizeof(*gdev->cancel_req));
853         vbg_req_free(gdev->mem_balloon.change_req,
854                      sizeof(*gdev->mem_balloon.change_req));
855         vbg_req_free(gdev->mem_balloon.get_req,
856                      sizeof(*gdev->mem_balloon.get_req));
857         return ret;
858 }
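/*
 * Rough usage sketch (illustrative only; the real PCI glue lives in
 * vboxguest_linux.c and may differ in detail): the native probe code is
 * expected to map the VMMDev resources, call vbg_core_init() and only then
 * install the interrupt handler, e.g. roughly:
 *
 *	ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
 *	if (ret)
 *		goto err_unmap;
 *	ret = request_irq(pci->irq, vbg_core_isr, IRQF_SHARED,
 *			  "vboxguest", gdev);
 *
 * where err_unmap and pci are placeholders for the caller's own cleanup
 * label and PCI device.
 */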
859
860 /**
861  * Call this on exit to clean up vboxguest-core managed resources.
862  *
863  * The native code should call this before the driver is unloaded,
864  * but not on shutdown.
865  * @gdev:               The Guest extension device.
866  */
867 void vbg_core_exit(struct vbg_dev *gdev)
868 {
869         vbg_heartbeat_exit(gdev);
870         vbg_guest_mappings_exit(gdev);
871
872         /* Clear the host flags (mouse status etc). */
873         vbg_reset_host_event_filter(gdev, 0);
874         vbg_reset_host_capabilities(gdev);
875         vbg_core_set_mouse_status(gdev, 0);
876
877         vbg_req_free(gdev->mouse_status_req,
878                      sizeof(*gdev->mouse_status_req));
879         vbg_req_free(gdev->ack_events_req,
880                      sizeof(*gdev->ack_events_req));
881         vbg_req_free(gdev->cancel_req,
882                      sizeof(*gdev->cancel_req));
883         vbg_req_free(gdev->mem_balloon.change_req,
884                      sizeof(*gdev->mem_balloon.change_req));
885         vbg_req_free(gdev->mem_balloon.get_req,
886                      sizeof(*gdev->mem_balloon.get_req));
887 }
888
889 /**
890  * Creates a VBoxGuest user session.
891  *
892  * vboxguest_linux.c calls this when userspace opens the char-device.
893  * Return: A pointer to the new session or an ERR_PTR on error.
894  * @gdev:               The Guest extension device.
895  * @user:               Set if this is a session for the vboxuser device.
896  */
897 struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
898 {
899         struct vbg_session *session;
900
901         session = kzalloc(sizeof(*session), GFP_KERNEL);
902         if (!session)
903                 return ERR_PTR(-ENOMEM);
904
905         session->gdev = gdev;
906         session->user_session = user;
907
908         return session;
909 }
910
911 /**
912  * Closes a VBoxGuest session.
913  * @session:            The session to close (and free).
914  */
915 void vbg_core_close_session(struct vbg_session *session)
916 {
917         struct vbg_dev *gdev = session->gdev;
918         int i, rc;
919
920         vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
921         vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
922
923         for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
924                 if (!session->hgcm_client_ids[i])
925                         continue;
926
927                 vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
928         }
929
930         kfree(session);
931 }
932
933 static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
934                          size_t out_size)
935 {
936         if (hdr->size_in  != (sizeof(*hdr) + in_size) ||
937             hdr->size_out != (sizeof(*hdr) + out_size))
938                 return -EINVAL;
939
940         return 0;
941 }
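/*
 * Size-check example (illustrative): for VBG_IOCTL_HGCM_CONNECT the caller
 * must set hdr.size_in to sizeof(hdr) + sizeof(conn->u.in) and hdr.size_out
 * to sizeof(hdr) + sizeof(conn->u.out); anything else makes vbg_ioctl_chk()
 * fail the request with -EINVAL before the handler runs.
 */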
942
943 static int vbg_ioctl_driver_version_info(
944         struct vbg_ioctl_driver_version_info *info)
945 {
946         const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
947         u16 min_maj_version, req_maj_version;
948
949         if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
950                 return -EINVAL;
951
952         req_maj_version = info->u.in.req_version >> 16;
953         min_maj_version = info->u.in.min_version >> 16;
954
955         if (info->u.in.min_version > info->u.in.req_version ||
956             min_maj_version != req_maj_version)
957                 return -EINVAL;
958
959         if (info->u.in.min_version <= VBG_IOC_VERSION &&
960             min_maj_version == vbg_maj_version) {
961                 info->u.out.session_version = VBG_IOC_VERSION;
962         } else {
963                 info->u.out.session_version = U32_MAX;
964                 info->hdr.rc = VERR_VERSION_MISMATCH;
965         }
966
967         info->u.out.driver_version  = VBG_IOC_VERSION;
968         info->u.out.driver_revision = 0;
969         info->u.out.reserved1      = 0;
970         info->u.out.reserved2      = 0;
971
972         return 0;
973 }
974
975 static bool vbg_wait_event_cond(struct vbg_dev *gdev,
976                                 struct vbg_session *session,
977                                 u32 event_mask)
978 {
979         unsigned long flags;
980         bool wakeup;
981         u32 events;
982
983         spin_lock_irqsave(&gdev->event_spinlock, flags);
984
985         events = gdev->pending_events & event_mask;
986         wakeup = events || session->cancel_waiters;
987
988         spin_unlock_irqrestore(&gdev->event_spinlock, flags);
989
990         return wakeup;
991 }
992
993 /* Must be called with the event_spinlock held */
994 static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
995                                      struct vbg_session *session,
996                                      u32 event_mask)
997 {
998         u32 events = gdev->pending_events & event_mask;
999
1000         gdev->pending_events &= ~events;
1001         return events;
1002 }
1003
1004 static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
1005                                      struct vbg_session *session,
1006                                      struct vbg_ioctl_wait_for_events *wait)
1007 {
1008         u32 timeout_ms = wait->u.in.timeout_ms;
1009         u32 event_mask = wait->u.in.events;
1010         unsigned long flags;
1011         long timeout;
1012         int ret = 0;
1013
1014         if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
1015                 return -EINVAL;
1016
1017         if (timeout_ms == U32_MAX)
1018                 timeout = MAX_SCHEDULE_TIMEOUT;
1019         else
1020                 timeout = msecs_to_jiffies(timeout_ms);
1021
1022         wait->u.out.events = 0;
1023         do {
1024                 timeout = wait_event_interruptible_timeout(
1025                                 gdev->event_wq,
1026                                 vbg_wait_event_cond(gdev, session, event_mask),
1027                                 timeout);
1028
1029                 spin_lock_irqsave(&gdev->event_spinlock, flags);
1030
1031                 if (timeout < 0 || session->cancel_waiters) {
1032                         ret = -EINTR;
1033                 } else if (timeout == 0) {
1034                         ret = -ETIMEDOUT;
1035                 } else {
1036                         wait->u.out.events =
1037                            vbg_consume_events_locked(gdev, session, event_mask);
1038                 }
1039
1040                 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1041
1042                 /*
1043                  * Someone else may have consumed the event(s) first, in
1044                  * which case we go back to waiting.
1045                  */
1046         } while (ret == 0 && wait->u.out.events == 0);
1047
1048         return ret;
1049 }
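/*
 * Caller-side sketch (illustrative, hypothetical userspace snippet; fd is
 * assumed to be an open vboxguest char-device):
 *
 *	struct vbg_ioctl_wait_for_events wait = { 0 };
 *
 *	wait.hdr.size_in  = sizeof(wait.hdr) + sizeof(wait.u.in);
 *	wait.hdr.size_out = sizeof(wait.hdr) + sizeof(wait.u.out);
 *	wait.hdr.version  = VBG_IOCTL_HDR_VERSION;
 *	wait.hdr.type     = VBG_IOCTL_HDR_TYPE_DEFAULT;
 *	wait.u.in.timeout_ms = 5000;
 *	wait.u.in.events     = VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
 *	ioctl(fd, VBG_IOCTL_WAIT_FOR_EVENTS, &wait);
 *
 * On success the consumed event bits come back in wait.u.out.events; the
 * call fails with EINTR when interrupted or cancelled and ETIMEDOUT when
 * nothing arrived in time.
 */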
1050
1051 static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
1052                                                struct vbg_session *session,
1053                                                struct vbg_ioctl_hdr *hdr)
1054 {
1055         unsigned long flags;
1056
1057         if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
1058                 return -EINVAL;
1059
1060         spin_lock_irqsave(&gdev->event_spinlock, flags);
1061         session->cancel_waiters = true;
1062         spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1063
1064         wake_up(&gdev->event_wq);
1065
1066         return 0;
1067 }
1068
1069 /**
1070  * Checks if the VMM request is allowed in the context of the given session.
1071  * Return: 0 or negative errno value.
1072  * @gdev:               The Guest extension device.
1073  * @session:            The calling session.
1074  * @req:                The request.
1075  */
1076 static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
1077                            const struct vmmdev_request_header *req)
1078 {
1079         const struct vmmdev_guest_status *guest_status;
1080         bool trusted_apps_only;
1081
1082         switch (req->request_type) {
1083         /* Trusted users apps only. */
1084         case VMMDEVREQ_QUERY_CREDENTIALS:
1085         case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
1086         case VMMDEVREQ_REGISTER_SHARED_MODULE:
1087         case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
1088         case VMMDEVREQ_WRITE_COREDUMP:
1089         case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
1090         case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
1091         case VMMDEVREQ_CHECK_SHARED_MODULES:
1092         case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
1093         case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
1094         case VMMDEVREQ_REPORT_GUEST_STATS:
1095         case VMMDEVREQ_REPORT_GUEST_USER_STATE:
1096         case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
1097                 trusted_apps_only = true;
1098                 break;
1099
1100         /* Anyone. */
1101         case VMMDEVREQ_GET_MOUSE_STATUS:
1102         case VMMDEVREQ_SET_MOUSE_STATUS:
1103         case VMMDEVREQ_SET_POINTER_SHAPE:
1104         case VMMDEVREQ_GET_HOST_VERSION:
1105         case VMMDEVREQ_IDLE:
1106         case VMMDEVREQ_GET_HOST_TIME:
1107         case VMMDEVREQ_SET_POWER_STATUS:
1108         case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
1109         case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
1110         case VMMDEVREQ_REPORT_GUEST_STATUS:
1111         case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
1112         case VMMDEVREQ_VIDEMODE_SUPPORTED:
1113         case VMMDEVREQ_GET_HEIGHT_REDUCTION:
1114         case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
1115         case VMMDEVREQ_VIDEMODE_SUPPORTED2:
1116         case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
1117         case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
1118         case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
1119         case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
1120         case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
1121         case VMMDEVREQ_GET_VRDPCHANGE_REQ:
1122         case VMMDEVREQ_LOG_STRING:
1123         case VMMDEVREQ_GET_SESSION_ID:
1124                 trusted_apps_only = false;
1125                 break;
1126
1127         /* Depends on the request parameters... */
1128         case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
1129                 guest_status = (const struct vmmdev_guest_status *)req;
1130                 switch (guest_status->facility) {
1131                 case VBOXGUEST_FACILITY_TYPE_ALL:
1132                 case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
1133                         vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
1134                                 guest_status->facility);
1135                         return -EPERM;
1136                 case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
1137                         trusted_apps_only = true;
1138                         break;
1139                 case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
1140                 case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
1141                 case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
1142                 default:
1143                         trusted_apps_only = false;
1144                         break;
1145                 }
1146                 break;
1147
1148         /* Anything else is not allowed. */
1149         default:
1150                 vbg_err("Denying userspace vmm call type %#08x\n",
1151                         req->request_type);
1152                 return -EPERM;
1153         }
1154
1155         if (trusted_apps_only && session->user_session) {
1156                 vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
1157                         req->request_type);
1158                 return -EPERM;
1159         }
1160
1161         return 0;
1162 }
1163
1164 static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
1165                                 struct vbg_session *session, void *data)
1166 {
1167         struct vbg_ioctl_hdr *hdr = data;
1168         int ret;
1169
1170         if (hdr->size_in != hdr->size_out)
1171                 return -EINVAL;
1172
1173         if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
1174                 return -E2BIG;
1175
1176         if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
1177                 return -EINVAL;
1178
1179         ret = vbg_req_allowed(gdev, session, data);
1180         if (ret < 0)
1181                 return ret;
1182
1183         vbg_req_perform(gdev, data);
1184         WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
1185
1186         return 0;
1187 }
1188
1189 static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
1190                                   struct vbg_session *session,
1191                                   struct vbg_ioctl_hgcm_connect *conn)
1192 {
1193         u32 client_id;
1194         int i, ret;
1195
1196         if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
1197                 return -EINVAL;
1198
1199         /* Find a free place in the sessions clients array and claim it */
1200         mutex_lock(&gdev->session_mutex);
1201         for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1202                 if (!session->hgcm_client_ids[i]) {
1203                         session->hgcm_client_ids[i] = U32_MAX;
1204                         break;
1205                 }
1206         }
1207         mutex_unlock(&gdev->session_mutex);
1208
1209         if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1210                 return -EMFILE;
1211
1212         ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
1213                                &conn->hdr.rc);
1214
1215         mutex_lock(&gdev->session_mutex);
1216         if (ret == 0 && conn->hdr.rc >= 0) {
1217                 conn->u.out.client_id = client_id;
1218                 session->hgcm_client_ids[i] = client_id;
1219         } else {
1220                 conn->u.out.client_id = 0;
1221                 session->hgcm_client_ids[i] = 0;
1222         }
1223         mutex_unlock(&gdev->session_mutex);
1224
1225         return ret;
1226 }
1227
1228 static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
1229                                      struct vbg_session *session,
1230                                      struct vbg_ioctl_hgcm_disconnect *disconn)
1231 {
1232         u32 client_id;
1233         int i, ret;
1234
1235         if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
1236                 return -EINVAL;
1237
1238         client_id = disconn->u.in.client_id;
1239         if (client_id == 0 || client_id == U32_MAX)
1240                 return -EINVAL;
1241
1242         mutex_lock(&gdev->session_mutex);
1243         for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1244                 if (session->hgcm_client_ids[i] == client_id) {
1245                         session->hgcm_client_ids[i] = U32_MAX;
1246                         break;
1247                 }
1248         }
1249         mutex_unlock(&gdev->session_mutex);
1250
1251         if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1252                 return -EINVAL;
1253
1254         ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
1255
1256         mutex_lock(&gdev->session_mutex);
1257         if (ret == 0 && disconn->hdr.rc >= 0)
1258                 session->hgcm_client_ids[i] = 0;
1259         else
1260                 session->hgcm_client_ids[i] = client_id;
1261         mutex_unlock(&gdev->session_mutex);
1262
1263         return ret;
1264 }
1265
1266 static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
1267 {
1268         switch (type) {
1269         case VMMDEV_HGCM_PARM_TYPE_32BIT:
1270         case VMMDEV_HGCM_PARM_TYPE_64BIT:
1271         case VMMDEV_HGCM_PARM_TYPE_LINADDR:
1272         case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
1273         case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
1274                 return true;
1275         default:
1276                 return false;
1277         }
1278 }
1279
1280 static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1281                                struct vbg_session *session, bool f32bit,
1282                                struct vbg_ioctl_hgcm_call *call)
1283 {
1284         size_t actual_size;
1285         u32 client_id;
1286         int i, ret;
1287
1288         if (call->hdr.size_in < sizeof(*call))
1289                 return -EINVAL;
1290
1291         if (call->hdr.size_in != call->hdr.size_out)
1292                 return -EINVAL;
1293
1294         if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
1295                 return -E2BIG;
1296
1297         client_id = call->client_id;
1298         if (client_id == 0 || client_id == U32_MAX)
1299                 return -EINVAL;
1300
1301         actual_size = sizeof(*call);
1302         if (f32bit)
1303                 actual_size += call->parm_count *
1304                                sizeof(struct vmmdev_hgcm_function_parameter32);
1305         else
1306                 actual_size += call->parm_count *
1307                                sizeof(struct vmmdev_hgcm_function_parameter);
1308         if (call->hdr.size_in < actual_size) {
1309                 vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
1310                           call->hdr.size_in, actual_size);
1311                 return -EINVAL;
1312         }
1313         call->hdr.size_out = actual_size;
1314
1315         /* Validate parameter types */
1316         if (f32bit) {
1317                 struct vmmdev_hgcm_function_parameter32 *parm =
1318                         VBG_IOCTL_HGCM_CALL_PARMS32(call);
1319
1320                 for (i = 0; i < call->parm_count; i++)
1321                         if (!vbg_param_valid(parm[i].type))
1322                                 return -EINVAL;
1323         } else {
1324                 struct vmmdev_hgcm_function_parameter *parm =
1325                         VBG_IOCTL_HGCM_CALL_PARMS(call);
1326
1327                 for (i = 0; i < call->parm_count; i++)
1328                         if (!vbg_param_valid(parm[i].type))
1329                                 return -EINVAL;
1330         }
1331
1332         /*
1333          * Validate the client id.
1334          */
1335         mutex_lock(&gdev->session_mutex);
1336         for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
1337                 if (session->hgcm_client_ids[i] == client_id)
1338                         break;
1339         mutex_unlock(&gdev->session_mutex);
1340         if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
1341                 vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
1342                           client_id);
1343                 return -EINVAL;
1344         }
1345
1346         if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
1347                 ret = vbg_hgcm_call32(gdev, client_id,
1348                                       call->function, call->timeout_ms,
1349                                       VBG_IOCTL_HGCM_CALL_PARMS32(call),
1350                                       call->parm_count, &call->hdr.rc);
1351         else
1352                 ret = vbg_hgcm_call(gdev, client_id,
1353                                     call->function, call->timeout_ms,
1354                                     VBG_IOCTL_HGCM_CALL_PARMS(call),
1355                                     call->parm_count, &call->hdr.rc);
1356
1357         if (ret == -E2BIG) {
1358                 /* E2BIG needs to be reported through the hdr.rc field. */
1359                 call->hdr.rc = VERR_OUT_OF_RANGE;
1360                 ret = 0;
1361         }
1362
1363         if (ret && ret != -EINTR && ret != -ETIMEDOUT)
1364                 vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
1365
1366         return ret;
1367 }
1368
1369 static int vbg_ioctl_log(struct vbg_ioctl_log *log)
1370 {
1371         if (log->hdr.size_out != sizeof(log->hdr))
1372                 return -EINVAL;
1373
1374         vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
1375                  log->u.in.msg);
1376
1377         return 0;
1378 }
1379
1380 static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
1381                                         struct vbg_session *session,
1382                                         struct vbg_ioctl_change_filter *filter)
1383 {
1384         u32 or_mask, not_mask;
1385
1386         if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
1387                 return -EINVAL;
1388
1389         or_mask = filter->u.in.or_mask;
1390         not_mask = filter->u.in.not_mask;
1391
1392         if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1393                 return -EINVAL;
1394
1395         return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
1396                                             false);
1397 }
1398
1399 static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
1400              struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
1401 {
1402         u32 or_mask, not_mask;
1403         int ret;
1404
1405         if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
1406                 return -EINVAL;
1407
1408         or_mask = caps->u.in.or_mask;
1409         not_mask = caps->u.in.not_mask;
1410
1411         if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
1412                 return -EINVAL;
1413
1414         ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
1415                                            false);
1416         if (ret)
1417                 return ret;
1418
1419         caps->u.out.session_caps = session->guest_caps;
1420         caps->u.out.global_caps = gdev->guest_caps_host;
1421
1422         return 0;
1423 }
1424
1425 static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
1426                                    struct vbg_ioctl_check_balloon *balloon_info)
1427 {
1428         if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
1429                 return -EINVAL;
1430
1431         balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
1432         /*
1433          * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
1434          * events entirely in the kernel, see vbg_core_isr().
1435          */
1436         balloon_info->u.out.handle_in_r3 = false;
1437
1438         return 0;
1439 }
1440
1441 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1442                                      struct vbg_ioctl_write_coredump *dump)
1443 {
1444         struct vmmdev_write_core_dump *req;
1445
1446         if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
1447                 return -EINVAL;
1448
1449         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
1450         if (!req)
1451                 return -ENOMEM;
1452
1453         req->flags = dump->u.in.flags;
1454         dump->hdr.rc = vbg_req_perform(gdev, req);
1455
1456         vbg_req_free(req, sizeof(*req));
1457         return 0;
1458 }
1459
1460 /**
1461  * Common IOCtl for user to kernel communication.
1462  * Return: 0 or negative errno value.
1463  * @session:    The client session.
1464  * @req:        The requested function.
1465  * @data:       The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
1466  */
1467 int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
1468 {
1469         unsigned int req_no_size = req & ~IOCSIZE_MASK;
1470         struct vbg_dev *gdev = session->gdev;
1471         struct vbg_ioctl_hdr *hdr = data;
1472         bool f32bit = false;
1473
1474         hdr->rc = VINF_SUCCESS;
1475         if (!hdr->size_out)
1476                 hdr->size_out = hdr->size_in;
1477
1478         /*
1479          * hdr->version and hdr->size_in / hdr->size_out minimum size are
1480          * already checked by vbg_misc_device_ioctl().
1481          */
1482
1483         /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
1484         if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
1485             req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
1486             req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
1487                 return vbg_ioctl_vmmrequest(gdev, session, data);
1488
1489         if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
1490                 return -EINVAL;
1491
1492         /* Fixed size requests. */
1493         switch (req) {
1494         case VBG_IOCTL_DRIVER_VERSION_INFO:
1495                 return vbg_ioctl_driver_version_info(data);
1496         case VBG_IOCTL_HGCM_CONNECT:
1497                 return vbg_ioctl_hgcm_connect(gdev, session, data);
1498         case VBG_IOCTL_HGCM_DISCONNECT:
1499                 return vbg_ioctl_hgcm_disconnect(gdev, session, data);
1500         case VBG_IOCTL_WAIT_FOR_EVENTS:
1501                 return vbg_ioctl_wait_for_events(gdev, session, data);
1502         case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
1503                 return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
1504         case VBG_IOCTL_CHANGE_FILTER_MASK:
1505                 return vbg_ioctl_change_filter_mask(gdev, session, data);
1506         case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
1507                 return vbg_ioctl_change_guest_capabilities(gdev, session, data);
1508         case VBG_IOCTL_CHECK_BALLOON:
1509                 return vbg_ioctl_check_balloon(gdev, data);
1510         case VBG_IOCTL_WRITE_CORE_DUMP:
1511                 return vbg_ioctl_write_core_dump(gdev, data);
1512         }
1513
1514         /* Variable sized requests. */
1515         switch (req_no_size) {
1516 #ifdef CONFIG_COMPAT
1517         case VBG_IOCTL_HGCM_CALL_32(0):
1518                 f32bit = true;
1519                 /* Fall through */
1520 #endif
1521         case VBG_IOCTL_HGCM_CALL(0):
1522                 return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
1523         case VBG_IOCTL_LOG(0):
1524         case VBG_IOCTL_LOG_ALT(0):
1525                 return vbg_ioctl_log(data);
1526         }
1527
1528         vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
1529         return -ENOTTY;
1530 }
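/*
 * Dispatch note (illustrative): the variable-sized cases match on
 * req & ~IOCSIZE_MASK, so e.g. an HGCM call carrying three parameters still
 * hits the VBG_IOCTL_HGCM_CALL(0) case even though its encoded size differs,
 * while fixed-size requests such as VBG_IOCTL_HGCM_CONNECT are matched on
 * the full ioctl number.
 */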
1531
1532 /**
1533  * Report guest supported mouse-features to the host.
1534  *
1535  * Return: 0 or negative errno value.
1536  * @gdev:               The Guest extension device.
1537  * @features:           The set of features to report to the host.
1538  */
1539 int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1540 {
1541         struct vmmdev_mouse_status *req;
1542         int rc;
1543
1544         req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
1545         if (!req)
1546                 return -ENOMEM;
1547
1548         req->mouse_features = features;
1549         req->pointer_pos_x = 0;
1550         req->pointer_pos_y = 0;
1551
1552         rc = vbg_req_perform(gdev, req);
1553         if (rc < 0)
1554                 vbg_err("%s error, rc: %d\n", __func__, rc);
1555
1556         vbg_req_free(req, sizeof(*req));
1557         return vbg_status_code_to_errno(rc);
1558 }
1559
1560 /** Core interrupt service routine. */
1561 irqreturn_t vbg_core_isr(int irq, void *dev_id)
1562 {
1563         struct vbg_dev *gdev = dev_id;
1564         struct vmmdev_events *req = gdev->ack_events_req;
1565         bool mouse_position_changed = false;
1566         unsigned long flags;
1567         u32 events = 0;
1568         int rc;
1569
1570         if (!gdev->mmio->V.V1_04.have_events)
1571                 return IRQ_NONE;
1572
1573         /* Get and acknowledge events. */
1574         req->header.rc = VERR_INTERNAL_ERROR;
1575         req->events = 0;
1576         rc = vbg_req_perform(gdev, req);
1577         if (rc < 0) {
1578                 vbg_err("Error performing events req, rc: %d\n", rc);
1579                 return IRQ_NONE;
1580         }
1581
1582         events = req->events;
1583
1584         if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
1585                 mouse_position_changed = true;
1586                 events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
1587         }
1588
1589         if (events & VMMDEV_EVENT_HGCM) {
1590                 wake_up(&gdev->hgcm_wq);
1591                 events &= ~VMMDEV_EVENT_HGCM;
1592         }
1593
1594         if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
1595                 schedule_work(&gdev->mem_balloon.work);
1596                 events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1597         }
1598
1599         if (events) {
1600                 spin_lock_irqsave(&gdev->event_spinlock, flags);
1601                 gdev->pending_events |= events;
1602                 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1603
1604                 wake_up(&gdev->event_wq);
1605         }
1606
1607         if (mouse_position_changed)
1608                 vbg_linux_mouse_event(gdev);
1609
1610         return IRQ_HANDLED;
1611 }