/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#include <drm/ttm/ttm_placement.h>

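/*
 * Delay before flushing a marked-dirty region to the device:
 * HZ / 30 is roughly 33 ms, i.e. at most ~30 flushes per second.
 */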
#define VMW_DIRTY_DELAY (HZ / 30)

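/*
 * Per-device fbdev state. @vmalloc is a system-memory shadow buffer
 * that the fbdev layer draws into; its dirty region, tracked in @dirty,
 * is periodically copied into the buffer object @vmw_bo (kmapped at
 * @bo_ptr) and flushed to the device.
 */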
struct vmw_fb_par {
        struct vmw_private *vmw_priv;

        void *vmalloc;

        struct mutex bo_mutex;
        struct vmw_dma_buffer *vmw_bo;
        struct ttm_bo_kmap_obj map;
        void *bo_ptr;
        unsigned bo_size;
        struct drm_framebuffer *set_fb;
        struct drm_display_mode *set_mode;
        u32 fb_x;
        u32 fb_y;
        bool bo_iowrite;

        u32 pseudo_palette[17];

        unsigned max_width;
        unsigned max_height;

        struct {
                spinlock_t lock;
                bool active;
                unsigned x1;
                unsigned y1;
                unsigned x2;
                unsigned y2;
        } dirty;

        struct drm_crtc *crtc;
        struct drm_connector *con;
        struct delayed_work local_work;
};

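/*
 * fb_setcolreg callback. Only the 16-entry truecolor pseudo palette is
 * supported; each entry is packed as XRGB8888 from the 16-bit color
 * components.
 */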
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
                            unsigned blue, unsigned transp,
                            struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        u32 *pal = par->pseudo_palette;

        if (regno > 15) {
                DRM_ERROR("Bad regno %u.\n", regno);
                return 1;
        }

        switch (par->set_fb->format->depth) {
        case 24:
        case 32:
                pal[regno] = ((red & 0xff00) << 8) |
                              (green & 0xff00) |
                             ((blue  & 0xff00) >> 8);
                break;
        default:
                DRM_ERROR("Bad depth %u, bpp %u.\n",
                          par->set_fb->format->depth,
                          par->set_fb->format->cpp[0] * 8);
                return 1;
        }

        return 0;
}

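/*
 * fb_check_var callback. Only 32 bpp is accepted (depth 24 or 32,
 * depending on whether an alpha channel is requested), and the
 * requested geometry must fit both the framebuffer and the available
 * VRAM.
 */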
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info)
{
        int depth = var->bits_per_pixel;
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;

        switch (var->bits_per_pixel) {
        case 32:
                depth = (var->transp.length > 0) ? 32 : 24;
                break;
        default:
                DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
                return -EINVAL;
        }

        switch (depth) {
        case 24:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 0;
                var->transp.offset = 0;
                break;
        case 32:
                var->red.offset = 16;
                var->green.offset = 8;
                var->blue.offset = 0;
                var->red.length = 8;
                var->green.length = 8;
                var->blue.length = 8;
                var->transp.length = 8;
                var->transp.offset = 24;
                break;
        default:
                DRM_ERROR("Bad depth %u.\n", depth);
                return -EINVAL;
        }

        if ((var->xoffset + var->xres) > par->max_width ||
            (var->yoffset + var->yres) > par->max_height) {
                DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
                return -EINVAL;
        }

        if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        var->xres * var->bits_per_pixel / 8,
                                        var->yoffset + var->yres)) {
                DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
                return -EINVAL;
        }

        return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
        return 0;
}

/*
 * Dirty code
 */

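/*
 * Worker that copies the dirty region of the vmalloc shadow buffer
 * into the kmapped buffer object, then asks KMS to flush the clip
 * rectangle to the device.
 */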
static void vmw_fb_dirty_flush(struct work_struct *work)
{
        struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
                                              local_work.work);
        struct vmw_private *vmw_priv = par->vmw_priv;
        struct fb_info *info = vmw_priv->fb_info;
        unsigned long irq_flags;
        s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
        u32 cpp, max_x, max_y;
        struct drm_clip_rect clip;
        struct drm_framebuffer *cur_fb;
        u8 *src_ptr, *dst_ptr;

        if (vmw_priv->suspended)
                return;

        mutex_lock(&par->bo_mutex);
        cur_fb = par->set_fb;
        if (!cur_fb)
                goto out_unlock;

        spin_lock_irqsave(&par->dirty.lock, irq_flags);
        if (!par->dirty.active) {
                spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
                goto out_unlock;
        }

        /*
         * Handle panning when copying from vmalloc to framebuffer.
         * Clip dirty area to framebuffer.
         */
        cpp = cur_fb->format->cpp[0];
        max_x = par->fb_x + cur_fb->width;
        max_y = par->fb_y + cur_fb->height;

        dst_x1 = par->dirty.x1 - par->fb_x;
        dst_y1 = par->dirty.y1 - par->fb_y;
        dst_x1 = max_t(s32, dst_x1, 0);
        dst_y1 = max_t(s32, dst_y1, 0);

        dst_x2 = par->dirty.x2 - par->fb_x;
        dst_y2 = par->dirty.y2 - par->fb_y;
        dst_x2 = min_t(s32, dst_x2, max_x);
        dst_y2 = min_t(s32, dst_y2, max_y);
        w = dst_x2 - dst_x1;
        h = dst_y2 - dst_y1;
        w = max_t(s32, 0, w);
        h = max_t(s32, 0, h);

        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

        if (w && h) {
                dst_ptr = (u8 *)par->bo_ptr +
                        (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
                src_ptr = (u8 *)par->vmalloc +
                        ((dst_y1 + par->fb_y) * info->fix.line_length +
                         (dst_x1 + par->fb_x) * cpp);

                while (h-- > 0) {
                        memcpy(dst_ptr, src_ptr, w * cpp);
                        dst_ptr += par->set_fb->pitches[0];
                        src_ptr += info->fix.line_length;
                }

                clip.x1 = dst_x1;
                clip.x2 = dst_x2;
                clip.y1 = dst_y1;
                clip.y2 = dst_y2;

                WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
                                                       &clip, 1));
                vmw_fifo_flush(vmw_priv, false);
        }
out_unlock:
        mutex_unlock(&par->bo_mutex);
}

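/*
 * Extend (or start) the dirty rectangle to cover the given region.
 * Starting a new rectangle also schedules the flush work, which is
 * shared with the deferred I/O path.
 */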
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
                              unsigned x1, unsigned y1,
                              unsigned width, unsigned height)
{
        unsigned long flags;
        unsigned x2 = x1 + width;
        unsigned y2 = y1 + height;

        spin_lock_irqsave(&par->dirty.lock, flags);
        if (par->dirty.x1 == par->dirty.x2) {
                par->dirty.x1 = x1;
                par->dirty.y1 = y1;
                par->dirty.x2 = x2;
                par->dirty.y2 = y2;
                /*
                 * If dirty tracking is active, schedule the flush work.
                 * The work is shared with the defio system.
                 */
                if (par->dirty.active)
                        schedule_delayed_work(&par->local_work,
                                              VMW_DIRTY_DELAY);
        } else {
                if (x1 < par->dirty.x1)
                        par->dirty.x1 = x1;
                if (y1 < par->dirty.y1)
                        par->dirty.y1 = y1;
                if (x2 > par->dirty.x2)
                        par->dirty.x2 = x2;
                if (y2 > par->dirty.y2)
                        par->dirty.y2 = y2;
        }
        spin_unlock_irqrestore(&par->dirty.lock, flags);
}

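/*
 * fb_pan_display callback: move the visible viewport inside the
 * virtual framebuffer and mark the whole scanout area dirty.
 */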
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
                              struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;

        if ((var->xoffset + var->xres) > var->xres_virtual ||
            (var->yoffset + var->yres) > var->yres_virtual) {
                DRM_ERROR("Requested panning cannot fit in framebuffer\n");
                return -EINVAL;
        }

        mutex_lock(&par->bo_mutex);
        par->fb_x = var->xoffset;
        par->fb_y = var->yoffset;
        if (par->set_fb)
                vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
                                  par->set_fb->height);
        mutex_unlock(&par->bo_mutex);

        return 0;
}

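/*
 * fb_deferred_io callback: translate the list of touched pages into a
 * span of dirty scanlines and schedule an immediate flush.
 */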
static void vmw_deferred_io(struct fb_info *info,
                            struct list_head *pagelist)
{
        struct vmw_fb_par *par = info->par;
        unsigned long start, end, min, max;
        unsigned long flags;
        struct page *page;
        int y1, y2;

        min = ULONG_MAX;
        max = 0;
        list_for_each_entry(page, pagelist, lru) {
                start = page->index << PAGE_SHIFT;
                end = start + PAGE_SIZE - 1;
                min = min(min, start);
                max = max(max, end);
        }

        if (min < max) {
                y1 = min / info->fix.line_length;
                y2 = (max / info->fix.line_length) + 1;

                spin_lock_irqsave(&par->dirty.lock, flags);
                par->dirty.x1 = 0;
                par->dirty.y1 = y1;
                par->dirty.x2 = info->var.xres;
                par->dirty.y2 = y2;
                spin_unlock_irqrestore(&par->dirty.lock, flags);

                /*
                 * Since we've already waited on this work once, try to
                 * execute asap.
                 */
                cancel_delayed_work(&par->local_work);
                schedule_delayed_work(&par->local_work, 0);
        }
}

static struct fb_deferred_io vmw_defio = {
        .delay          = VMW_DIRTY_DELAY,
        .deferred_io    = vmw_deferred_io,
};

/*
 * Draw code
 */

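/*
 * The drawing callbacks below wrap the generic cfb_* helpers, which
 * render into the vmalloc shadow buffer, and then mark the touched
 * rectangle dirty so it is eventually flushed to the device.
 */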
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        cfb_fillrect(info, rect);
        vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
                          rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        cfb_copyarea(info, region);
        vmw_fb_dirty_mark(info->par, region->dx, region->dy,
                          region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        cfb_imageblit(info, image);
        vmw_fb_dirty_mark(info->par, image->dx, image->dy,
                          image->width, image->height);
}

/*
 * Bring up code
 */

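/*
 * Allocate and initialize a buffer object of the given size, placed in
 * system memory, under the TTM write lock.
 */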
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
                            size_t size, struct vmw_dma_buffer **out)
{
        struct vmw_dma_buffer *vmw_bo;
        int ret;

        (void) ttm_write_lock(&vmw_priv->reservation_sem, false);

        vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
        if (!vmw_bo) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
                              &vmw_sys_placement,
                              false,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto err_unlock; /* init frees the buffer on failure */

        *out = vmw_bo;
        ttm_write_unlock(&vmw_priv->reservation_sem);

        return 0;

err_unlock:
        ttm_write_unlock(&vmw_priv->reservation_sem);
        return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
                                int *depth)
{
        switch (var->bits_per_pixel) {
        case 32:
                *depth = (var->transp.length > 0) ? 32 : 24;
                break;
        default:
                DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
                return -EINVAL;
        }

        return 0;
}

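/*
 * Local equivalent of drm_mode_set_config_internal() that additionally
 * retries with full modeset relocking when the set_config call returns
 * -EDEADLK, while keeping the primary-plane fb refcounts balanced
 * across all crtcs.
 */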
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
        struct drm_crtc *crtc = set->crtc;
        struct drm_framebuffer *fb;
        struct drm_crtc *tmp;
        struct drm_modeset_acquire_ctx *ctx;
        struct drm_device *dev = set->crtc->dev;
        int ret;

        ctx = dev->mode_config.acquire_ctx;

restart:
        /*
         * NOTE: ->set_config can also disable other crtcs (if we steal all
         * connectors from it), hence we need to refcount the fbs across all
         * crtcs. Atomic modeset will have saner semantics ...
         */
        drm_for_each_crtc(tmp, dev)
                tmp->primary->old_fb = tmp->primary->fb;

        fb = set->fb;

        ret = crtc->funcs->set_config(set, ctx);
        if (ret == 0) {
                crtc->primary->crtc = crtc;
                crtc->primary->fb = fb;
        }

        drm_for_each_crtc(tmp, dev) {
                if (tmp->primary->fb)
                        drm_framebuffer_get(tmp->primary->fb);
                if (tmp->primary->old_fb)
                        drm_framebuffer_put(tmp->primary->old_fb);
                tmp->primary->old_fb = NULL;
        }

        if (ret == -EDEADLK) {
                dev->mode_config.acquire_ctx = NULL;

retry_locking:
                drm_modeset_backoff(ctx);

                ret = drm_modeset_lock_all_ctx(dev, ctx);
                if (ret)
                        goto retry_locking;

                dev->mode_config.acquire_ctx = ctx;

                goto restart;
        }

        return ret;
}

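/*
 * Tear down the KMS setup: unset the mode, drop the framebuffer
 * reference, and (optionally) kunmap and unpin or unreference the
 * backing buffer object.
 */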
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
                             bool detach_bo,
                             bool unref_bo)
{
        struct drm_framebuffer *cur_fb = par->set_fb;
        int ret;

        /* Detach the KMS framebuffer from crtcs */
        if (par->set_mode) {
                struct drm_mode_set set;

                set.crtc = par->crtc;
                set.x = 0;
                set.y = 0;
                set.mode = NULL;
                set.fb = NULL;
                set.num_connectors = 0;
                set.connectors = &par->con;
                ret = vmwgfx_set_config_internal(&set);
                if (ret) {
                        DRM_ERROR("Could not unset a mode.\n");
                        return ret;
                }
                drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
                par->set_mode = NULL;
        }

        if (cur_fb) {
                drm_framebuffer_unreference(cur_fb);
                par->set_fb = NULL;
        }

        if (par->vmw_bo && detach_bo) {
                struct vmw_private *vmw_priv = par->vmw_priv;

                if (par->bo_ptr) {
                        ttm_bo_kunmap(&par->map);
                        par->bo_ptr = NULL;
                }
                if (unref_bo)
                        vmw_dmabuf_unreference(&par->vmw_bo);
                else if (vmw_priv->active_display_unit != vmw_du_legacy)
                        vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
        }

        return 0;
}

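/*
 * Make sure par->set_fb is a KMS framebuffer matching the current
 * fbdev var, reusing the existing buffer object when its size still
 * fits and recreating it otherwise.
 */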
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
        struct drm_mode_fb_cmd2 mode_cmd = {0};
        struct vmw_fb_par *par = info->par;
        struct fb_var_screeninfo *var = &info->var;
        struct drm_framebuffer *cur_fb;
        struct vmw_framebuffer *vfb;
        int ret = 0, depth;
        size_t new_bo_size;

        ret = vmw_fb_compute_depth(var, &depth);
        if (ret)
                return ret;

        mode_cmd.width = var->xres;
        mode_cmd.height = var->yres;
        mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
        mode_cmd.pixel_format =
                drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

        cur_fb = par->set_fb;
        if (cur_fb && cur_fb->width == mode_cmd.width &&
            cur_fb->height == mode_cmd.height &&
            cur_fb->format->format == mode_cmd.pixel_format &&
            cur_fb->pitches[0] == mode_cmd.pitches[0])
                return 0;

        /* Do we need a new buffer object? */
        new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
        ret = vmw_fb_kms_detach(par,
                                par->bo_size < new_bo_size ||
                                par->bo_size > 2 * new_bo_size,
                                true);
        if (ret)
                return ret;

        if (!par->vmw_bo) {
                ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
                                       &par->vmw_bo);
                if (ret) {
                        DRM_ERROR("Failed creating a buffer object for "
                                  "fbdev.\n");
                        return ret;
                }
                par->bo_size = new_bo_size;
        }

        vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
                                      true, &mode_cmd);
        if (IS_ERR(vfb))
                return PTR_ERR(vfb);

        par->set_fb = &vfb->base;

        return 0;
}

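/*
 * fb_set_par callback: build a display mode from the current var,
 * validate it against VRAM, (re)create the KMS framebuffer, set the
 * mode, and pin and kmap the backing buffer object if not done yet.
 */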
static int vmw_fb_set_par(struct fb_info *info)
{
        struct vmw_fb_par *par = info->par;
        struct vmw_private *vmw_priv = par->vmw_priv;
        struct drm_mode_set set;
        struct fb_var_screeninfo *var = &info->var;
        struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
                DRM_MODE_TYPE_DRIVER,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
        struct drm_display_mode *mode;
        int ret;

        mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
        if (!mode) {
                DRM_ERROR("Could not create new fb mode.\n");
                return -ENOMEM;
        }

        mode->hdisplay = var->xres;
        mode->vdisplay = var->yres;
        vmw_guess_mode_timing(mode);

        if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        mode->hdisplay *
                                        DIV_ROUND_UP(var->bits_per_pixel, 8),
                                        mode->vdisplay)) {
                drm_mode_destroy(vmw_priv->dev, mode);
                return -EINVAL;
        }

        mutex_lock(&par->bo_mutex);
        drm_modeset_lock_all(vmw_priv->dev);
        ret = vmw_fb_kms_framebuffer(info);
        if (ret)
                goto out_unlock;

        par->fb_x = var->xoffset;
        par->fb_y = var->yoffset;

        set.crtc = par->crtc;
        set.x = 0;
        set.y = 0;
        set.mode = mode;
        set.fb = par->set_fb;
        set.num_connectors = 1;
        set.connectors = &par->con;

        ret = vmwgfx_set_config_internal(&set);
        if (ret)
                goto out_unlock;

        if (!par->bo_ptr) {
                struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);

                /*
                 * Pin before mapping. Since we don't know in what placement
                 * to pin, call into KMS to do it for us. LDU doesn't require
                 * additional pinning because set_config() would've pinned
                 * it already.
                 */
                if (vmw_priv->active_display_unit != vmw_du_legacy) {
                        ret = vfb->pin(vfb);
                        if (ret) {
                                DRM_ERROR("Could not pin the fbdev "
                                          "framebuffer.\n");
                                goto out_unlock;
                        }
                }

                ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
                                  par->vmw_bo->base.num_pages, &par->map);
                if (ret) {
                        if (vmw_priv->active_display_unit != vmw_du_legacy)
                                vfb->unpin(vfb);

                        DRM_ERROR("Could not map the fbdev framebuffer.\n");
                        goto out_unlock;
                }

                par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
        }

        vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
                          par->set_fb->width, par->set_fb->height);

        /*
         * If something was already dirty, vmw_fb_dirty_mark() won't have
         * scheduled new work, so schedule the flush explicitly now.
         */
        schedule_delayed_work(&par->local_work, 0);

out_unlock:
        if (par->set_mode)
                drm_mode_destroy(vmw_priv->dev, par->set_mode);
        par->set_mode = mode;

        drm_modeset_unlock_all(vmw_priv->dev);
        mutex_unlock(&par->bo_mutex);

        return ret;
}

static struct fb_ops vmw_fb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = vmw_fb_check_var,
        .fb_set_par = vmw_fb_set_par,
        .fb_setcolreg = vmw_fb_setcolreg,
        .fb_fillrect = vmw_fb_fillrect,
        .fb_copyarea = vmw_fb_copyarea,
        .fb_imageblit = vmw_fb_imageblit,
        .fb_pan_display = vmw_fb_pan_display,
        .fb_blank = vmw_fb_blank,
};

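/*
 * Set up the fbdev emulation: allocate the fb_info and the vmalloc
 * shadow buffer, query initial mode data from KMS, initialize dirty
 * tracking and deferred I/O, and register the framebuffer.
 */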
int vmw_fb_init(struct vmw_private *vmw_priv)
{
        struct device *device = &vmw_priv->dev->pdev->dev;
        struct vmw_fb_par *par;
        struct fb_info *info;
        unsigned fb_width, fb_height;
        unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
        struct drm_display_mode *init_mode;
        int ret;

        fb_bpp = 32;
        fb_depth = 24;

        /* XXX: Shouldn't these be configurable as well? */
        fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
        fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

        fb_pitch = fb_width * fb_bpp / 8;
        fb_size = fb_pitch * fb_height;
        fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

        info = framebuffer_alloc(sizeof(*par), device);
        if (!info)
                return -ENOMEM;

        /*
         * Par
         */
        vmw_priv->fb_info = info;
        par = info->par;
        memset(par, 0, sizeof(*par));
        INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
        par->vmw_priv = vmw_priv;
        par->vmalloc = NULL;
        par->max_width = fb_width;
        par->max_height = fb_height;

        drm_modeset_lock_all(vmw_priv->dev);
        ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
                                      par->max_height, &par->con,
                                      &par->crtc, &init_mode);
        if (ret) {
                drm_modeset_unlock_all(vmw_priv->dev);
                goto err_kms;
        }

        info->var.xres = init_mode->hdisplay;
        info->var.yres = init_mode->vdisplay;
        drm_modeset_unlock_all(vmw_priv->dev);

        /*
         * Create buffers and alloc memory
         */
        par->vmalloc = vzalloc(fb_size);
        if (unlikely(par->vmalloc == NULL)) {
                ret = -ENOMEM;
                goto err_free;
        }

        /*
         * Fixed and var
         */
        strcpy(info->fix.id, "svgadrmfb");
        info->fix.type = FB_TYPE_PACKED_PIXELS;
        info->fix.visual = FB_VISUAL_TRUECOLOR;
        info->fix.type_aux = 0;
        info->fix.xpanstep = 1; /* doing it in hw */
        info->fix.ypanstep = 1; /* doing it in hw */
        info->fix.ywrapstep = 0;
        info->fix.accel = FB_ACCEL_NONE;
        info->fix.line_length = fb_pitch;

        info->fix.smem_start = 0;
        info->fix.smem_len = fb_size;

        info->pseudo_palette = par->pseudo_palette;
        info->screen_base = (char __iomem *)par->vmalloc;
        info->screen_size = fb_size;

        info->fbops = &vmw_fb_ops;

        /* 24 bit depth by default */
        info->var.red.offset = 16;
        info->var.green.offset = 8;
        info->var.blue.offset = 0;
        info->var.red.length = 8;
        info->var.green.length = 8;
        info->var.blue.length = 8;
        info->var.transp.offset = 0;
        info->var.transp.length = 0;

        info->var.xres_virtual = fb_width;
        info->var.yres_virtual = fb_height;
        info->var.bits_per_pixel = fb_bpp;
        info->var.xoffset = 0;
        info->var.yoffset = 0;
        info->var.activate = FB_ACTIVATE_NOW;
        info->var.height = -1;
        info->var.width = -1;

        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
        info->apertures = alloc_apertures(1);
        if (!info->apertures) {
                ret = -ENOMEM;
                goto err_aper;
        }
        info->apertures->ranges[0].base = vmw_priv->vram_start;
        info->apertures->ranges[0].size = vmw_priv->vram_size;

        /*
         * Dirty & Deferred IO
         */
        par->dirty.x1 = par->dirty.x2 = 0;
        par->dirty.y1 = par->dirty.y2 = 0;
        par->dirty.active = true;
        spin_lock_init(&par->dirty.lock);
        mutex_init(&par->bo_mutex);
        info->fbdefio = &vmw_defio;
        fb_deferred_io_init(info);

        ret = register_framebuffer(info);
        if (unlikely(ret != 0))
                goto err_defio;

        vmw_fb_set_par(info);

        return 0;

err_defio:
        fb_deferred_io_cleanup(info);
err_aper:
err_free:
        vfree(par->vmalloc);
err_kms:
        framebuffer_release(info);
        vmw_priv->fb_info = NULL;

        return ret;
}

int vmw_fb_close(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;

        if (!vmw_priv->fb_info)
                return 0;

        info = vmw_priv->fb_info;
        par = info->par;

        /* ??? order */
        fb_deferred_io_cleanup(info);
        cancel_delayed_work_sync(&par->local_work);
        unregister_framebuffer(info);

        (void) vmw_fb_kms_detach(par, true, true);

        vfree(par->vmalloc);
        framebuffer_release(info);

        return 0;
}

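/*
 * Stop the fbdev emulation without releasing it: disable dirty
 * tracking, flush pending work and detach from KMS. Counterpart of
 * vmw_fb_on().
 */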
int vmw_fb_off(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = false;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        flush_delayed_work(&info->deferred_work);
        flush_delayed_work(&par->local_work);

        mutex_lock(&par->bo_mutex);
        drm_modeset_lock_all(vmw_priv->dev);
        (void) vmw_fb_kms_detach(par, true, false);
        drm_modeset_unlock_all(vmw_priv->dev);
        mutex_unlock(&par->bo_mutex);

        return 0;
}

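/*
 * Resume the fbdev emulation: restore the mode via vmw_fb_set_par()
 * and re-enable dirty tracking. Counterpart of vmw_fb_off().
 */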
int vmw_fb_on(struct vmw_private *vmw_priv)
{
        struct fb_info *info;
        struct vmw_fb_par *par;
        unsigned long flags;

        if (!vmw_priv->fb_info)
                return -EINVAL;

        info = vmw_priv->fb_info;
        par = info->par;

        vmw_fb_set_par(info);
        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = true;
        spin_unlock_irqrestore(&par->dirty.lock, flags);

        return 0;
}