blob: e6195debb0d4f30575b1b1f458b5d39562108ed8 [file] [log] [blame]
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +02001/*
2 * drm kms/fb cma (contiguous memory allocator) helper functions
3 *
4 * Copyright (C) 2012 Analog Device Inc.
5 * Author: Lars-Peter Clausen <lars@metafoo.de>
6 *
7 * Based on udl_fbdev.c
8 * Copyright (C) 2012 Red Hat
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#include <drm/drmP.h>
21#include <drm/drm_crtc.h>
22#include <drm/drm_fb_helper.h>
23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_gem_cma_helper.h>
25#include <drm/drm_fb_cma_helper.h>
26#include <linux/module.h>
27
Noralf Trønnes199c7712016-04-28 17:18:35 +020028#define DEFAULT_FBDEFIO_DELAY_MS 50
29
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +020030struct drm_fb_cma {
31 struct drm_framebuffer fb;
32 struct drm_gem_cma_object *obj[4];
33};
34
/* fbdev emulation state: the generic fb helper plus the CMA framebuffer. */
struct drm_fbdev_cma {
	struct drm_fb_helper fb_helper;
	struct drm_fb_cma *fb;
};
39
Noralf Trønnes199c7712016-04-28 17:18:35 +020040/**
41 * DOC: framebuffer cma helper functions
42 *
43 * Provides helper functions for creating a cma (contiguous memory allocator)
44 * backed framebuffer.
45 *
Noralf Trønnes02da16d2016-05-11 18:09:18 +020046 * drm_fb_cma_create() is used in the &drm_mode_config_funcs ->fb_create
47 * callback function to create a cma backed framebuffer.
Noralf Trønnes199c7712016-04-28 17:18:35 +020048 *
49 * An fbdev framebuffer backed by cma is also available by calling
50 * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
Noralf Trønnes02da16d2016-05-11 18:09:18 +020051 * If the &drm_framebuffer_funcs ->dirty callback is set, fb_deferred_io
Noralf Trønnes199c7712016-04-28 17:18:35 +020052 * will be set up automatically. dirty() is called by
53 * drm_fb_helper_deferred_io() in process context (struct delayed_work).
54 *
Daniel Vetterda5335b2016-05-31 22:55:13 +020055 * Example fbdev deferred io code::
Noralf Trønnes199c7712016-04-28 17:18:35 +020056 *
57 * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
58 * struct drm_file *file_priv,
59 * unsigned flags, unsigned color,
60 * struct drm_clip_rect *clips,
61 * unsigned num_clips)
62 * {
63 * struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
64 * ... push changes ...
65 * return 0;
66 * }
67 *
68 * static struct drm_framebuffer_funcs driver_fbdev_fb_funcs = {
69 * .destroy = drm_fb_cma_destroy,
70 * .create_handle = drm_fb_cma_create_handle,
71 * .dirty = driver_fbdev_fb_dirty,
72 * };
73 *
74 * static int driver_fbdev_create(struct drm_fb_helper *helper,
75 * struct drm_fb_helper_surface_size *sizes)
76 * {
77 * return drm_fbdev_cma_create_with_funcs(helper, sizes,
78 * &driver_fbdev_fb_funcs);
79 * }
80 *
81 * static const struct drm_fb_helper_funcs driver_fb_helper_funcs = {
82 * .fb_probe = driver_fbdev_create,
83 * };
84 *
85 * Initialize:
86 * fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
87 * dev->mode_config.num_crtc,
88 * dev->mode_config.num_connector,
89 * &driver_fb_helper_funcs);
90 *
91 */
92
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +020093static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
94{
95 return container_of(helper, struct drm_fbdev_cma, fb_helper);
96}
97
/* Map an embedded drm_framebuffer back to its containing drm_fb_cma. */
static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
{
	return container_of(fb, struct drm_fb_cma, fb);
}
102
Noralf Trønnes199c7712016-04-28 17:18:35 +0200103void drm_fb_cma_destroy(struct drm_framebuffer *fb)
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200104{
105 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
106 int i;
107
108 for (i = 0; i < 4; i++) {
109 if (fb_cma->obj[i])
110 drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
111 }
112
113 drm_framebuffer_cleanup(fb);
114 kfree(fb_cma);
115}
Noralf Trønnes199c7712016-04-28 17:18:35 +0200116EXPORT_SYMBOL(drm_fb_cma_destroy);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200117
Noralf Trønnes199c7712016-04-28 17:18:35 +0200118int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200119 struct drm_file *file_priv, unsigned int *handle)
120{
121 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
122
123 return drm_gem_handle_create(file_priv,
124 &fb_cma->obj[0]->base, handle);
125}
Noralf Trønnes199c7712016-04-28 17:18:35 +0200126EXPORT_SYMBOL(drm_fb_cma_create_handle);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200127
/* Default fb ops for drivers that don't need a custom dirty() callback. */
static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
	.destroy = drm_fb_cma_destroy,
	.create_handle = drm_fb_cma_create_handle,
};
132
133static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
Colin Ian King70c06162016-01-20 10:59:34 +0000134 const struct drm_mode_fb_cmd2 *mode_cmd,
135 struct drm_gem_cma_object **obj,
Noralf Trønnesfdce1842016-05-12 20:25:21 +0200136 unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200137{
138 struct drm_fb_cma *fb_cma;
139 int ret;
140 int i;
141
142 fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
143 if (!fb_cma)
144 return ERR_PTR(-ENOMEM);
145
Daniel Vetterc7d73f62012-12-13 23:38:38 +0100146 drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
147
148 for (i = 0; i < num_planes; i++)
149 fb_cma->obj[i] = obj[i];
150
Noralf Trønnes199c7712016-04-28 17:18:35 +0200151 ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200152 if (ret) {
Masanari Iida8b513d02013-05-21 23:13:12 +0900153 dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200154 kfree(fb_cma);
155 return ERR_PTR(ret);
156 }
157
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200158 return fb_cma;
159}
160
161/**
Noralf Trønnes3995b392016-05-12 20:25:22 +0200162 * drm_fb_cma_create_with_funcs() - helper function for the
163 * &drm_mode_config_funcs ->fb_create
164 * callback function
Daniel Vetter890358a2016-05-31 23:11:12 +0200165 * @dev: DRM device
166 * @file_priv: drm file for the ioctl call
167 * @mode_cmd: metadata from the userspace fb creation request
168 * @funcs: vtable to be used for the new framebuffer object
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200169 *
Noralf Trønnes3995b392016-05-12 20:25:22 +0200170 * This can be used to set &drm_framebuffer_funcs for drivers that need the
171 * dirty() callback. Use drm_fb_cma_create() if you don't need to change
172 * &drm_framebuffer_funcs.
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200173 */
Noralf Trønnes3995b392016-05-12 20:25:22 +0200174struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
175 struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
176 const struct drm_framebuffer_funcs *funcs)
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200177{
178 struct drm_fb_cma *fb_cma;
179 struct drm_gem_cma_object *objs[4];
180 struct drm_gem_object *obj;
181 unsigned int hsub;
182 unsigned int vsub;
183 int ret;
184 int i;
185
186 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
187 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
188
189 for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
190 unsigned int width = mode_cmd->width / (i ? hsub : 1);
191 unsigned int height = mode_cmd->height / (i ? vsub : 1);
192 unsigned int min_size;
193
Chris Wilsona8ad0bd2016-05-09 11:04:54 +0100194 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200195 if (!obj) {
196 dev_err(dev->dev, "Failed to lookup GEM object\n");
197 ret = -ENXIO;
198 goto err_gem_object_unreference;
199 }
200
201 min_size = (height - 1) * mode_cmd->pitches[i]
202 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
203 + mode_cmd->offsets[i];
204
205 if (obj->size < min_size) {
206 drm_gem_object_unreference_unlocked(obj);
207 ret = -EINVAL;
208 goto err_gem_object_unreference;
209 }
210 objs[i] = to_drm_gem_cma_obj(obj);
211 }
212
Noralf Trønnes3995b392016-05-12 20:25:22 +0200213 fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200214 if (IS_ERR(fb_cma)) {
215 ret = PTR_ERR(fb_cma);
216 goto err_gem_object_unreference;
217 }
218
219 return &fb_cma->fb;
220
221err_gem_object_unreference:
222 for (i--; i >= 0; i--)
223 drm_gem_object_unreference_unlocked(&objs[i]->base);
224 return ERR_PTR(ret);
225}
Noralf Trønnes3995b392016-05-12 20:25:22 +0200226EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
227
228/**
229 * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
Daniel Vetter890358a2016-05-31 23:11:12 +0200230 * @dev: DRM device
231 * @file_priv: drm file for the ioctl call
232 * @mode_cmd: metadata from the userspace fb creation request
Noralf Trønnes3995b392016-05-12 20:25:22 +0200233 *
234 * If your hardware has special alignment or pitch requirements these should be
235 * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
236 * you need to set &drm_framebuffer_funcs ->dirty.
237 */
238struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
239 struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
240{
241 return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd,
242 &drm_fb_cma_funcs);
243}
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200244EXPORT_SYMBOL_GPL(drm_fb_cma_create);
245
246/**
247 * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
248 * @fb: The framebuffer
249 * @plane: Which plane
250 *
251 * Return the CMA GEM object for given framebuffer.
252 *
253 * This function will usually be called from the CRTC callback functions.
254 */
255struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
Daniel Vetter890358a2016-05-31 23:11:12 +0200256 unsigned int plane)
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200257{
258 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
259
260 if (plane >= 4)
261 return NULL;
262
263 return fb_cma->obj[plane];
264}
265EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
266
Rob Clark6f646092012-12-10 10:46:43 -0600267#ifdef CONFIG_DEBUG_FS
Lespiau, Damien2c9c52e2013-08-20 00:53:08 +0100268static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
Rob Clark6f646092012-12-10 10:46:43 -0600269{
270 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
271 int i, n = drm_format_num_planes(fb->pixel_format);
272
273 seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
274 (char *)&fb->pixel_format);
275
276 for (i = 0; i < n; i++) {
277 seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
278 i, fb->offsets[i], fb->pitches[i]);
279 drm_gem_cma_describe(fb_cma->obj[i], m);
280 }
281}
Rob Clark6f646092012-12-10 10:46:43 -0600282
283/**
284 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
Daniel Vetter890358a2016-05-31 23:11:12 +0200285 * in debugfs.
286 * @m: output file
287 * @arg: private data for the callback
Rob Clark6f646092012-12-10 10:46:43 -0600288 */
289int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
290{
291 struct drm_info_node *node = (struct drm_info_node *) m->private;
292 struct drm_device *dev = node->minor->dev;
293 struct drm_framebuffer *fb;
Rob Clark6f646092012-12-10 10:46:43 -0600294
Daniel Vetter9e75c0e2015-07-09 23:32:34 +0200295 mutex_lock(&dev->mode_config.fb_lock);
Daniel Vettere4f62542015-07-09 23:44:35 +0200296 drm_for_each_fb(fb, dev)
Rob Clark6f646092012-12-10 10:46:43 -0600297 drm_fb_cma_describe(fb, m);
Daniel Vetter9e75c0e2015-07-09 23:32:34 +0200298 mutex_unlock(&dev->mode_config.fb_lock);
Rob Clark6f646092012-12-10 10:46:43 -0600299
300 return 0;
301}
302EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
303#endif
304
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200305static struct fb_ops drm_fbdev_cma_ops = {
306 .owner = THIS_MODULE,
Archit Taneja85f2edf2015-07-22 14:58:20 +0530307 .fb_fillrect = drm_fb_helper_sys_fillrect,
308 .fb_copyarea = drm_fb_helper_sys_copyarea,
309 .fb_imageblit = drm_fb_helper_sys_imageblit,
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200310 .fb_check_var = drm_fb_helper_check_var,
311 .fb_set_par = drm_fb_helper_set_par,
312 .fb_blank = drm_fb_helper_blank,
313 .fb_pan_display = drm_fb_helper_pan_display,
314 .fb_setcmap = drm_fb_helper_setcmap,
315};
316
Noralf Trønnes199c7712016-04-28 17:18:35 +0200317static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
318 struct vm_area_struct *vma)
319{
320 fb_deferred_io_mmap(info, vma);
321 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
322
323 return 0;
324}
325
326static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
327 struct drm_gem_cma_object *cma_obj)
328{
329 struct fb_deferred_io *fbdefio;
330 struct fb_ops *fbops;
331
332 /*
333 * Per device structures are needed because:
334 * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
335 * fbdefio: individual delays
336 */
337 fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
338 fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
339 if (!fbdefio || !fbops) {
340 kfree(fbdefio);
341 return -ENOMEM;
342 }
343
344 /* can't be offset from vaddr since dirty() uses cma_obj */
345 fbi->screen_buffer = cma_obj->vaddr;
346 /* fb_deferred_io_fault() needs a physical address */
347 fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
348
349 *fbops = *fbi->fbops;
350 fbi->fbops = fbops;
351
352 fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
353 fbdefio->deferred_io = drm_fb_helper_deferred_io;
354 fbi->fbdefio = fbdefio;
355 fb_deferred_io_init(fbi);
356 fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
357
358 return 0;
359}
360
361static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
362{
363 if (!fbi->fbdefio)
364 return;
365
366 fb_deferred_io_cleanup(fbi);
367 kfree(fbi->fbdefio);
368 kfree(fbi->fbops);
369}
370
371/*
372 * For use in a (struct drm_fb_helper_funcs *)->fb_probe callback function that
373 * needs custom struct drm_framebuffer_funcs, like dirty() for deferred_io use.
374 */
375int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
376 struct drm_fb_helper_surface_size *sizes,
Noralf Trønnesfdce1842016-05-12 20:25:21 +0200377 const struct drm_framebuffer_funcs *funcs)
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200378{
379 struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
380 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
381 struct drm_device *dev = helper->dev;
382 struct drm_gem_cma_object *obj;
383 struct drm_framebuffer *fb;
384 unsigned int bytes_per_pixel;
385 unsigned long offset;
386 struct fb_info *fbi;
387 size_t size;
388 int ret;
389
Thierry Redinge0d78d082012-10-20 10:32:46 +0000390 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200391 sizes->surface_width, sizes->surface_height,
392 sizes->surface_bpp);
393
394 bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
395
396 mode_cmd.width = sizes->surface_width;
397 mode_cmd.height = sizes->surface_height;
398 mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
399 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
400 sizes->surface_depth);
401
402 size = mode_cmd.pitches[0] * mode_cmd.height;
403 obj = drm_gem_cma_create(dev, size);
Thierry Reding02813242012-10-20 10:32:47 +0000404 if (IS_ERR(obj))
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200405 return -ENOMEM;
406
Archit Taneja85f2edf2015-07-22 14:58:20 +0530407 fbi = drm_fb_helper_alloc_fbi(helper);
408 if (IS_ERR(fbi)) {
409 ret = PTR_ERR(fbi);
Eric Anholt50cbc132015-12-14 16:26:26 -0800410 goto err_gem_free_object;
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200411 }
412
Noralf Trønnes199c7712016-04-28 17:18:35 +0200413 fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200414 if (IS_ERR(fbdev_cma->fb)) {
415 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
416 ret = PTR_ERR(fbdev_cma->fb);
Archit Taneja85f2edf2015-07-22 14:58:20 +0530417 goto err_fb_info_destroy;
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200418 }
419
420 fb = &fbdev_cma->fb->fb;
421 helper->fb = fb;
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200422
423 fbi->par = helper;
424 fbi->flags = FBINFO_FLAG_DEFAULT;
425 fbi->fbops = &drm_fbdev_cma_ops;
426
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200427 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
Rob Clark8d766122015-03-11 10:23:10 -0400428 drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200429
430 offset = fbi->var.xoffset * bytes_per_pixel;
431 offset += fbi->var.yoffset * fb->pitches[0];
432
433 dev->mode_config.fb_base = (resource_size_t)obj->paddr;
434 fbi->screen_base = obj->vaddr + offset;
435 fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
436 fbi->screen_size = size;
437 fbi->fix.smem_len = size;
438
Noralf Trønnes199c7712016-04-28 17:18:35 +0200439 if (funcs->dirty) {
440 ret = drm_fbdev_cma_defio_init(fbi, obj);
441 if (ret)
442 goto err_cma_destroy;
443 }
444
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200445 return 0;
446
Noralf Trønnes199c7712016-04-28 17:18:35 +0200447err_cma_destroy:
448 drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
449 drm_fb_cma_destroy(&fbdev_cma->fb->fb);
Archit Taneja85f2edf2015-07-22 14:58:20 +0530450err_fb_info_destroy:
451 drm_fb_helper_release_fbi(helper);
Eric Anholt50cbc132015-12-14 16:26:26 -0800452err_gem_free_object:
453 dev->driver->gem_free_object(&obj->base);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200454 return ret;
455}
Noralf Trønnes199c7712016-04-28 17:18:35 +0200456EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
457
/* fb_probe callback using the default framebuffer funcs (no dirty()). */
static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
	struct drm_fb_helper_surface_size *sizes)
{
	return drm_fbdev_cma_create_with_funcs(helper, sizes, &drm_fb_cma_funcs);
}
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200463
Thierry Reding3a493872014-06-27 17:19:23 +0200464static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
Daniel Vettercd5428a2013-01-21 23:42:49 +0100465 .fb_probe = drm_fbdev_cma_create,
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200466};
467
468/**
Noralf Trønnes199c7712016-04-28 17:18:35 +0200469 * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200470 * @dev: DRM device
471 * @preferred_bpp: Preferred bits per pixel for the device
472 * @num_crtc: Number of CRTCs
473 * @max_conn_count: Maximum number of connectors
Noralf Trønnes199c7712016-04-28 17:18:35 +0200474 * @funcs: fb helper functions, in particular fb_probe()
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200475 *
476 * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
477 */
Noralf Trønnes199c7712016-04-28 17:18:35 +0200478struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200479 unsigned int preferred_bpp, unsigned int num_crtc,
Noralf Trønnes199c7712016-04-28 17:18:35 +0200480 unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs)
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200481{
482 struct drm_fbdev_cma *fbdev_cma;
483 struct drm_fb_helper *helper;
484 int ret;
485
486 fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
487 if (!fbdev_cma) {
488 dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
489 return ERR_PTR(-ENOMEM);
490 }
491
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200492 helper = &fbdev_cma->fb_helper;
493
Noralf Trønnes199c7712016-04-28 17:18:35 +0200494 drm_fb_helper_prepare(dev, helper, funcs);
Thierry Reding10a23102014-06-27 17:19:24 +0200495
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200496 ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
497 if (ret < 0) {
498 dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
499 goto err_free;
500 }
501
502 ret = drm_fb_helper_single_add_all_connectors(helper);
503 if (ret < 0) {
504 dev_err(dev->dev, "Failed to add connectors.\n");
505 goto err_drm_fb_helper_fini;
506
507 }
508
509 ret = drm_fb_helper_initial_config(helper, preferred_bpp);
510 if (ret < 0) {
Masanari Iida8b513d02013-05-21 23:13:12 +0900511 dev_err(dev->dev, "Failed to set initial hw configuration.\n");
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200512 goto err_drm_fb_helper_fini;
513 }
514
515 return fbdev_cma;
516
517err_drm_fb_helper_fini:
518 drm_fb_helper_fini(helper);
519err_free:
520 kfree(fbdev_cma);
521
522 return ERR_PTR(ret);
523}
Noralf Trønnes199c7712016-04-28 17:18:35 +0200524EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
525
526/**
527 * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
528 * @dev: DRM device
529 * @preferred_bpp: Preferred bits per pixel for the device
530 * @num_crtc: Number of CRTCs
531 * @max_conn_count: Maximum number of connectors
532 *
533 * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
534 */
535struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
536 unsigned int preferred_bpp, unsigned int num_crtc,
537 unsigned int max_conn_count)
538{
539 return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
540 max_conn_count, &drm_fb_cma_helper_funcs);
541}
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200542EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
543
544/**
545 * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
546 * @fbdev_cma: The drm_fbdev_cma struct
547 */
548void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
549{
Archit Taneja85f2edf2015-07-22 14:58:20 +0530550 drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
Noralf Trønnes199c7712016-04-28 17:18:35 +0200551 drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
Archit Taneja85f2edf2015-07-22 14:58:20 +0530552 drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200553
Daniel Vetter36206362012-12-10 20:42:17 +0100554 if (fbdev_cma->fb) {
555 drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200556 drm_fb_cma_destroy(&fbdev_cma->fb->fb);
Daniel Vetter36206362012-12-10 20:42:17 +0100557 }
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200558
559 drm_fb_helper_fini(&fbdev_cma->fb_helper);
560 kfree(fbdev_cma);
561}
562EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
563
564/**
565 * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
566 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
567 *
568 * This function is usually called from the DRM drivers lastclose callback.
569 */
570void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
571{
Rob Clark5ea1f752014-05-30 12:29:48 -0400572 if (fbdev_cma)
573 drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
Lars-Peter Clausen2e3b3c42012-07-02 16:37:47 +0200574}
575EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
576
577/**
578 * drm_fbdev_cma_hotplug_event() - Poll for hotpulug events
579 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
580 *
581 * This function is usually called from the DRM drivers output_poll_changed
582 * callback.
583 */
584void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
585{
586 if (fbdev_cma)
587 drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
588}
589EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);