Lines Matching defs:par
81 struct vmw_fb_par *par = info->par;
82 u32 *pal = par->pseudo_palette;
89 switch (par->set_fb->format->depth) {
98 par->set_fb->format->depth,
99 par->set_fb->format->cpp[0] * 8);
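The switch at line 89 selects how a requested color is packed into the 32-bit pseudo-palette entry stored at line 82; the error path at lines 98-99 reports an unsupported depth/bpp pair. The body of the supported cases is not in this listing, but a typical truecolor packing for a 24/32-bit XRGB visual looks like the sketch below (the helper name and the exact shifts are assumptions, not the driver's code):

    #include <stdint.h>

    /* fbdev hands setcolreg 16-bit color components; keep the high byte
     * of each and pack them as XRGB8888. Illustrative only. */
    static uint32_t pack_xrgb8888(uint16_t red, uint16_t green, uint16_t blue)
    {
            return ((uint32_t)(red   & 0xff00) << 8) |
                    (uint32_t)(green & 0xff00)       |
                   ((uint32_t)(blue  & 0xff00) >> 8);
    }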
110 struct vmw_fb_par *par = info->par;
111 struct vmw_private *vmw_priv = par->vmw_priv;
148 if ((var->xoffset + var->xres) > par->max_width ||
149 (var->yoffset + var->yres) > par->max_height) {
178 * off during hibernation using the par->dirty.active bool.
182 struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
184 struct vmw_private *vmw_priv = par->vmw_priv;
192 struct vmw_buffer_object *vbo = par->vmw_bo;
195 if (!READ_ONCE(par->dirty.active))
198 mutex_lock(&par->bo_mutex);
199 cur_fb = par->set_fb;
209 spin_lock_irqsave(&par->dirty.lock, irq_flags);
210 if (!par->dirty.active) {
211 spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
220 max_x = par->fb_x + cur_fb->width;
221 max_y = par->fb_y + cur_fb->height;
223 dst_x1 = par->dirty.x1 - par->fb_x;
224 dst_y1 = par->dirty.y1 - par->fb_y;
228 dst_x2 = par->dirty.x2 - par->fb_x;
229 dst_y2 = par->dirty.y2 - par->fb_y;
237 par->dirty.x1 = par->dirty.x2 = 0;
238 par->dirty.y1 = par->dirty.y2 = 0;
239 spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
243 (dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
244 src_ptr = (u8 *)par->vmalloc +
245 ((dst_y1 + par->fb_y) * info->fix.line_length +
246 (dst_x1 + par->fb_x) * cpp);
250 dst_ptr += par->set_fb->pitches[0];
264 WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
269 mutex_unlock(&par->bo_mutex);
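Lines 182-269 are the deferred flush worker: under bo_mutex it snapshots and resets the dirty box (lines 209-239), clips it against the current framebuffer, copies the damaged scanlines from the vmalloc'd shadow buffer into the buffer object (lines 243-250), and finally reports the region through the framebuffer's dirty() hook (line 264). A stand-alone sketch of the clip-and-copy step, with panning offsets and locking omitted (all names here are illustrative, not the driver's):

    #include <stdint.h>
    #include <string.h>

    struct box { int x1, y1, x2, y2; };

    /* Clip the dirty box to the framebuffer, then copy it scanline by
     * scanline, honoring each side's pitch, as lines 243-250 do. */
    static void flush_box(uint8_t *dst, size_t dst_pitch,
                          const uint8_t *src, size_t src_pitch,
                          int fb_w, int fb_h, int cpp, struct box d)
    {
            if (d.x1 < 0) d.x1 = 0;
            if (d.y1 < 0) d.y1 = 0;
            if (d.x2 > fb_w) d.x2 = fb_w;
            if (d.y2 > fb_h) d.y2 = fb_h;
            if (d.x1 >= d.x2 || d.y1 >= d.y2)
                    return;         /* nothing visible to flush */

            dst += (size_t)d.y1 * dst_pitch + (size_t)d.x1 * cpp;
            src += (size_t)d.y1 * src_pitch + (size_t)d.x1 * cpp;
            for (int y = d.y1; y < d.y2; y++) {
                    memcpy(dst, src, (size_t)(d.x2 - d.x1) * cpp);
                    dst += dst_pitch;
                    src += src_pitch;
            }
    }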
272 static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
280 spin_lock_irqsave(&par->dirty.lock, flags);
281 if (par->dirty.x1 == par->dirty.x2) {
282 par->dirty.x1 = x1;
283 par->dirty.y1 = y1;
284 par->dirty.x2 = x2;
285 par->dirty.y2 = y2;
288 if (par->dirty.active)
289 schedule_delayed_work(&par->local_work,
292 if (x1 < par->dirty.x1)
293 par->dirty.x1 = x1;
294 if (y1 < par->dirty.y1)
295 par->dirty.y1 = y1;
296 if (x2 > par->dirty.x2)
297 par->dirty.x2 = x2;
298 if (y2 > par->dirty.y2)
299 par->dirty.y2 = y2;
301 spin_unlock_irqrestore(&par->dirty.lock, flags);
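vmw_fb_dirty_mark (lines 272-301) maintains the pending damage as a single bounding box under dirty.lock: an empty box (x1 == x2, line 281) is seeded with the new rectangle and the flush work is scheduled if flushing is active (line 288); otherwise the box grows to the union of old and new damage (lines 292-299). A minimal stand-alone restatement, with hypothetical names and the lock plus active check reduced to a callback:

    /* An empty box (x1 == x2) means no damage is pending; in the driver
     * this whole function runs under par->dirty.lock. */
    struct dirty_box { int x1, y1, x2, y2; };

    static void dirty_mark(struct dirty_box *d, int x1, int y1, int x2, int y2,
                           void (*kick_flush)(void))
    {
            if (d->x1 == d->x2) {
                    /* First damage since the last flush: seed the box and
                     * schedule the worker (the driver also checks
                     * dirty.active first). */
                    d->x1 = x1; d->y1 = y1; d->x2 = x2; d->y2 = y2;
                    kick_flush();
            } else {
                    /* Grow the box to the union with the new damage. */
                    if (x1 < d->x1) d->x1 = x1;
                    if (y1 < d->y1) d->y1 = y1;
                    if (x2 > d->x2) d->x2 = x2;
                    if (y2 > d->y2) d->y2 = y2;
            }
    }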
307 struct vmw_fb_par *par = info->par;
315 mutex_lock(&par->bo_mutex);
316 par->fb_x = var->xoffset;
317 par->fb_y = var->yoffset;
318 if (par->set_fb)
319 vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
320 par->set_fb->height);
321 mutex_unlock(&par->bo_mutex);
329 struct vmw_fb_par *par = info->par;
348 spin_lock_irqsave(&par->dirty.lock, flags);
349 par->dirty.x1 = 0;
350 par->dirty.y1 = y1;
351 par->dirty.x2 = info->var.xres;
352 par->dirty.y2 = y2;
353 spin_unlock_irqrestore(&par->dirty.lock, flags);
359 cancel_delayed_work(&par->local_work);
360 schedule_delayed_work(&par->local_work, 0);
376 vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
383 vmw_fb_dirty_mark(info->par, region->dx, region->dy,
390 vmw_fb_dirty_mark(info->par, image->dx, image->dy,
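Lines 376-390 are the tails of the three fbdev drawing hooks: each draws into the shadow buffer and then marks the touched rectangle dirty so the worker flushes it later. A sketch of the pattern for the fillrect hook; which generic helper (cfb_fillrect or sys_fillrect) actually does the drawing is not visible in this listing, so treat that call as an assumption:

    #include <linux/fb.h>

    static void vmw_fb_fillrect(struct fb_info *info,
                                const struct fb_fillrect *rect)
    {
            cfb_fillrect(info, rect);       /* draw into the shadow buffer */
            vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
                              rect->width, rect->height);
    }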
466 static int vmw_fb_kms_detach(struct vmw_fb_par *par,
470 struct drm_framebuffer *cur_fb = par->set_fb;
474 if (par->set_mode) {
477 set.crtc = par->crtc;
483 set.connectors = &par->con;
489 drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
490 par->set_mode = NULL;
495 par->set_fb = NULL;
498 if (par->vmw_bo && detach_bo && unref_bo)
499 vmw_bo_unreference(&par->vmw_bo);
507 struct vmw_fb_par *par = info->par;
524 cur_fb = par->set_fb;
533 ret = vmw_fb_kms_detach(par,
534 par->bo_size < new_bo_size ||
535 par->bo_size > 2*new_bo_size,
540 if (!par->vmw_bo) {
541 ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
542 &par->vmw_bo);
548 par->bo_size = new_bo_size;
551 vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
556 par->set_fb = &vfb->base;
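The detach condition at lines 533-535 is a reuse heuristic for the backing buffer object: keep it only when it is large enough for the new mode and no more than twice the required size, so repeated mode changes neither reallocate every time nor pin an oversized buffer. Stated on its own as a predicate:

    #include <stdbool.h>
    #include <stddef.h>

    /* True when the existing BO must be dropped and reallocated. */
    static bool bo_needs_realloc(size_t bo_size, size_t new_bo_size)
    {
            return bo_size < new_bo_size || bo_size > 2 * new_bo_size;
    }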
563 struct vmw_fb_par *par = info->par;
564 struct vmw_private *vmw_priv = par->vmw_priv;
593 mutex_lock(&par->bo_mutex);
598 par->fb_x = var->xoffset;
599 par->fb_y = var->yoffset;
601 set.crtc = par->crtc;
605 set.fb = par->set_fb;
607 set.connectors = &par->con;
613 vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
614 par->set_fb->width, par->set_fb->height);
619 schedule_delayed_work(&par->local_work, 0);
622 if (par->set_mode)
623 drm_mode_destroy(vmw_priv->dev, par->set_mode);
624 par->set_mode = mode;
626 mutex_unlock(&par->bo_mutex);
647 struct vmw_fb_par *par;
663 info = framebuffer_alloc(sizeof(*par), device);
668 * Par
671 par = info->par;
672 memset(par, 0, sizeof(*par));
673 INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
674 par->vmw_priv = vmw_priv;
675 par->vmalloc = NULL;
676 par->max_width = fb_width;
677 par->max_height = fb_height;
679 ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
680 par->max_height, &par->con,
681 &par->crtc, &init_mode);
691 par->vmalloc = vzalloc(fb_size);
692 if (unlikely(par->vmalloc == NULL)) {
713 info->pseudo_palette = par->pseudo_palette;
714 info->screen_base = (char __iomem *)par->vmalloc;
750 par->dirty.x1 = par->dirty.x2 = 0;
751 par->dirty.y1 = par->dirty.y2 = 0;
752 par->dirty.active = true;
753 spin_lock_init(&par->dirty.lock);
754 mutex_init(&par->bo_mutex);
770 vfree(par->vmalloc);
781 struct vmw_fb_par *par;
787 par = info->par;
791 cancel_delayed_work_sync(&par->local_work);
794 mutex_lock(&par->bo_mutex);
795 (void) vmw_fb_kms_detach(par, true, true);
796 mutex_unlock(&par->bo_mutex);
798 vfree(par->vmalloc);
807 struct vmw_fb_par *par;
814 par = info->par;
816 spin_lock_irqsave(&par->dirty.lock, flags);
817 par->dirty.active = false;
818 spin_unlock_irqrestore(&par->dirty.lock, flags);
821 flush_delayed_work(&par->local_work);
829 struct vmw_fb_par *par;
836 par = info->par;
838 spin_lock_irqsave(&par->dirty.lock, flags);
839 par->dirty.active = true;
840 spin_unlock_irqrestore(&par->dirty.lock, flags);
847 schedule_delayed_work(&par->local_work, 0);
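The final two fragments (lines 807-847) are the suspend/resume pair the comment at line 178 refers to: vmw_fb_off clears dirty.active under dirty.lock and then waits out any in-flight flush with flush_delayed_work(), while vmw_fb_on sets the flag again and schedules an immediate flush to repaint. Condensed into two hypothetical helpers using only the calls the listing itself shows:

    /* Suspend path: stop new flushes, then wait for a running one. */
    static void fb_flush_off(struct vmw_fb_par *par)
    {
            unsigned long flags;

            spin_lock_irqsave(&par->dirty.lock, flags);
            par->dirty.active = false;
            spin_unlock_irqrestore(&par->dirty.lock, flags);

            flush_delayed_work(&par->local_work);
    }

    /* Resume path: re-enable flushing and repaint immediately. */
    static void fb_flush_on(struct vmw_fb_par *par)
    {
            unsigned long flags;

            spin_lock_irqsave(&par->dirty.lock, flags);
            par->dirty.active = true;
            spin_unlock_irqrestore(&par->dirty.lock, flags);

            schedule_delayed_work(&par->local_work, 0);
    }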