/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"


/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * Returns 0 on success, or a negative error code on failure.
 */
int intel_atomic_check(struct drm_device *dev,
                       struct drm_atomic_state *state)
{
        int nplanes = dev->mode_config.num_total_plane;
        int ncrtcs = dev->mode_config.num_crtc;
        int nconnectors = dev->mode_config.num_connector;
        enum pipe nuclear_pipe = INVALID_PIPE;
        struct intel_crtc *nuclear_crtc = NULL;
        struct intel_crtc_state *crtc_state = NULL;
        int ret;
        int i;
        bool not_nuclear = false;

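        /* Cache the current cdclk frequency in this atomic state. */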
        to_intel_atomic_state(state)->cdclk = to_i915(dev)->cdclk_freq;

        /*
         * FIXME:  At the moment, we only support "nuclear pageflip" on a
         * single CRTC.  Cross-crtc updates will be added later.
         */
        for (i = 0; i < nplanes; i++) {
                struct intel_plane *plane = to_intel_plane(state->planes[i]);
                if (!plane)
                        continue;

                if (nuclear_pipe == INVALID_PIPE) {
                        nuclear_pipe = plane->pipe;
                } else if (nuclear_pipe != plane->pipe) {
                        DRM_DEBUG_KMS("i915 only supports atomic plane operations on a single CRTC at the moment\n");
                        return -EINVAL;
                }
        }

        /*
         * FIXME:  We only handle planes for now; make sure there are no CRTCs
         * or connectors involved.
         */
        state->allow_modeset = false;
        for (i = 0; i < ncrtcs; i++) {
                struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
                if (crtc)
                        memset(&crtc->atomic, 0, sizeof(crtc->atomic));
                if (crtc && crtc->pipe != nuclear_pipe)
                        not_nuclear = true;
                if (crtc && crtc->pipe == nuclear_pipe) {
                        nuclear_crtc = crtc;
                        crtc_state = to_intel_crtc_state(state->crtc_states[i]);
                }
        }
        for (i = 0; i < nconnectors; i++)
                if (state->connectors[i] != NULL)
                        not_nuclear = true;

        if (not_nuclear) {
                DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
                return -EINVAL;
        }

        if (crtc_state &&
            crtc_state->quirks & PIPE_CONFIG_QUIRK_INITIAL_PLANES) {
                ret = drm_atomic_add_affected_planes(state, &nuclear_crtc->base);
                if (ret)
                        return ret;
        }

        return drm_atomic_helper_check_planes(dev, state);
}


/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int intel_atomic_commit(struct drm_device *dev,
                        struct drm_atomic_state *state,
                        bool async)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int ret;
        int i;

        if (async) {
                DRM_DEBUG_KMS("i915 does not yet support async commit\n");
                return -EINVAL;
        }

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
                return ret;

        /* Point of no return */
        drm_atomic_helper_swap_state(dev, state);

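        /*
         * Point each intel_crtc at its new state and let the helper write
         * the plane updates for every crtc in this transaction to the
         * hardware.
         */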
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

                drm_atomic_helper_commit_planes_on_crtc(crtc_state);
        }

        /* FIXME: This function should eventually call __intel_set_mode when needed */

        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
        drm_atomic_state_free(state);

        return 0;
}

/**
 * intel_connector_atomic_get_property - fetch connector property value
 * @connector: connector to fetch property for
 * @state: state containing the property value
 * @property: property to look up
 * @val: pointer to write property value into
 *
 * The DRM core does not store shadow copies of properties for
 * atomic-capable drivers.  This entrypoint is used to fetch
 * the current value of a driver-specific connector property.
 *
 * Returns 0 on success, or -EINVAL if the property is not found.
 */
int
intel_connector_atomic_get_property(struct drm_connector *connector,
                                    const struct drm_connector_state *state,
                                    struct drm_property *property,
                                    uint64_t *val)
{
        int i;

        /*
         * TODO: We only have atomic modeset for planes at the moment, so the
         * crtc/connector code isn't quite ready yet.  Until it's ready,
         * continue to look up all property values in the DRM's shadow copy
         * in obj->properties->values[].
         *
         * When the crtc/connector state work matures, this function should
         * be updated to read the values out of the state structure instead.
         */
        for (i = 0; i < connector->base.properties->count; i++) {
                if (connector->base.properties->properties[i] == property) {
                        *val = connector->base.properties->values[i];
                        return 0;
                }
        }

        return -EINVAL;
}

/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *crtc_state;

        if (WARN_ON(!intel_crtc->config))
                crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
        else
                crtc_state = kmemdup(intel_crtc->config,
                                     sizeof(*intel_crtc->config), GFP_KERNEL);

        if (!crtc_state)
                return NULL;

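        /* Copy the current base crtc state into our new state. */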
        __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);

        crtc_state->base.crtc = crtc;

        return &crtc_state->base;
}

/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the crtc state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
                         struct drm_crtc_state *state)
{
        drm_atomic_helper_crtc_destroy_state(crtc, state);
}

/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev: DRM device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on the staged scaling requests for
 * @intel_crtc and its planes. It is called from the crtc level check path.
 * If the request is supportable, it attaches scalers to the requested planes
 * and crtc.
 *
 * This function takes into account the current scaler(s) in use by any planes
 * not being part of this atomic state.
 *
 * Returns:
 *         0 - scalers were set up successfully
 *         error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_device *dev,
        struct intel_crtc *intel_crtc,
        struct intel_crtc_state *crtc_state)
{
        struct drm_plane *plane = NULL;
        struct intel_plane *intel_plane;
        struct intel_plane_state *plane_state = NULL;
        struct intel_crtc_scaler_state *scaler_state;
        struct drm_atomic_state *drm_state;
        int num_scalers_need;
        int i, j;

        if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
                return 0;

        scaler_state = &crtc_state->scaler_state;
        drm_state = crtc_state->base.state;

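        /*
         * Each set bit in scaler_users is one user that needs a scaler:
         * either a plane, or the crtc itself (SKL_CRTC_INDEX) for the
         * panel fitter.
         */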
        num_scalers_need = hweight32(scaler_state->scaler_users);
        DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
                crtc_state, num_scalers_need, intel_crtc->num_scalers,
                scaler_state->scaler_users);

        /*
         * High level flow:
         * - staged scaler requests are already in scaler_state->scaler_users
         * - check whether staged scaling requests can be supported
         * - add planes using scalers that aren't in current transaction
         * - assign scalers to requested users
         * - as part of plane commit, scalers will be committed
         *   (i.e., either attached or detached) to respective planes in hw
         * - as part of crtc_commit, scaler will be either attached or detached
         *   to crtc in hw
         */

        /* fail if required scalers > available scalers */
        if (num_scalers_need > intel_crtc->num_scalers) {
                DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
                        num_scalers_need, intel_crtc->num_scalers);
                return -EINVAL;
        }

        /* walk through scaler_users bits and start assigning scalers */
        for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
                int *scaler_id;
                const char *name;
                int idx;

                /* skip if scaler not required */
                if (!(scaler_state->scaler_users & (1 << i)))
                        continue;

                if (i == SKL_CRTC_INDEX) {
                        name = "CRTC";
                        idx = intel_crtc->base.base.id;

                        /* panel fitter case: assign as a crtc scaler */
                        scaler_id = &scaler_state->scaler_id;
                } else {
                        name = "PLANE";

                        if (!drm_state)
                                continue;

                        /* plane scaler case: assign as a plane scaler */
                        /* find the plane that set the bit as scaler_user */
                        plane = drm_state->planes[i];

                        /*
                         * to enable/disable hq mode, add planes that are using
                         * a scaler into this transaction
                         */
                        if (!plane) {
                                struct drm_plane_state *state;
                                plane = drm_plane_from_index(dev, i);
                                state = drm_atomic_get_plane_state(drm_state, plane);
                                if (IS_ERR(state)) {
                                        DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
                                                plane->base.id);
                                        return PTR_ERR(state);
                                }

                                /*
                                 * the plane is added after plane checks are run,
                                 * but since this plane is unchanged just do the
                                 * minimum required validation.
                                 */
                                if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                                        intel_crtc->atomic.wait_for_flips = true;
                                crtc_state->base.planes_changed = true;
                        }

                        /* only dereference the plane once it is known to be non-NULL */
                        idx = plane->base.id;

                        intel_plane = to_intel_plane(plane);

                        /* plane on different crtc cannot be a scaler user of this crtc */
                        if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
                                continue;

                        plane_state = to_intel_plane_state(drm_state->plane_states[i]);
                        scaler_id = &plane_state->scaler_id;
                }

                if (*scaler_id < 0) {
                        /* find a free scaler */
                        for (j = 0; j < intel_crtc->num_scalers; j++) {
                                if (!scaler_state->scalers[j].in_use) {
                                        scaler_state->scalers[j].in_use = 1;
                                        *scaler_id = j;
                                        DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
                                                intel_crtc->pipe, *scaler_id, name, idx);
                                        break;
                                }
                        }
                }

                if (WARN_ON(*scaler_id < 0)) {
                        DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
                        continue;
                }

                /* set scaler mode */
                if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
                        /*
                         * when only 1 scaler is in use on either pipe A or B,
                         * scaler 0 operates in high quality (HQ) mode.
                         * In this case use scaler 0 to take advantage of HQ mode
                         */
                        *scaler_id = 0;
                        scaler_state->scalers[0].in_use = 1;
                        scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
                        scaler_state->scalers[1].in_use = 0;
                } else {
                        scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
                }
        }

        return 0;
}

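/*
 * Copy the current configuration of each shared DPLL into the given array,
 * one entry per DPLL.
 */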
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
                                  struct intel_shared_dpll_config *shared_dpll)
{
        enum intel_dpll_id i;

        /* Copy shared dpll state */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                shared_dpll[i] = pll->config;
        }
}

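/**
 * intel_atomic_get_shared_dpll_state - get the shared DPLL state for a transaction
 * @s: atomic state
 *
 * Returns the atomic state's copy of the shared DPLL configuration, filling
 * it from the current per-DPLL configuration the first time it is called
 * for this state.  Callers must hold connection_mutex.
 */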
struct intel_shared_dpll_config *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
        struct intel_atomic_state *state = to_intel_atomic_state(s);

        WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

        if (!state->dpll_set) {
                state->dpll_set = true;

                intel_atomic_duplicate_dpll_state(to_i915(s->dev),
                                                  state->shared_dpll);
        }

        return state->shared_dpll;
}

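/**
 * intel_atomic_state_alloc - allocate an i915 atomic state
 * @dev: DRM device
 *
 * Allocates the driver-private intel_atomic_state and initializes its
 * embedded drm_atomic_state.
 *
 * Returns the new state, or NULL on failure.
 */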
struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
        struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
                kfree(state);
                return NULL;
        }

        return &state->base;
}

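/**
 * intel_atomic_state_clear - clear an i915 atomic state
 * @s: atomic state to clear
 *
 * Clears the base state with the default helper and resets dpll_set so that
 * the shared DPLL state is copied again on the next call to
 * intel_atomic_get_shared_dpll_state().
 */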
void intel_atomic_state_clear(struct drm_atomic_state *s)
{
        struct intel_atomic_state *state = to_intel_atomic_state(s);

        drm_atomic_state_default_clear(&state->base);
        state->dpll_set = false;
}