@@ -41,6 +41,8 @@ static __always_inline unsigned int
__busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id))
{
const struct i915_request *rq;
+ struct dma_fence *current_fence;
+ unsigned int i;
/*
* We have to check the current hw status of the fence as the uABI
@@ -58,40 +60,22 @@ __busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id))
*
* 2. A single i915 request.
*/
- if (dma_fence_is_array(fence)) {
- struct dma_fence_array *array = to_dma_fence_array(fence);
- struct dma_fence **child = array->fences;
- unsigned int nchild = array->num_fences;
-
- do {
- struct dma_fence *current_fence = *child++;
-
- /* Not an i915 fence, can't be busy per above */
- if (!dma_fence_is_i915(current_fence) ||
- !test_bit(I915_FENCE_FLAG_COMPOSITE,
- &current_fence->flags)) {
- return 0;
- }
-
- rq = to_request(current_fence);
- if (!i915_request_completed(rq))
- return flag(rq->engine->uabi_class);
- } while (--nchild);
-
- /* All requests in array complete, not busy */
- return 0;
- } else {
- if (!dma_fence_is_i915(fence))
- return 0;
+ dma_fence_array_for_each(current_fence, i, fence) {
- rq = to_request(fence);
- if (i915_request_completed(rq))
+ /* Not an i915 fence, can't be busy per above */
+ if (!dma_fence_is_i915(current_fence) ||
+ !test_bit(I915_FENCE_FLAG_COMPOSITE, &current_fence->flags))
return 0;
+ rq = to_request(current_fence);
/* Beware type-expansion follies! */
BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
- return flag(rq->engine->uabi_class);
+ if (!i915_request_completed(rq))
+ return flag(rq->engine->uabi_class);
}
+
+ /* All requests in array complete, not busy */
+ return 0;
}
static __always_inline unsigned int
@@ -72,11 +72,6 @@ static void fence_set_priority(struct dma_fence *fence,
rcu_read_unlock();
}
-static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_chain_ops;
-}
-
void i915_gem_fence_wait_priority(struct dma_fence *fence,
const struct i915_sched_attr *attr)
{
@@ -85,25 +80,15 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence,
local_bh_disable();
- /* Recurse once into a fence-array */
- if (dma_fence_is_array(fence)) {
- struct dma_fence_array *array = to_dma_fence_array(fence);
- int i;
+ /* The chain is ordered; if we boost the last, we boost all */
+ dma_fence_chain_for_each(fence, fence) {
+ struct dma_fence *array, *element;
+ unsigned int i;
- for (i = 0; i < array->num_fences; i++)
- fence_set_priority(array->fences[i], attr);
- } else if (__dma_fence_is_chain(fence)) {
- struct dma_fence *iter;
-
- /* The chain is ordered; if we boost the last, we boost all */
- dma_fence_chain_for_each(iter, fence) {
- fence_set_priority(to_dma_fence_chain(iter)->fence,
- attr);
- break;
- }
- dma_fence_put(iter);
- } else {
- fence_set_priority(fence, attr);
+ /* Recurse once into a fence-array */
+ array = dma_fence_chain_contained(fence);
+ dma_fence_array_for_each(element, i, array)
+ fence_set_priority(element, attr);
}
local_bh_enable(); /* kick the tasklets if queues were reprioritised */
@@ -1345,18 +1345,15 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
struct dma_fence *iter;
int err = 0;
- if (!to_dma_fence_chain(fence))
- return __i915_request_await_external(rq, fence);
-
dma_fence_chain_for_each(iter, fence) {
- struct dma_fence_chain *chain = to_dma_fence_chain(iter);
+ struct dma_fence *tmp = dma_fence_chain_contained(iter);
- if (!dma_fence_is_i915(chain->fence)) {
+ if (!dma_fence_is_i915(tmp)) {
err = __i915_request_await_external(rq, iter);
break;
}
- err = i915_request_await_dma_fence(rq, chain->fence);
+ err = i915_request_await_dma_fence(rq, tmp);
if (err < 0)
break;
}
@@ -1386,24 +1383,14 @@ static bool is_same_parallel_context(struct i915_request *to,
int
i915_request_await_execution(struct i915_request *rq,
- struct dma_fence *fence)
+ struct dma_fence *array)
{
- struct dma_fence **child = &fence;
- unsigned int nchild = 1;
+ struct dma_fence *fence;
+ unsigned int i;
int ret;
- if (dma_fence_is_array(fence)) {
- struct dma_fence_array *array = to_dma_fence_array(fence);
-
- /* XXX Error for signal-on-any fence arrays */
+ dma_fence_array_for_each(fence, i, array) {
- child = array->fences;
- nchild = array->num_fences;
- GEM_BUG_ON(!nchild);
- }
-
- do {
- fence = *child++;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
continue;
@@ -1425,7 +1412,7 @@ i915_request_await_execution(struct i915_request *rq,
}
if (ret < 0)
return ret;
- } while (--nchild);
+ }
return 0;
}
@@ -1482,10 +1469,10 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
}
int
-i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
+i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *array)
{
- struct dma_fence **child = &fence;
- unsigned int nchild = 1;
+ struct dma_fence *fence;
+ unsigned int i;
int ret;
/*
@@ -1496,16 +1483,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
* amdgpu and we should not see any incoming fence-array from
* sync-file being in signal-on-any mode.
*/
- if (dma_fence_is_array(fence)) {
- struct dma_fence_array *array = to_dma_fence_array(fence);
-
- child = array->fences;
- nchild = array->num_fences;
- GEM_BUG_ON(!nchild);
- }
+ dma_fence_array_for_each(fence, i, array) {
- do {
- fence = *child++;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
continue;
@@ -1537,7 +1516,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
if (fence->context)
intel_timeline_sync_set(i915_request_timeline(rq),
fence);
- } while (--nchild);
+ }
return 0;
}