void-packages/srcpkgs/xf86-video-intel/patches/gcc-8.2.0-force_inline.patch
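
Workaround for a build failure with GCC 8.2.0: define an sna_inline
macro that expands to force_inline on x86_64 but to a plain inline
hint everywhere else, then use it in place of force_inline. Several
of the rewritten functions combine force_inline with the sse2 target
attribute (both macros are defined elsewhere in the sna tree;
force_inline is typically __attribute__((always_inline)) inline and
sse2 a target("sse2") attribute). The assumed failure mode, which the
patch itself does not spell out: on 32-bit x86, where SSE2 is not part
of the baseline target, GCC 8 cannot inline an always_inline callee
carrying the sse2 attribute into a non-SSE2 caller, and aborts with
"inlining failed in call to always_inline ...: target specific option
mismatch". A minimal illustrative sketch (hypothetical names, not
part of the patch):

	/* sketch.c - assumed definitions, mirroring the macros above */
	#define force_inline __attribute__((always_inline)) inline
	#define sse2 __attribute__((target("sse2")))

	#if defined(__x86_64__)
	#define sna_inline force_inline	/* SSE2 is baseline; forcing is safe */
	#else
	#define sna_inline inline	/* let GCC fall back to a real call */
	#endif

	/* With force_inline here instead of sna_inline, gcc -m32 fails:
	 * "inlining failed in call to always_inline 'twice':
	 *  target specific option mismatch" */
	sse2 sna_inline static int twice(int x) { return 2 * x; }

	int main(void) { return twice(21) - 42; }
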

--- a/src/sna/sna_render_inline.h	2018-08-10 09:04:33.466268800 +0200
+++ b/src/sna/sna_render_inline.h	2018-08-10 09:06:56.537276181 +0200
@@ -1,6 +1,12 @@
 #ifndef SNA_RENDER_INLINE_H
 #define SNA_RENDER_INLINE_H
 
+#if defined(__x86_64__)
+#define sna_inline force_inline
+#else
+#define sna_inline inline
+#endif
+
 static inline bool need_tiling(struct sna *sna, int16_t width, int16_t height)
 {
 	/* Is the damage area too large to fit in 3D pipeline,
@@ -17,7 +23,7 @@
 		dst->drawable.height > sna->render.max_3d_size);
 }
 
-static force_inline float pack_2s(int16_t x, int16_t y)
+static sna_inline float pack_2s(int16_t x, int16_t y)
 {
 	union {
 		struct sna_coordinate p;
@@ -28,35 +34,35 @@
 	return u.f;
 }
 
-static force_inline int vertex_space(struct sna *sna)
+static sna_inline int vertex_space(struct sna *sna)
 {
 	return sna->render.vertex_size - sna->render.vertex_used;
 }
-static force_inline void vertex_emit(struct sna *sna, float v)
+static sna_inline void vertex_emit(struct sna *sna, float v)
 {
 	assert(sna->render.vertex_used < sna->render.vertex_size);
 	sna->render.vertices[sna->render.vertex_used++] = v;
 }
-static force_inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
+static sna_inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
 {
 	vertex_emit(sna, pack_2s(x, y));
 }
 
-static force_inline int batch_space(struct sna *sna)
+static sna_inline int batch_space(struct sna *sna)
 {
 	assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
 	assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED <= sna->kgem.surface);
 	return sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED;
 }
 
-static force_inline void batch_emit(struct sna *sna, uint32_t dword)
+static sna_inline void batch_emit(struct sna *sna, uint32_t dword)
 {
 	assert(sna->kgem.mode != KGEM_NONE);
 	assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
 	sna->kgem.batch[sna->kgem.nbatch++] = dword;
 }
 
-static force_inline void batch_emit_aligned(struct sna *sna, uint32_t dword, unsigned align)
+static sna_inline void batch_emit_aligned(struct sna *sna, uint32_t dword, unsigned align)
 {
 	assert(sna->kgem.mode != KGEM_NONE);
 	assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
@@ -65,7 +71,7 @@
 	sna->kgem.batch[sna->kgem.nbatch++] = dword;
 }
 
-static force_inline void batch_emit64(struct sna *sna, uint64_t qword)
+static sna_inline void batch_emit64(struct sna *sna, uint64_t qword)
 {
 	assert(sna->kgem.mode != KGEM_NONE);
 	assert(sna->kgem.nbatch + 2 + KGEM_BATCH_RESERVED < sna->kgem.surface);
@@ -73,7 +79,7 @@
 	sna->kgem.nbatch += 2;
 }
 
-static force_inline void batch_emit_float(struct sna *sna, float f)
+static sna_inline void batch_emit_float(struct sna *sna, float f)
 {
 	union {
 		uint32_t dw;
--- a/src/sna/gen4_vertex.c	2018-08-10 09:08:26.380280816 +0200
+++ b/src/sna/gen4_vertex.c	2018-08-10 09:09:55.522285415 +0200
@@ -308,7 +308,7 @@
 #define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y) /* XXX assert(!too_large(x, y)); */
 #define OUT_VERTEX_F(v) vertex_emit(sna, v)
 
-force_inline static float
+sna_inline static float
 compute_linear(const struct sna_composite_channel *channel,
 	       int16_t x, int16_t y)
 {
@@ -350,7 +350,7 @@
 	}
 }
 
-sse2 force_inline static void
+sse2 sna_inline static void
 emit_vertex(struct sna *sna,
 	    const struct sna_composite_op *op,
 	    int16_t srcX, int16_t srcY,
@@ -414,7 +414,7 @@
 	return v;
 }
 
-sse2 force_inline static float *
+sse2 sna_inline static float *
 vemit_vertex(float *v,
 	     const struct sna_composite_op *op,
 	     int16_t x, int16_t y)
@@ -437,7 +437,7 @@
 	} while (--nbox);
 }
 
-sse2 force_inline static void
+sse2 sna_inline static void
 emit_vertex_mask(struct sna *sna,
 		 const struct sna_composite_op *op,
 		 int16_t srcX, int16_t srcY,
@@ -468,7 +468,7 @@
 		    r->dst.x, r->dst.y);
 }
 
-sse2 force_inline static float *
+sse2 sna_inline static float *
 vemit_vertex_mask(float *v,
 		  const struct sna_composite_op *op,
 		  int16_t x, int16_t y)
@@ -2040,7 +2040,7 @@
 	return vb;
 }
 
-sse2 force_inline static void
+sse2 sna_inline static void
 emit_span_vertex(struct sna *sna,
 		 const struct sna_composite_spans_op *op,
 		 int16_t x, int16_t y)