/* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* glitter-paths - polygon scan converter
 *
 * Copyright (c) 2008  M Joonas Pihlaja
 * Copyright (c) 2007  David Turner
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* This is the Glitter paths scan converter incorporated into cairo.
 * The source is from commit 734c53237a867a773640bd5b64816249fa1730f8
 * of
 *
 *   https://gitweb.freedesktop.org/?p=users/joonas/glitter-paths
 */
/* Glitter-paths is a stand-alone polygon rasteriser derived from
 * David Turner's reimplementation of Tor Andersson's 15x17
 * supersampling rasteriser from the Apparition graphics library.  The
 * main new feature here is cheaply choosing per-scan line between
 * doing fully analytical coverage computation for an entire row at a
 * time vs. using a supersampling approach.
 *
 * David Turner's code can be found at
 *
 *   http://david.freetype.org/rasterizer-shootout/raster-comparison-20070813.tar.bz2
 *
 * In particular this file incorporates large parts of ftgrays_tor10.h
 * from raster-comparison-20070813.tar.bz2
 */
/* Overview
 *
 * A scan converter's basic purpose is to take polygon edges and
 * convert them into an RLE compressed A8 mask.  This one works in
 * two phases: gathering edges and generating spans.
 *
 * 1) As the user feeds the scan converter edges they are vertically
 * clipped and bucketed into a _polygon_ data structure.  The edges
 * are also snapped from the user's coordinates to the subpixel grid
 * coordinates used during scan conversion.
 *
 *     user
 *      |
 *      | edges
 *      V
 *    polygon buckets
 *
 * 2) Generating spans works by performing a vertical sweep of pixel
 * rows from top to bottom and maintaining an _active_list_ of edges
 * that intersect the row.  From the active list the fill rule
 * determines which edges are the left and right edges of the start of
 * each span, and their contribution is then accumulated into a pixel
 * coverage list (_cell_list_) as coverage deltas.  Once the coverage
 * deltas of all edges are known we can form spans of constant pixel
 * coverage by summing the deltas during a traversal of the cell list.
 * At the end of a pixel row the cell list is sent to a coverage
 * blitter for rendering to some target surface.
 *
 * The pixel coverages are computed by either supersampling the row
 * and box filtering a mono rasterisation, or by computing the exact
 * coverages of edges in the active list.  The supersampling method is
 * used whenever some edge starts or stops within the row or there are
 * edge intersections in the row.
 *
 *   polygon bucket for       \
 *   current pixel row        |
 *      |                     |
 *      | activate new edges  |  Repeat GRID_Y times if we
 *      V                     \  are supersampling this row,
 *   active list              /  or just once if we're computing
 *      |                     |  analytical coverage.
 *      | coverage deltas     |
 *      V                     |
 *   pixel coverage list     /
 *      |
 *      V
 *   coverage blitter
 */
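
/* To make the two phases concrete: a minimal sketch of how a caller
 * inside cairo drives this converter via the wrapper at the bottom of
 * this file (the clip box, fill rule and antialias arguments here are
 * illustrative, not taken from real callers):
 *
 *   cairo_scan_converter_t *c =
 *       _cairo_tor_scan_converter_create (0, 0, 256, 256,
 *                                         CAIRO_FILL_RULE_WINDING,
 *                                         CAIRO_ANTIALIAS_DEFAULT);
 *   status = _cairo_tor_scan_converter_add_polygon (c, &polygon);
 *   if (status == CAIRO_STATUS_SUCCESS)
 *       status = c->generate (c, renderer);   - phase 2: the sweep
 *   c->destroy (c);
 *
 * add_polygon feeds every edge through phase 1 (clip, snap, bucket)
 * and generate() runs the vertical sweep described above. */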
#include "cairoint.h"
#include "cairo-spans-private.h"
#include "cairo-error-private.h"

#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <setjmp.h>

/*-------------------------------------------------------------------------
 * cairo specific config
 */
#define I static

/* Prefer cairo's status type. */
#define GLITTER_HAVE_STATUS_T 1
#define GLITTER_STATUS_SUCCESS CAIRO_STATUS_SUCCESS
#define GLITTER_STATUS_NO_MEMORY CAIRO_STATUS_NO_MEMORY
typedef cairo_status_t glitter_status_t;

/* The input coordinate scale and the rasterisation grid scales. */
#define GLITTER_INPUT_BITS CAIRO_FIXED_FRAC_BITS
#define GRID_X_BITS CAIRO_FIXED_FRAC_BITS
#define GRID_Y 15

/* Set glitter up to use a cairo span renderer to do the coverage
 * blitting. */
struct pool;
struct cell_list;

/*-------------------------------------------------------------------------
 * glitter-paths.h
 */

/* "Input scaled" numbers are fixed precision reals with multiplier
 * 2**GLITTER_INPUT_BITS.  Input coordinates are given to glitter as
 * pixel scaled numbers.  These get converted to the internal grid
 * scaled numbers as soon as possible. Internal overflow is possible
 * if GRID_X/Y inside glitter-paths.c is larger than
 * 1<<GLITTER_INPUT_BITS. */
#ifndef GLITTER_INPUT_BITS
#  define GLITTER_INPUT_BITS 8
#endif
#define GLITTER_INPUT_SCALE (1<<GLITTER_INPUT_BITS)
typedef int glitter_input_scaled_t;

#if !GLITTER_HAVE_STATUS_T
typedef enum {
    GLITTER_STATUS_SUCCESS = 0,
    GLITTER_STATUS_NO_MEMORY
} glitter_status_t;
#endif

#ifndef I
# define I /*static*/
#endif

/* Opaque type for scan converting. */
typedef struct glitter_scan_converter glitter_scan_converter_t;

/* Reset a scan converter to accept polygon edges and set the clip box
 * in pixels.  Allocates O(ymax-ymin) bytes of memory.  The clip box
 * is set to integer pixel coordinates xmin <= x < xmax, ymin <= y <
 * ymax. */
I glitter_status_t
glitter_scan_converter_reset(
    glitter_scan_converter_t *converter,
    int xmin, int ymin,
    int xmax, int ymax);

/* Render the polygon in the scan converter to the given A8 format
 * image raster.  Only the pixels accessible as pixels[y*stride+x] for
 * x,y inside the clip box are written to, where xmin <= x < xmax,
 * ymin <= y < ymax.  The image is assumed to be clear on input.
 *
 * If nonzero_fill is true then the interior of the polygon is
 * computed with the non-zero fill rule.  Otherwise the even-odd fill
 * rule is used.
 *
 * The scan converter must be reset or destroyed after this call. */

/*-------------------------------------------------------------------------
 * glitter-paths.c: Implementation internal types
 */
#include <stdlib.h>
#include <string.h>
#include <limits.h>

/* All polygon coordinates are snapped onto a subsample grid. "Grid
 * scaled" numbers are fixed precision reals with multiplier GRID_X or
 * GRID_Y. */
typedef int grid_scaled_t;
typedef int grid_scaled_x_t;
typedef int grid_scaled_y_t;

/* Default x/y scale factors.
 *  You can either define GRID_X/Y_BITS to get a power-of-two scale
 *  or define GRID_X/Y separately. */
#if !defined(GRID_X) && !defined(GRID_X_BITS)
#  define GRID_X_BITS 8
#endif
#if !defined(GRID_Y) && !defined(GRID_Y_BITS)
#  define GRID_Y 15
#endif

/* Use GRID_X/Y_BITS to define GRID_X/Y if they're available. */
#ifdef GRID_X_BITS
#  define GRID_X (1 << GRID_X_BITS)
#endif
#ifdef GRID_Y_BITS
#  define GRID_Y (1 << GRID_Y_BITS)
#endif

/* The GRID_X_TO_INT_FRAC macro splits a grid scaled coordinate into
 * integer and fractional parts. The integer part is floored. */
#if defined(GRID_X_TO_INT_FRAC)
  /* do nothing */
#elif defined(GRID_X_BITS)
#  define GRID_X_TO_INT_FRAC(x, i, f) \
	_GRID_TO_INT_FRAC_shift(x, i, f, GRID_X_BITS)
#else
#  define GRID_X_TO_INT_FRAC(x, i, f) \
	_GRID_TO_INT_FRAC_general(x, i, f, GRID_X)
#endif

#define _GRID_TO_INT_FRAC_general(t, i, f, m) do {	\
    (i) = (t) / (m);					\
    (f) = (t) % (m);					\
    if ((f) < 0) {					\
	--(i);						\
	(f) += (m);					\
    }							\
} while (0)

#define _GRID_TO_INT_FRAC_shift(t, i, f, b) do {	\
    (f) = (t) & ((1 << (b)) - 1);			\
    (i) = (t) >> (b);					\
} while (0)
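
/* For example, splitting the grid scaled coordinate t = -3 with
 * GRID_X = 256: C division truncates towards zero, giving t/m = 0
 * and t%m = -3, and the fix-up step turns that into i = -1, f = 253,
 * i.e. floor(-3/256) = -1 with a non-negative fraction.  The shift
 * variant lands on the same floored result directly, as -3 >> 8 = -1
 * (arithmetic shift) and -3 & 255 = 253. */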

/* A grid area is a real in [0,1] scaled by 2*GRID_X*GRID_Y.  We want
 * to be able to represent exactly areas of subpixel trapezoids whose
 * vertices are given in grid scaled coordinates.  The scale factor
 * comes from needing to accurately represent the area 0.5*dx*dy of a
 * triangle with base dx and height dy in grid scaled numbers. */
#define GRID_XY (2*GRID_X*GRID_Y) /* Unit area on the grid. */

/* GRID_AREA_TO_ALPHA(area): map [0,GRID_XY] to [0,255]. */
#if GRID_XY == 510
#  define GRID_AREA_TO_ALPHA(c)	  (((c)+1) >> 1)
#elif GRID_XY == 255
#  define  GRID_AREA_TO_ALPHA(c)  (c)
#elif GRID_XY == 64
#  define  GRID_AREA_TO_ALPHA(c)  (((c) << 2) | -(((c) & 0x40) >> 6))
#elif GRID_XY == 128
#  define  GRID_AREA_TO_ALPHA(c)  ((((c) << 1) | -((c) >> 7)) & 255)
#elif GRID_XY == 256
#  define  GRID_AREA_TO_ALPHA(c)  (((c) | -((c) >> 8)) & 255)
#elif GRID_XY == 15
#  define  GRID_AREA_TO_ALPHA(c)  (((c) << 4) + (c))
#elif GRID_XY == 2*256*15
#  define  GRID_AREA_TO_ALPHA(c)  (((c) + ((c)<<4) + 256) >> 9)
#else
#  define  GRID_AREA_TO_ALPHA(c)  (((c)*255 + GRID_XY/2) / GRID_XY)
#endif
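
/* With the cairo configuration above (GRID_X = 1<<8, GRID_Y = 15)
 * GRID_XY is 2*256*15 == 7680, so the branch chosen here computes
 * ((c) + ((c)<<4) + 256) >> 9, i.e. (17*c + 256) / 512.  That is
 * exact rounded scaling because 255/7680 reduces to 17/512; e.g. a
 * half-covered pixel has c = 3840 and maps to (65280 + 256) >> 9 =
 * 128. */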

#define UNROLL3(x) x x x

struct quorem {
    int32_t quo;
    int64_t rem;
};

/* Header for a chunk of memory in a memory pool. */
struct _pool_chunk {
    /* # bytes used in this chunk. */
    size_t size;

    /* # bytes total in this chunk */
    size_t capacity;

    /* Pointer to the previous chunk or %NULL if this is the sentinel
     * chunk in the pool header. */
    struct _pool_chunk *prev_chunk;

    /* Actual data starts here. Well aligned even for 64 bit types. */
    int64_t data;
};

/* The int64_t data member of _pool_chunk just exists to enforce alignment,
 * it shouldn't be included in the allocated size for the struct. */
#define SIZEOF_POOL_CHUNK (sizeof(struct _pool_chunk) - sizeof(int64_t))

/* A memory pool.  This is supposed to be embedded on the stack or
 * within some other structure.  It may optionally be followed by an
 * embedded array from which requests are fulfilled until
 * malloc needs to be called to allocate a first real chunk. */
struct pool {
    /* Chunk we're allocating from. */
    struct _pool_chunk *current;

    jmp_buf *jmp;

    /* Free list of previously allocated chunks.  All have >= default
     * capacity. */
    struct _pool_chunk *first_free;

    /* The default capacity of a chunk. */
    size_t default_capacity;

    /* Header for the sentinel chunk.  Directly following the pool
     * struct should be some space for embedded elements from which
     * the sentinel chunk allocates.  This is expressed as a char
     * array so that the 'int64_t data' member of _pool_chunk isn't
     * included. This way embedding struct pool in other structs works
     * without wasting space. */
    char sentinel[SIZEOF_POOL_CHUNK];
};
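
/* The embedding pattern used throughout this file pairs a pool with
 * a trailing array; e.g. the cell pool below is declared and used
 * roughly as:
 *
 *   struct {
 *       struct pool base[1];
 *       struct cell embedded[32];
 *   } cell_pool;
 *
 *   pool_init (cell_pool.base, &jmp,
 *              256 * sizeof (struct cell),
 *              sizeof (cell_pool.embedded));
 *   struct cell *c = pool_alloc (cell_pool.base, sizeof (struct cell));
 *   pool_reset (cell_pool.base);   - recycle everything for the next row
 *   pool_fini (cell_pool.base);    - free all malloc'd chunks
 *
 * The sentinel chunk hands out the embedded array first, so small
 * jobs never touch malloc at all. */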

/* A polygon edge. */
struct edge {
    /* Next in y-bucket or active list. */
    struct edge *next, *prev;

    /* The clipped y of the top of the edge. */
    grid_scaled_y_t ytop;

    /* Number of subsample rows remaining to scan convert for this
     * edge. */
    grid_scaled_y_t height_left;

    /* Original sign of the edge: +1 for downwards, -1 for upwards
     * edges.  */
    int dir;
    int cell;

    /* Current x coordinate while the edge is on the active
     * list. Initialised to the x coordinate of the top of the
     * edge. The quotient is in grid_scaled_x_t units and the
     * remainder is mod dy in grid_scaled_y_t units. */
    struct quorem x;

    /* Advance of the current x when moving down a subsample line. */
    struct quorem dxdy;

    /* Advance of the current x when moving down a full pixel
     * row. Only initialised when the height of the edge is large
     * enough that there's a chance the edge could be stepped by a
     * full row's worth of subsample rows at a time. */
    struct quorem dxdy_full;

    /* y2-y1 after orienting the edge downwards.  */
    int64_t dy;
};

#define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/GRID_Y)

/* A collection of sorted and vertically clipped edges of the polygon.
 * Edges are moved from the polygon to an active list while scan
 * converting. */
struct polygon {
    /* The vertical clip extents. */
    grid_scaled_y_t ymin, ymax;

    /* Array of edges all starting in the same bucket.  An edge is put
     * into bucket EDGE_Y_BUCKET_INDEX(edge->ytop, polygon->ymin) when
     * it is added to the polygon. */
    struct edge **y_buckets;
    struct edge *y_buckets_embedded[64];

    struct {
	struct pool base[1];
	struct edge embedded[32];
    } edge_pool;
};

/* A cell records the effect on pixel coverage of polygon edges
 * passing through a pixel.  It contains two accumulators of pixel
 * coverage.
 *
 * Consider the effects of a polygon edge on the coverage of a pixel
 * it intersects and that of the following one.  The coverage of the
 * following pixel is the height of the edge multiplied by the width
 * of the pixel, and the coverage of the pixel itself is the area of
 * the trapezoid formed by the edge and the right side of the pixel.
 *
 * +-----------------------+-----------------------+
 * |                       |                       |
 * |                       |                       |
 * |_______________________|_______________________|
 * |   \...................|.......................|\
 * |    \..................|.......................| |
 * |     \.................|.......................| |
 * |      \....covered.....|.......................| |
 * |       \....area.......|.......................| } covered height
 * |        \..............|.......................| |
 * |uncovered\.............|.......................| |
 * |  area    \............|.......................| |
 * |___________\...........|.......................|/
 * |                       |                       |
 * |                       |                       |
 * |                       |                       |
 * +-----------------------+-----------------------+
 *
 * Since the coverage of the following pixel will always be a multiple
 * of the width of the pixel, we can store the height of the covered
 * area instead.  The coverage of the pixel itself is the total
 * coverage minus the uncovered area to the left of the edge.  As
 * it's faster to compute the uncovered area we only store that and
 * subtract it from the total coverage later when forming spans to
 * blit.
 *
 * The heights and areas are signed, with left edges of the polygon
 * having positive sign and right edges having negative sign.  When
 * two edges intersect they swap their left/rightness so their
 * contribution above and below the intersection point must be
 * computed separately. */
struct cell {
    struct cell		*next;
    int			 x;
    int16_t		 uncovered_area;
    int16_t		 covered_height;
};
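
/* A worked example of this accounting on the cairo grid (GRID_X =
 * 256, GRID_Y = 15): a left edge running vertically down a whole
 * pixel row at x = 0.25 of a pixel has fx1 = fx2 = 64, so
 * cell_list_render_edge() below adds covered_height += 15 and
 * uncovered_area += (64 + 64)*15 = 1920 to that pixel's cell.  When
 * blitting, the running cover to the right of the edge becomes
 * 15*2*GRID_X = 7680 = GRID_XY (full coverage) and the pixel's own
 * area is 7680 - 1920 = 5760, which GRID_AREA_TO_ALPHA() maps to
 * 191, i.e. about 0.75 * 255 as expected. */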

/* A cell list represents the scan line sparsely as cells ordered by
 * ascending x.  It is geared towards scanning the cells in order
 * using an internal cursor. */
struct cell_list {
    /* Sentinel nodes */
    struct cell head, tail;

    /* Cursor state for iterating through the cell list. */
    struct cell *cursor, *rewind;

    /* Cells in the cell list are owned by the cell list and are
     * allocated from this pool.  */
    struct {
	struct pool base[1];
	struct cell embedded[32];
    } cell_pool;
};

struct cell_pair {
    struct cell *cell1;
    struct cell *cell2;
};

/* The active list contains edges in the current scan line ordered by
 * the x-coordinate of the intercept of the edge and the scan line. */
struct active_list {
    /* Leftmost edge on the current scan line. */
    struct edge head, tail;

    /* A lower bound on the height of the active edges is used to
     * estimate how soon some active edge ends.  We can't advance the
     * scan conversion by a full pixel row if an edge ends somewhere
     * within it. */
    grid_scaled_y_t min_height;
    int is_vertical;
};

struct glitter_scan_converter {
    struct polygon	polygon[1];
    struct active_list	active[1];
    struct cell_list	coverages[1];

    cairo_half_open_span_t *spans;
    cairo_half_open_span_t spans_embedded[64];

    /* Clip box. */
    grid_scaled_x_t xmin, xmax;
    grid_scaled_y_t ymin, ymax;
};

static struct _pool_chunk *
_pool_chunk_init(
    struct _pool_chunk *p,
    struct _pool_chunk *prev_chunk,
    size_t capacity)
{
    p->prev_chunk = prev_chunk;
    p->size = 0;
    p->capacity = capacity;
    return p;
}

static struct _pool_chunk *
_pool_chunk_create(struct pool *pool, size_t size)
{
    struct _pool_chunk *p;

    p = _cairo_malloc (SIZEOF_POOL_CHUNK + size);
    if (unlikely (NULL == p))
	longjmp (*pool->jmp, _cairo_error (CAIRO_STATUS_NO_MEMORY));

    return _pool_chunk_init(p, pool->current, size);
}

static void
pool_init(struct pool *pool,
	  jmp_buf *jmp,
	  size_t default_capacity,
	  size_t embedded_capacity)
{
    pool->jmp = jmp;
    pool->current = (void*) pool->sentinel;
    pool->first_free = NULL;
    pool->default_capacity = default_capacity;
    _pool_chunk_init(pool->current, NULL, embedded_capacity);
}

static void
pool_fini(struct pool *pool)
{
    struct _pool_chunk *p = pool->current;
    do {
	while (NULL != p) {
	    struct _pool_chunk *prev = p->prev_chunk;
	    if (p != (void *) pool->sentinel)
		free(p);
	    p = prev;
	}
	p = pool->first_free;
	pool->first_free = NULL;
    } while (NULL != p);
}

/* Satisfy an allocation by first allocating a new large enough chunk
 * and adding it to the head of the pool's chunk list. This function
 * is called as a fallback if pool_alloc() couldn't do a quick
 * allocation from the current chunk in the pool. */
static void *
_pool_alloc_from_new_chunk(
    struct pool *pool,
    size_t size)
{
    struct _pool_chunk *chunk;
    void *obj;
    size_t capacity;

    /* If the allocation is smaller than the default chunk size then
     * try getting a chunk off the free list.  Force alloc of a new
     * chunk for large requests. */
    capacity = size;
    chunk = NULL;
    if (size < pool->default_capacity) {
	capacity = pool->default_capacity;
	chunk = pool->first_free;
	if (chunk) {
	    pool->first_free = chunk->prev_chunk;
	    _pool_chunk_init(chunk, pool->current, chunk->capacity);
	}
    }

    if (NULL == chunk)
	chunk = _pool_chunk_create (pool, capacity);
    pool->current = chunk;

    obj = ((unsigned char*)&chunk->data + chunk->size);
    chunk->size += size;
    return obj;
}

/* Allocate size bytes from the pool.  The first allocated address
 * returned from a pool is aligned to 8 bytes.  Subsequent
 * addresses will maintain alignment as long as multiples of 8 are
 * allocated.  Returns the address of a new memory area; on
 * allocation failure the pool's jmp_buf is longjmp()ed instead of
 * returning %NULL.  The pool retains ownership of the returned
 * memory. */
inline static void *
pool_alloc (struct pool *pool, size_t size)
{
    struct _pool_chunk *chunk = pool->current;

    if (size <= chunk->capacity - chunk->size) {
	void *obj = ((unsigned char*)&chunk->data + chunk->size);
	chunk->size += size;
	return obj;
    } else {
	return _pool_alloc_from_new_chunk(pool, size);
    }
}

/* Relinquish all pool_alloced memory back to the pool. */
static void
pool_reset (struct pool *pool)
{
    /* Transfer all used chunks to the chunk free list. */
    struct _pool_chunk *chunk = pool->current;
    if (chunk != (void *) pool->sentinel) {
	while (chunk->prev_chunk != (void *) pool->sentinel) {
	    chunk = chunk->prev_chunk;
	}
	chunk->prev_chunk = pool->first_free;
	pool->first_free = pool->current;
    }
    /* Reset the sentinel as the current chunk. */
    pool->current = (void *) pool->sentinel;
    pool->current->size = 0;
}

/* Rewinds the cell list's cursor to the beginning.  After rewinding
 * we're good to cell_list_find() the cell at any x coordinate. */
inline static void
cell_list_rewind (struct cell_list *cells)
{
    cells->cursor = &cells->head;
}

inline static void
cell_list_maybe_rewind (struct cell_list *cells, int x)
{
    if (x < cells->cursor->x) {
	cells->cursor = cells->rewind;
	if (x < cells->cursor->x)
	    cells->cursor = &cells->head;
    }
}

inline static void
cell_list_set_rewind (struct cell_list *cells)
{
    cells->rewind = cells->cursor;
}

static void
cell_list_init(struct cell_list *cells, jmp_buf *jmp)
{
    pool_init(cells->cell_pool.base, jmp,
	      256*sizeof(struct cell),
	      sizeof(cells->cell_pool.embedded));
    cells->tail.next = NULL;
    cells->tail.x = INT_MAX;
    cells->head.x = INT_MIN;
    cells->head.next = &cells->tail;
    cell_list_rewind (cells);
}

static void
cell_list_fini(struct cell_list *cells)
{
    pool_fini (cells->cell_pool.base);
}

/* Empty the cell list.  This is called at the start of every pixel
 * row. */
inline static void
cell_list_reset (struct cell_list *cells)
{
    cell_list_rewind (cells);
    cells->head.next = &cells->tail;
    pool_reset (cells->cell_pool.base);
}

inline static struct cell *
cell_list_alloc (struct cell_list *cells,
		 struct cell *tail,
		 int x)
{
    struct cell *cell;

    cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
    cell->next = tail->next;
    tail->next = cell;
    cell->x = x;
    /* Zero both int16_t accumulators in a single store. */
    *(uint32_t *)&cell->uncovered_area = 0;

    return cell;
}

/* Find a cell at the given x-coordinate, allocating it if need be;
 * allocation failures longjmp out via the pool's jmp_buf rather than
 * returning %NULL.  Cells must be found with non-decreasing
 * x-coordinate until the cell list is rewound using
 * cell_list_rewind(). Ownership of the returned cell is retained by
 * the cell list. */
inline static struct cell *
cell_list_find (struct cell_list *cells, int x)
{
    struct cell *tail = cells->cursor;

    if (tail->x == x)
	return tail;

    while (1) {
	UNROLL3({
		if (tail->next->x > x)
			break;
		tail = tail->next;
	});
    }

    if (tail->x != x)
	tail = cell_list_alloc (cells, tail, x);
    return cells->cursor = tail;
}

/* Find two cells at x1 and x2.  This is exactly equivalent
 * to
 *
 *   pair.cell1 = cell_list_find(cells, x1);
 *   pair.cell2 = cell_list_find(cells, x2);
 *
 * except with less function call overhead. */
inline static struct cell_pair
cell_list_find_pair(struct cell_list *cells, int x1, int x2)
{
    struct cell_pair pair;

    pair.cell1 = cells->cursor;
    while (1) {
	UNROLL3({
		if (pair.cell1->next->x > x1)
			break;
		pair.cell1 = pair.cell1->next;
	});
    }
    if (pair.cell1->x != x1)
	pair.cell1 = cell_list_alloc (cells, pair.cell1, x1);

    pair.cell2 = pair.cell1;
    while (1) {
	UNROLL3({
		if (pair.cell2->next->x > x2)
			break;
		pair.cell2 = pair.cell2->next;
	});
    }
    if (pair.cell2->x != x2)
	pair.cell2 = cell_list_alloc (cells, pair.cell2, x2);

    cells->cursor = pair.cell2;
    return pair;
}

/* Add a subpixel span covering [x1, x2) to the coverage cells. */
inline static void
cell_list_add_subspan(struct cell_list *cells,
		      grid_scaled_x_t x1,
		      grid_scaled_x_t x2)
{
    int ix1, fx1;
    int ix2, fx2;

    if (x1 == x2)
	return;

    GRID_X_TO_INT_FRAC(x1, ix1, fx1);
    GRID_X_TO_INT_FRAC(x2, ix2, fx2);

    if (ix1 != ix2) {
	struct cell_pair p;
	p = cell_list_find_pair(cells, ix1, ix2);
	p.cell1->uncovered_area += 2*fx1;
	++p.cell1->covered_height;
	p.cell2->uncovered_area -= 2*fx2;
	--p.cell2->covered_height;
    } else {
	struct cell *cell = cell_list_find(cells, ix1);
	cell->uncovered_area += 2*(fx1-fx2);
    }
}

inline static void full_step (struct edge *e)
{
    if (e->dy == 0)
	return;

    e->x.quo += e->dxdy_full.quo;
    e->x.rem += e->dxdy_full.rem;
    if (e->x.rem < 0) {
	e->x.quo--;
	e->x.rem += e->dy;
    } else if (e->x.rem >= e->dy) {
	++e->x.quo;
	e->x.rem -= e->dy;
    }

    e->cell = e->x.quo + (e->x.rem >= e->dy/2);
}
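
/* The quotient/remainder stepping above is plain integer DDA.  As a
 * made-up example, an edge advancing dx = 5 grid units every dy = 3
 * subsample rows carries dxdy = {quo = 1, rem = 2}; three successive
 * steps move x.quo by +1, +2 and +2 (the remainder runs 2, then 4
 * with a carry, then 3 with a carry), i.e. exactly 5 units over 3
 * rows with no accumulated drift.  e->cell additionally rounds x to
 * the nearest grid unit via the rem >= dy/2 test. */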

/* Adds the analytical coverage of an edge crossing the current pixel
 * row to the coverage cells and advances the edge's x position to the
 * following row.
 *
 * This function is only called when we know that during this pixel row:
 *
 * 1) The relative order of all edges on the active list doesn't
 * change.  In particular, no edges intersect within this row to pixel
 * precision.
 *
 * 2) No new edges start in this row.
 *
 * 3) No existing edges end mid-row.
 *
 * This function depends on being called with all edges from the
 * active list in the order they appear on the list (i.e. with
 * non-decreasing x-coordinate.)  */
static void
cell_list_render_edge(struct cell_list *cells,
		      struct edge *edge,
		      int sign)
{
    struct quorem x1, x2;
    grid_scaled_x_t fx1, fx2;
    int ix1, ix2;

    x1 = edge->x;
    full_step (edge);
    x2 = edge->x;

    /* Step back from the sample location (half-subrow) to the pixel origin */
    if (edge->dy) {
	x1.quo -= edge->dxdy.quo / 2;
	x1.rem -= edge->dxdy.rem / 2;
	if (x1.rem < 0) {
	    --x1.quo;
	    x1.rem += edge->dy;
	} else if (x1.rem >= edge->dy) {
	    ++x1.quo;
	    x1.rem -= edge->dy;
	}

	x2.quo -= edge->dxdy.quo / 2;
	x2.rem -= edge->dxdy.rem / 2;
	if (x2.rem < 0) {
	    --x2.quo;
	    x2.rem += edge->dy;
	} else if (x2.rem >= edge->dy) {
	    ++x2.quo;
	    x2.rem -= edge->dy;
	}
    }

    GRID_X_TO_INT_FRAC(x1.quo, ix1, fx1);
    GRID_X_TO_INT_FRAC(x2.quo, ix2, fx2);

    cell_list_maybe_rewind(cells, MIN(ix1, ix2));

    /* Edge is entirely within a column? */
    if (ix1 == ix2) {
	/* We always know that ix1 is >= the cell list cursor in this
	 * case due to the no-intersections precondition.  */
	struct cell *cell = cell_list_find(cells, ix1);
	cell->covered_height += sign*GRID_Y;
	cell->uncovered_area += sign*(fx1 + fx2)*GRID_Y;
	return;
    }

    /* Orient the edge left-to-right. */
    if (ix2 < ix1) {
	struct quorem tx;
	int t;

	t = ix1;
	ix1 = ix2;
	ix2 = t;

	t = fx1;
	fx1 = fx2;
	fx2 = t;

	tx = x1;
	x1 = x2;
	x2 = tx;
    }

    /* Add coverage for all pixels [ix1,ix2] on this row crossed
     * by the edge. */
    {
	struct cell_pair pair;
	struct quorem y;
	int64_t tmp, dx;
	int y_last;

	dx = (x2.quo - x1.quo) * edge->dy + (x2.rem - x1.rem);

	tmp = (ix1 + 1) * GRID_X * edge->dy;
	tmp -= x1.quo * edge->dy + x1.rem;
	tmp *= GRID_Y;

	y.quo = tmp / dx;
	y.rem = tmp % dx;

	/* When rendering a previous edge on the active list we may
	 * advance the cell list cursor past the leftmost pixel of the
	 * current edge even though the two edges don't intersect.
	 * e.g. consider two edges going down and rightwards:
	 *
	 *  --\_+---\_+-----+-----+----
	 *      \_    \_    |     |
	 *      | \_  | \_  |     |
	 *      |   \_|   \_|     |
	 *      |     \_    \_    |
	 *  ----+-----+-\---+-\---+----
	 *
	 * The left edge touches cells past the starting cell of the
	 * right edge.  Fortunately such cases are rare.
	 */

	pair = cell_list_find_pair(cells, ix1, ix1+1);
	pair.cell1->uncovered_area += sign*y.quo*(GRID_X + fx1);
	pair.cell1->covered_height += sign*y.quo;
	y_last = y.quo;

	if (ix1+1 < ix2) {
	    struct cell *cell = pair.cell2;
	    struct quorem dydx_full;

	    dydx_full.quo = GRID_Y * GRID_X * edge->dy / dx;
	    dydx_full.rem = GRID_Y * GRID_X * edge->dy % dx;

	    ++ix1;
	    do {
		y.quo += dydx_full.quo;
		y.rem += dydx_full.rem;
		if (y.rem >= dx) {
		    y.quo++;
		    y.rem -= dx;
		}

		cell->uncovered_area += sign*(y.quo - y_last)*GRID_X;
		cell->covered_height += sign*(y.quo - y_last);
		y_last = y.quo;

		++ix1;
		cell = cell_list_find(cells, ix1);
	    } while (ix1 != ix2);

	    pair.cell2 = cell;
	}
	pair.cell2->uncovered_area += sign*(GRID_Y - y_last)*fx2;
	pair.cell2->covered_height += sign*(GRID_Y - y_last);
    }
}

static void
polygon_init (struct polygon *polygon, jmp_buf *jmp)
{
    polygon->ymin = polygon->ymax = 0;
    polygon->y_buckets = polygon->y_buckets_embedded;
    pool_init (polygon->edge_pool.base, jmp,
	       8192 - sizeof (struct _pool_chunk),
	       sizeof (polygon->edge_pool.embedded));
}

static void
polygon_fini (struct polygon *polygon)
{
    if (polygon->y_buckets != polygon->y_buckets_embedded)
	free (polygon->y_buckets);

    pool_fini (polygon->edge_pool.base);
}

/* Empties the polygon of all edges. The polygon is then prepared to
 * receive new edges and clip them to the vertical range
 * [ymin,ymax). */
static glitter_status_t
polygon_reset (struct polygon *polygon,
	       grid_scaled_y_t ymin,
	       grid_scaled_y_t ymax)
{
    unsigned h = ymax - ymin;
    unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + GRID_Y-1, ymin);

    pool_reset(polygon->edge_pool.base);

    if (unlikely (h > 0x7FFFFFFFU - GRID_Y))
	goto bail_no_mem; /* even if you could, you wouldn't want to. */

    if (polygon->y_buckets != polygon->y_buckets_embedded)
	free (polygon->y_buckets);

    polygon->y_buckets = polygon->y_buckets_embedded;
    if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) {
	polygon->y_buckets = _cairo_malloc_ab (num_buckets,
					       sizeof (struct edge *));
	if (unlikely (NULL == polygon->y_buckets))
	    goto bail_no_mem;
    }
    memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *));

    polygon->ymin = ymin;
    polygon->ymax = ymax;
    return GLITTER_STATUS_SUCCESS;

bail_no_mem:
    polygon->ymin = 0;
    polygon->ymax = 0;
    return GLITTER_STATUS_NO_MEMORY;
}

static void
_polygon_insert_edge_into_its_y_bucket(struct polygon *polygon,
				       struct edge *e)
{
    unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin);
    struct edge **ptail = &polygon->y_buckets[ix];
    e->next = *ptail;
    *ptail = e;
}

static void
active_list_reset (struct active_list *active)
{
    active->head.height_left = INT_MAX;
    active->head.dy = 0;
    active->head.cell = INT_MIN;
    active->head.prev = NULL;
    active->head.next = &active->tail;
    active->tail.prev = &active->head;
    active->tail.next = NULL;
    active->tail.cell = INT_MAX;
    active->tail.height_left = INT_MAX;
    active->tail.dy = 0;
    active->min_height = 0;
    active->is_vertical = 1;
}

static void
active_list_init(struct active_list *active)
{
    active_list_reset(active);
}

/*
 * Merge two sorted edge lists.
 * Input:
 *  - head_a: The head of the first list.
 *  - head_b: The head of the second list; head_b cannot be NULL.
 * Output:
 * Returns the head of the merged list.
 *
 * Implementation notes:
 * To make it fast (in particular, to reduce to an insertion sort whenever
 * one of the two input lists only has a single element) we iterate through
 * a list until its head becomes greater than the head of the other list,
 * then we switch their roles. As soon as one of the two lists is empty, we
 * just attach the other one to the current list and exit.
 * Writes to memory are only needed to "switch" lists (as it also requires
 * attaching to the output list the list which we will be iterating next) and
 * to attach the last non-empty list.
 */
static struct edge *
merge_sorted_edges (struct edge *head_a, struct edge *head_b)
{
    struct edge *head, **next, *prev;
    int32_t x;

    prev = head_a->prev;
    next = &head;
    if (head_a->cell <= head_b->cell) {
	head = head_a;
    } else {
	head = head_b;
	head_b->prev = prev;
	goto start_with_b;
    }

    do {
	x = head_b->cell;
	while (head_a != NULL && head_a->cell <= x) {
	    prev = head_a;
	    next = &head_a->next;
	    head_a = head_a->next;
	}

	head_b->prev = prev;
	*next = head_b;
	if (head_a == NULL)
	    return head;

start_with_b:
	x = head_a->cell;
	while (head_b != NULL && head_b->cell <= x) {
	    prev = head_b;
	    next = &head_b->next;
	    head_b = head_b->next;
	}

	head_a->prev = prev;
	*next = head_a;
	if (head_b == NULL)
	    return head;
    } while (1);
}

/*
 * Sort (part of) a list.
 * Input:
 *  - list: The list to be sorted; list cannot be NULL.
 *  - level: Recursion limit.
 * Output:
 *  - head_out: The head of the sorted list containing the first 2^(level+1) elements of the
 *              input list; if the input list has fewer elements, head_out will be a sorted list
 *              containing all the elements of the input list.
 * Returns the head of the list of unprocessed elements (NULL if the sorted list contains
 * all the elements of the input list).
 *
 * Implementation notes:
 * Special case single element list, unroll/inline the sorting of the first two elements.
 * Some tail recursion is used since we iterate on the bottom-up solution of the problem
 * (we start with a small sorted list and keep merging other lists of the same size to it).
 */
static struct edge *
sort_edges (struct edge *list,
	    unsigned int level,
	    struct edge **head_out)
{
    struct edge *head_other, *remaining;
    unsigned int i;

    head_other = list->next;
    if (head_other == NULL) {
	*head_out = list;
	return NULL;
    }

    remaining = head_other->next;
    if (list->cell <= head_other->cell) {
	*head_out = list;
	head_other->next = NULL;
    } else {
	*head_out = head_other;
	head_other->prev = list->prev;
	head_other->next = list;
	list->prev = head_other;
	list->next = NULL;
    }

    for (i = 0; i < level && remaining; i++) {
	remaining = sort_edges (remaining, i, &head_other);
	*head_out = merge_sorted_edges (*head_out, head_other);
    }

    return remaining;
}

static struct edge *
merge_unsorted_edges (struct edge *head, struct edge *unsorted)
{
    sort_edges (unsorted, UINT_MAX, &unsorted);
    return merge_sorted_edges (head, unsorted);
}
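
/* To see the bottom-up merging at work, consider sorting the cell
 * values 4,1,3,2 (a made-up example): the unrolled head sort in
 * sort_edges() turns the leading pair into 1,4 and leaves 3,2
 * remaining; the i = 0 iteration then recursively sorts 3,2 into 2,3
 * and merges it in, giving 1,2,3,4.  Iteration i merges a sorted run
 * of up to 2^(i+1) elements, so passing UINT_MAX as the level simply
 * means "sort the whole list". */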

/* Test if the edges on the active list can be safely advanced by a
 * full row without intersections or any edges ending. */
inline static int
can_do_full_row (struct active_list *active)
{
    const struct edge *e;
    int prev_x = INT_MIN;

    /* Recomputes the minimum height of all edges on the active
     * list if we have been dropping edges. */
    if (active->min_height <= 0) {
	int min_height = INT_MAX;
	int is_vertical = 1;

	e = active->head.next;
	while (NULL != e) {
	    if (e->height_left < min_height)
		min_height = e->height_left;
	    is_vertical &= e->dy == 0;
	    e = e->next;
	}
	active->is_vertical = is_vertical;
	active->min_height = min_height;
    }

    if (active->min_height < GRID_Y)
	return 0;

    /* Check for intersections as no edges end during the next row. */
    for (e = active->head.next; e != &active->tail; e = e->next) {
	int cell;

	if (e->dy) {
	    struct quorem x = e->x;
	    x.quo += e->dxdy_full.quo;
	    x.rem += e->dxdy_full.rem;
	    if (x.rem < 0) {
		x.quo--;
		x.rem += e->dy;
	    } else if (x.rem >= e->dy) {
		x.quo++;
		x.rem -= e->dy;
	    }
	    cell = x.quo + (x.rem >= e->dy/2);
	} else
	    cell = e->cell;

	if (cell < prev_x)
	    return 0;

	prev_x = cell;
    }

    return 1;
}

/* Merges edges on the given subpixel row from the polygon to the
 * active_list. */
inline static void
active_list_merge_edges_from_bucket(struct active_list *active,
				    struct edge *edges)
{
    active->head.next = merge_unsorted_edges (active->head.next, edges);
}

inline static int
polygon_fill_buckets (struct active_list *active,
		      struct edge *edge,
		      int y,
		      struct edge **buckets)
{
    grid_scaled_y_t min_height = active->min_height;
    int is_vertical = active->is_vertical;
    int max_suby = 0;

    while (edge) {
	struct edge *next = edge->next;
	int suby = edge->ytop - y;

	if (buckets[suby])
	    buckets[suby]->prev = edge;
	edge->next = buckets[suby];
	edge->prev = NULL;
	buckets[suby] = edge;

	if (edge->height_left < min_height)
	    min_height = edge->height_left;
	is_vertical &= edge->dy == 0;

	edge = next;
	if (suby > max_suby)
	    max_suby = suby;
    }

    active->is_vertical = is_vertical;
    active->min_height = min_height;

    return max_suby;
}

static void step (struct edge *edge)
{
    if (edge->dy == 0)
	return;

    edge->x.quo += edge->dxdy.quo;
    edge->x.rem += edge->dxdy.rem;
    if (edge->x.rem < 0) {
	--edge->x.quo;
	edge->x.rem += edge->dy;
    } else if (edge->x.rem >= edge->dy) {
	++edge->x.quo;
	edge->x.rem -= edge->dy;
    }

    edge->cell = edge->x.quo + (edge->x.rem >= edge->dy/2);
}

inline static void
sub_row (struct active_list *active,
	 struct cell_list *coverages,
	 unsigned int mask)
{
    struct edge *edge = active->head.next;
    int xstart = INT_MIN, prev_x = INT_MIN;
    int winding = 0;

    cell_list_rewind (coverages);

    while (&active->tail != edge) {
	struct edge *next = edge->next;
	int xend = edge->cell;

	if (--edge->height_left) {
	    step (edge);
	    if (edge->cell < prev_x) {
		/* The edge overtook its left neighbour; walk back and
		 * re-insert it at its sorted position. */
		struct edge *pos = edge->prev;
		pos->next = next;
		next->prev = pos;
		do {
		    pos = pos->prev;
		} while (edge->cell < pos->cell);
		pos->next->prev = edge;
		edge->next = pos->next;
		edge->prev = pos;
		pos->next = edge;
	    } else
		prev_x = edge->cell;
	    active->min_height = -1;
	} else {
	    edge->prev->next = next;
	    next->prev = edge->prev;
	}

	winding += edge->dir;
	if ((winding & mask) == 0) {
	    if (next->cell != xend) {
		cell_list_add_subspan (coverages, xstart, xend);
		xstart = INT_MIN;
	    }
	} else if (xstart == INT_MIN)
	    xstart = xend;

	edge = next;
    }
}
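
/* The winding mask passed in folds both fill rules into one test:
 * _cairo_tor_scan_converter_generate() passes mask = ~0 for
 * CAIRO_FILL_RULE_WINDING, so (winding & mask) == 0 exactly when the
 * signed crossing count returns to zero, and mask = 1 for even-odd,
 * where only the parity matters.  For example, inside two nested
 * same-direction contours the count is 2: non-zero fill keeps that
 * region filled, while even-odd (2 & 1 == 0) makes it a hole. */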

inline static void dec (struct active_list *a, struct edge *e, int h)
{
    e->height_left -= h;
    if (e->height_left == 0) {
	e->prev->next = e->next;
	e->next->prev = e->prev;
	a->min_height = -1;
    }
}

static void
full_row (struct active_list *active,
	  struct cell_list *coverages,
	  unsigned int mask)
{
    struct edge *left = active->head.next;

    while (&active->tail != left) {
	struct edge *right;
	int winding;

	dec (active, left, GRID_Y);

	winding = left->dir;
	right = left->next;
	do {
	    dec (active, right, GRID_Y);

	    winding += right->dir;
	    if ((winding & mask) == 0 && right->next->cell != right->cell)
		break;

	    full_step (right);

	    right = right->next;
	} while (1);

	cell_list_set_rewind (coverages);
	cell_list_render_edge (coverages, left, +1);
	cell_list_render_edge (coverages, right, -1);

	left = right->next;
    }
}

static void
_glitter_scan_converter_init(glitter_scan_converter_t *converter, jmp_buf *jmp)
{
    polygon_init(converter->polygon, jmp);
    active_list_init(converter->active);
    cell_list_init(converter->coverages, jmp);
    converter->xmin=0;
    converter->ymin=0;
    converter->xmax=0;
    converter->ymax=0;
}

static void
_glitter_scan_converter_fini(glitter_scan_converter_t *self)
{
    if (self->spans != self->spans_embedded)
	free (self->spans);

    polygon_fini(self->polygon);
    cell_list_fini(self->coverages);

    self->xmin=0;
    self->ymin=0;
    self->xmax=0;
    self->ymax=0;
}

static grid_scaled_t
int_to_grid_scaled(int i, int scale)
{
    /* Clamp to max/min representable scaled number. */
    if (i >= 0) {
	if (i >= INT_MAX/scale)
	    i = INT_MAX/scale;
    }
    else {
	if (i <= INT_MIN/scale)
	    i = INT_MIN/scale;
    }

    return i*scale;
}

#define int_to_grid_scaled_x(x) int_to_grid_scaled((x), GRID_X)
#define int_to_grid_scaled_y(x) int_to_grid_scaled((x), GRID_Y)

I glitter_status_t
glitter_scan_converter_reset(
			     glitter_scan_converter_t *converter,
			     int xmin, int ymin,
			     int xmax, int ymax)
{
    glitter_status_t status;
    int max_num_spans;

    converter->xmin = 0; converter->xmax = 0;
    converter->ymin = 0; converter->ymax = 0;

    max_num_spans = xmax - xmin + 1;
    if (max_num_spans > ARRAY_LENGTH(converter->spans_embedded)) {
	converter->spans = _cairo_malloc_ab (max_num_spans,
					     sizeof (cairo_half_open_span_t));
	if (unlikely (converter->spans == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);
    } else
	converter->spans = converter->spans_embedded;

    xmin = int_to_grid_scaled_x(xmin);
    ymin = int_to_grid_scaled_y(ymin);
    xmax = int_to_grid_scaled_x(xmax);
    ymax = int_to_grid_scaled_y(ymax);

    active_list_reset(converter->active);
    cell_list_reset(converter->coverages);
    status = polygon_reset(converter->polygon, ymin, ymax);
    if (status)
	return status;

    converter->xmin = xmin;
    converter->xmax = xmax;
    converter->ymin = ymin;
    converter->ymax = ymax;

    return GLITTER_STATUS_SUCCESS;
}

/* INPUT_TO_GRID_X/Y (in_coord, out_grid_scaled, grid_scale)
 *   These macros convert an input coordinate in the client's
 *   device space to the rasterisation grid.
 */
/* Gah.. this bit of ugly defines INPUT_TO_GRID_X/Y so as to use
 * shifts if possible, and something saneish if not.
 */
#if !defined(INPUT_TO_GRID_Y) && defined(GRID_Y_BITS) && GRID_Y_BITS <= GLITTER_INPUT_BITS
#  define INPUT_TO_GRID_Y(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_Y_BITS)
#else
#  define INPUT_TO_GRID_Y(in, out) INPUT_TO_GRID_general(in, out, GRID_Y)
#endif

#if !defined(INPUT_TO_GRID_X) && defined(GRID_X_BITS) && GRID_X_BITS <= GLITTER_INPUT_BITS
#  define INPUT_TO_GRID_X(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_X_BITS)
#else
#  define INPUT_TO_GRID_X(in, out) INPUT_TO_GRID_general(in, out, GRID_X)
#endif

#define INPUT_TO_GRID_general(in, out, grid_scale) do {		\
    long long tmp__ = (long long)(grid_scale) * (in);	\
    tmp__ += 1 << (GLITTER_INPUT_BITS-1);			\
    tmp__ >>= GLITTER_INPUT_BITS;				\
    (out) = tmp__;						\
} while (0)
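
/* Worked example of the general form: with GLITTER_INPUT_BITS = 8
 * and GRID_Y = 15, the input y = 384 (1.5 pixels in 24.8 fixed
 * point) becomes (15 * 384 + 128) >> 8 = 5888 >> 8 = 23, i.e.
 * 1.5 * 15 = 22.5 subsample rows rounded to nearest.  The x
 * conversion takes the shift path instead, since GRID_X is a power
 * of two. */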

inline static void
polygon_add_edge (struct polygon *polygon,
		  const cairo_edge_t *edge)
{
    struct edge *e;
    grid_scaled_y_t ytop, ybot;
    const cairo_point_t *p1, *p2;

    INPUT_TO_GRID_Y (edge->top, ytop);
    if (ytop < polygon->ymin)
	    ytop = polygon->ymin;

    INPUT_TO_GRID_Y (edge->bottom, ybot);
    if (ybot > polygon->ymax)
	    ybot = polygon->ymax;

    if (ybot <= ytop)
	    return;

    e = pool_alloc (polygon->edge_pool.base, sizeof (struct edge));

    e->ytop = ytop;
    e->height_left = ybot - ytop;

    if (edge->line.p2.y > edge->line.p1.y) {
	    e->dir = edge->dir;
	    p1 = &edge->line.p1;
	    p2 = &edge->line.p2;
    } else {
	    e->dir = -edge->dir;
	    p1 = &edge->line.p2;
	    p2 = &edge->line.p1;
    }

    if (p2->x == p1->x) {
	e->cell = p1->x;
	e->x.quo = p1->x;
	e->x.rem = 0;
	e->dxdy.quo = e->dxdy.rem = 0;
	e->dxdy_full.quo = e->dxdy_full.rem = 0;
	e->dy = 0;
    } else {
	int64_t Ex, Ey, tmp;

	Ex = (int64_t)(p2->x - p1->x) * GRID_X;
	Ey = (int64_t)(p2->y - p1->y) * GRID_Y * (2 << GLITTER_INPUT_BITS);

	e->dxdy.quo = Ex * (2 << GLITTER_INPUT_BITS) / Ey;
	e->dxdy.rem = Ex * (2 << GLITTER_INPUT_BITS) % Ey;

	tmp = (int64_t)(2*ytop + 1) << GLITTER_INPUT_BITS;
	tmp -= (int64_t)p1->y * GRID_Y * 2;
	tmp *= Ex;
	e->x.quo = tmp / Ey;
	e->x.rem = tmp % Ey;

#if GRID_X_BITS == GLITTER_INPUT_BITS
	e->x.quo += p1->x;
#else
	tmp = (int64_t)p1->x * GRID_X;
	e->x.quo += tmp >> GLITTER_INPUT_BITS;
	e->x.rem += ((tmp & ((1 << GLITTER_INPUT_BITS) - 1)) * Ey) / (1 << GLITTER_INPUT_BITS);
#endif

	if (e->x.rem < 0) {
		e->x.quo--;
		e->x.rem += Ey;
	} else if (e->x.rem >= Ey) {
		e->x.quo++;
		e->x.rem -= Ey;
	}

	if (e->height_left >= GRID_Y) {
	    tmp = Ex * (2 * GRID_Y << GLITTER_INPUT_BITS);
	    e->dxdy_full.quo = tmp / Ey;
	    e->dxdy_full.rem = tmp % Ey;
	} else
	    e->dxdy_full.quo = e->dxdy_full.rem = 0;

	e->cell = e->x.quo + (e->x.rem >= Ey/2);
	e->dy = Ey;
    }

    _polygon_insert_edge_into_its_y_bucket (polygon, e);
}

/* Add a new polygon edge to the scan converter.  The edge endpoints
 * are given as cairo fixed-point coordinates, i.e. pixel positions
 * scaled by 2**GLITTER_INPUT_BITS.  If this function fails then the
 * scan converter should be reset or destroyed.  edge->dir must be +1
 * or -1, with the latter reversing the orientation of the edge. */
I void
glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
				 const cairo_edge_t *edge)
{
    polygon_add_edge (converter->polygon, edge);
}

static void
step_edges (struct active_list *active, int count)
{
    struct edge *edge;

    count *= GRID_Y;
    for (edge = active->head.next; edge != &active->tail; edge = edge->next) {
	edge->height_left -= count;
	if (! edge->height_left) {
	    edge->prev->next = edge->next;
	    edge->next->prev = edge->prev;
	    active->min_height = -1;
	}
    }
}

static glitter_status_t
blit_a8 (struct cell_list *cells,
	 cairo_span_renderer_t *renderer,
	 cairo_half_open_span_t *spans,
	 int y, int height,
	 int xmin, int xmax)
{
    struct cell *cell = cells->head.next;
    int prev_x = xmin, last_x = -1;
    int16_t cover = 0, last_cover = 0;
    unsigned num_spans;

    if (cell == &cells->tail)
	return CAIRO_STATUS_SUCCESS;

    /* Skip cells to the left of the clip region. */
    while (cell->x < xmin) {
	cover += cell->covered_height;
	cell = cell->next;
    }
    cover *= GRID_X*2;

    /* Form the spans from the coverages and areas. */
    num_spans = 0;
    for (; cell->x < xmax; cell = cell->next) {
	int x = cell->x;
	int16_t area;

	if (x > prev_x && cover != last_cover) {
	    spans[num_spans].x = prev_x;
	    spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
	    last_cover = cover;
	    last_x = prev_x;
	    ++num_spans;
	}

	cover += cell->covered_height*GRID_X*2;
	area = cover - cell->uncovered_area;

	if (area != last_cover) {
	    spans[num_spans].x = x;
	    spans[num_spans].coverage = GRID_AREA_TO_ALPHA (area);
	    last_cover = area;
	    last_x = x;
	    ++num_spans;
	}

	prev_x = x+1;
    }

    if (prev_x <= xmax && cover != last_cover) {
	spans[num_spans].x = prev_x;
	spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
	last_cover = cover;
	last_x = prev_x;
	++num_spans;
    }

    if (last_x < xmax && last_cover) {
	spans[num_spans].x = xmax;
	spans[num_spans].coverage = 0;
	++num_spans;
    }

    /* Dump them into the renderer. */
    return renderer->render_rows (renderer, y, height, spans, num_spans);
}
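
/* The emitted spans are half-open: spans[i].coverage applies to x in
 * [spans[i].x, spans[i+1].x).  Continuing the worked example from the
 * struct cell comment, a lone left edge whose cell sits at x = 10
 * (covered_height 15, uncovered_area 1920) produces the spans
 * {10, 191} and {11, 255} plus a terminating {xmax, 0}: pixel 10 is
 * three quarters covered and everything from 11 to the clip edge is
 * fully covered, until some right edge's negative deltas close the
 * span. */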

#define GRID_AREA_TO_A1(A)  ((GRID_AREA_TO_ALPHA (A) > 127) ? 255 : 0)
static glitter_status_t
blit_a1 (struct cell_list *cells,
	 cairo_span_renderer_t *renderer,
	 cairo_half_open_span_t *spans,
	 int y, int height,
	 int xmin, int xmax)
{
    struct cell *cell = cells->head.next;
    int prev_x = xmin, last_x = -1;
    int16_t cover = 0;
    uint8_t coverage, last_cover = 0;
    unsigned num_spans;

    if (cell == &cells->tail)
	return CAIRO_STATUS_SUCCESS;

    /* Skip cells to the left of the clip region. */
    while (cell->x < xmin) {
	cover += cell->covered_height;
	cell = cell->next;
    }
    cover *= GRID_X*2;

    /* Form the spans from the coverages and areas. */
    num_spans = 0;
    for (; cell->x < xmax; cell = cell->next) {
	int x = cell->x;
	int16_t area;

	coverage = GRID_AREA_TO_A1 (cover);
	if (x > prev_x && coverage != last_cover) {
	    last_x = spans[num_spans].x = prev_x;
	    last_cover = spans[num_spans].coverage = coverage;
	    ++num_spans;
	}

	cover += cell->covered_height*GRID_X*2;
	area = cover - cell->uncovered_area;

	coverage = GRID_AREA_TO_A1 (area);
	if (coverage != last_cover) {
	    last_x = spans[num_spans].x = x;
	    last_cover = spans[num_spans].coverage = coverage;
	    ++num_spans;
	}

	prev_x = x+1;
    }

    coverage = GRID_AREA_TO_A1 (cover);
    if (prev_x <= xmax && coverage != last_cover) {
	last_x = spans[num_spans].x = prev_x;
	last_cover = spans[num_spans].coverage = coverage;
	++num_spans;
    }

    if (last_x < xmax && last_cover) {
	spans[num_spans].x = xmax;
	spans[num_spans].coverage = 0;
	++num_spans;
    }
    if (num_spans == 1)
	return CAIRO_STATUS_SUCCESS;

    /* Dump them into the renderer. */
    return renderer->render_rows (renderer, y, height, spans, num_spans);
}

I void
glitter_scan_converter_render(glitter_scan_converter_t *converter,
			      unsigned int winding_mask,
			      int antialias,
			      cairo_span_renderer_t *renderer)
{
    int i, j;
    int ymax_i = converter->ymax / GRID_Y;
    int ymin_i = converter->ymin / GRID_Y;
    int xmin_i, xmax_i;
    int h = ymax_i - ymin_i;
    struct polygon *polygon = converter->polygon;
    struct cell_list *coverages = converter->coverages;
    struct active_list *active = converter->active;
    struct edge *buckets[GRID_Y] = { 0 };

    xmin_i = converter->xmin / GRID_X;
    xmax_i = converter->xmax / GRID_X;
    if (xmin_i >= xmax_i)
	return;

    /* Render each pixel row. */
    for (i = 0; i < h; i = j) {
	int do_full_row = 0;

	j = i + 1;

	/* Determine if we can ignore this row or use the full pixel
	 * stepper. */
	if (polygon_fill_buckets (active,
				  polygon->y_buckets[i],
				  (i+ymin_i)*GRID_Y,
				  buckets) == 0) {
	    if (buckets[0]) {
		active_list_merge_edges_from_bucket (active, buckets[0]);
		buckets[0] = NULL;
	    }

	    if (active->head.next == &active->tail) {
		active->min_height = INT_MAX;
		active->is_vertical = 1;
		for (; j < h && ! polygon->y_buckets[j]; j++)
		    ;
		continue;
	    }

	    do_full_row = can_do_full_row (active);
	}

	if (do_full_row) {
	    /* Step by a full pixel row's worth. */
	    full_row (active, coverages, winding_mask);

	    if (active->is_vertical) {
		while (j < h &&
		       polygon->y_buckets[j] == NULL &&
		       active->min_height >= 2*GRID_Y)
		{
		    active->min_height -= GRID_Y;
		    j++;
		}
		if (j != i + 1)
		    step_edges (active, j - (i + 1));
	    }
	} else {
	    int sub;

	    /* Subsample this row. */
	    for (sub = 0; sub < GRID_Y; sub++) {
		if (buckets[sub]) {
		    active_list_merge_edges_from_bucket (active, buckets[sub]);
		    buckets[sub] = NULL;
		}
		sub_row (active, coverages, winding_mask);
	    }
	}

	if (antialias)
	    blit_a8 (coverages, renderer, converter->spans,
		     i+ymin_i, j-i, xmin_i, xmax_i);
	else
	    blit_a1 (coverages, renderer, converter->spans,
		     i+ymin_i, j-i, xmin_i, xmax_i);
	cell_list_reset (coverages);
	active->min_height -= GRID_Y;
    }
}

struct _cairo_tor_scan_converter {
    cairo_scan_converter_t base;

    glitter_scan_converter_t converter[1];
    cairo_fill_rule_t fill_rule;
    cairo_antialias_t antialias;

    jmp_buf jmp;
};

typedef struct _cairo_tor_scan_converter cairo_tor_scan_converter_t;

static void
_cairo_tor_scan_converter_destroy (void *converter)
{
    cairo_tor_scan_converter_t *self = converter;
    if (self == NULL) {
	return;
    }
    _glitter_scan_converter_fini (self->converter);
    free(self);
}

cairo_status_t
_cairo_tor_scan_converter_add_polygon (void		*converter,
				       const cairo_polygon_t *polygon)
{
    cairo_tor_scan_converter_t *self = converter;
    int i;

#if 0
    FILE *file = fopen ("polygon.txt", "w");
    _cairo_debug_print_polygon (file, polygon);
    fclose (file);
#endif

    for (i = 0; i < polygon->num_edges; i++)
	 glitter_scan_converter_add_edge (self->converter, &polygon->edges[i]);

    return CAIRO_STATUS_SUCCESS;
}

static cairo_status_t
_cairo_tor_scan_converter_generate (void			*converter,
				    cairo_span_renderer_t	*renderer)
{
    cairo_tor_scan_converter_t *self = converter;
    cairo_status_t status;

    if ((status = setjmp (self->jmp)))
	return _cairo_scan_converter_set_error (self, _cairo_error (status));

    glitter_scan_converter_render (self->converter,
				   self->fill_rule == CAIRO_FILL_RULE_WINDING ? ~0 : 1,
				   self->antialias != CAIRO_ANTIALIAS_NONE,
				   renderer);
    return CAIRO_STATUS_SUCCESS;
}

cairo_scan_converter_t *
_cairo_tor_scan_converter_create (int			xmin,
				  int			ymin,
				  int			xmax,
				  int			ymax,
				  cairo_fill_rule_t	fill_rule,
				  cairo_antialias_t	antialias)
{
    cairo_tor_scan_converter_t *self;
    cairo_status_t status;

    self = _cairo_calloc (sizeof(struct _cairo_tor_scan_converter));
    if (unlikely (self == NULL)) {
	status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	goto bail_nomem;
    }

    self->base.destroy = _cairo_tor_scan_converter_destroy;
    self->base.generate = _cairo_tor_scan_converter_generate;

    _glitter_scan_converter_init (self->converter, &self->jmp);
    status = glitter_scan_converter_reset (self->converter,
					   xmin, ymin, xmax, ymax);
    if (unlikely (status))
	goto bail;

    self->fill_rule = fill_rule;
    self->antialias = antialias;

    return &self->base;

 bail:
    self->base.destroy(&self->base);
 bail_nomem:
    return _cairo_scan_converter_create_in_error (status);
}