Skia
2D Graphics Library
 All Classes Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
SkColorPriv.h
1 /*
2  * Copyright 2006 The Android Open Source Project
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef SkColorPriv_DEFINED
9 #define SkColorPriv_DEFINED
10 
11 // turn this on for extra debug checking when blending onto 565
12 #ifdef SK_DEBUG
13  #define CHECK_FOR_565_OVERFLOW
14 #endif
15 
16 #include "SkColor.h"
17 #include "SkMath.h"
18 
20 
21 #define SkASSERT_IS_BYTE(x) SkASSERT(0 == ((x) & ~0xFF))
22 
23 /*
24  * Skia's 32bit backend only supports 1 swizzle order at a time (compile-time).
25  * This is specified by 4 defines SK_A32_SHIFT, SK_R32_SHIFT, ... for G and B.
26  *
27  * For easier compatibility with Skia's GPU backend, we further restrict these
28  * to either (in memory-byte-order) RGBA or BGRA. Note that this "order" does
29  * not directly correspond to the same shift-order, since we have to take endianness
30  * into account.
31  *
32  * Here we enforce this constraint.
33  */
34 
35 #ifdef SK_CPU_BENDIAN
36  #define SK_RGBA_R32_SHIFT 24
37  #define SK_RGBA_G32_SHIFT 16
38  #define SK_RGBA_B32_SHIFT 8
39  #define SK_RGBA_A32_SHIFT 0
40 
41  #define SK_BGRA_B32_SHIFT 24
42  #define SK_BGRA_G32_SHIFT 16
43  #define SK_BGRA_R32_SHIFT 8
44  #define SK_BGRA_A32_SHIFT 0
45 #else
46  #define SK_RGBA_R32_SHIFT 0
47  #define SK_RGBA_G32_SHIFT 8
48  #define SK_RGBA_B32_SHIFT 16
49  #define SK_RGBA_A32_SHIFT 24
50 
51  #define SK_BGRA_B32_SHIFT 0
52  #define SK_BGRA_G32_SHIFT 8
53  #define SK_BGRA_R32_SHIFT 16
54  #define SK_BGRA_A32_SHIFT 24
55 #endif
56 
57 #if defined(SK_PMCOLOR_IS_RGBA) && defined(SK_PMCOLOR_IS_BGRA)
58  #error "can't define PMCOLOR to be RGBA and BGRA"
59 #endif
60 
61 #define LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_RGBA \
62  (SK_A32_SHIFT == SK_RGBA_A32_SHIFT && \
63  SK_R32_SHIFT == SK_RGBA_R32_SHIFT && \
64  SK_G32_SHIFT == SK_RGBA_G32_SHIFT && \
65  SK_B32_SHIFT == SK_RGBA_B32_SHIFT)
66 
67 #define LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_BGRA \
68  (SK_A32_SHIFT == SK_BGRA_A32_SHIFT && \
69  SK_R32_SHIFT == SK_BGRA_R32_SHIFT && \
70  SK_G32_SHIFT == SK_BGRA_G32_SHIFT && \
71  SK_B32_SHIFT == SK_BGRA_B32_SHIFT)
72 
73 
74 #define SK_A_INDEX (SK_A32_SHIFT/8)
75 #define SK_R_INDEX (SK_R32_SHIFT/8)
76 #define SK_G_INDEX (SK_G32_SHIFT/8)
77 #define SK_B_INDEX (SK_B32_SHIFT/8)
78 
79 #if defined(SK_PMCOLOR_IS_RGBA) && !LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_RGBA
80  #error "SK_PMCOLOR_IS_RGBA does not match SK_*32_SHIFT values"
81 #endif
82 
83 #if defined(SK_PMCOLOR_IS_BGRA) && !LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_BGRA
84  #error "SK_PMCOLOR_IS_BGRA does not match SK_*32_SHIFT values"
85 #endif
86 
87 #if !defined(SK_PMCOLOR_IS_RGBA) && !defined(SK_PMCOLOR_IS_BGRA)
88  // deduce which to define from the _SHIFT defines
89 
90  #if LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_RGBA
91  #define SK_PMCOLOR_IS_RGBA
92  #elif LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_BGRA
93  #define SK_PMCOLOR_IS_BGRA
94  #else
95  #error "need 32bit packing to be either RGBA or BGRA"
96  #endif
97 #endif
98 
99 // hide these now that we're done
100 #undef LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_RGBA
101 #undef LOCAL_PMCOLOR_SHIFTS_EQUIVALENT_TO_BGRA
102 
104 
105 // Reverse the bytes corresponding to RED and BLUE in a packed pixel. Note the
106 // pair of them are in the same 2 slots in both RGBA and BGRA, thus there is
107 // no need to pass in the colortype to this function.
108 static inline uint32_t SkSwizzle_RB(uint32_t c) {
109  static const uint32_t kRBMask = (0xFF << SK_R32_SHIFT) | (0xFF << SK_B32_SHIFT);
110 
111  unsigned c0 = (c >> SK_R32_SHIFT) & 0xFF;
112  unsigned c1 = (c >> SK_B32_SHIFT) & 0xFF;
113  return (c & ~kRBMask) | (c0 << SK_B32_SHIFT) | (c1 << SK_R32_SHIFT);
114 }
115 
116 static inline uint32_t SkPackARGB_as_RGBA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
117  SkASSERT_IS_BYTE(a);
118  SkASSERT_IS_BYTE(r);
119  SkASSERT_IS_BYTE(g);
120  SkASSERT_IS_BYTE(b);
121  return (a << SK_RGBA_A32_SHIFT) | (r << SK_RGBA_R32_SHIFT) |
122  (g << SK_RGBA_G32_SHIFT) | (b << SK_RGBA_B32_SHIFT);
123 }
124 
125 static inline uint32_t SkPackARGB_as_BGRA(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
126  SkASSERT_IS_BYTE(a);
127  SkASSERT_IS_BYTE(r);
128  SkASSERT_IS_BYTE(g);
129  SkASSERT_IS_BYTE(b);
130  return (a << SK_BGRA_A32_SHIFT) | (r << SK_BGRA_R32_SHIFT) |
131  (g << SK_BGRA_G32_SHIFT) | (b << SK_BGRA_B32_SHIFT);
132 }
133 
// Convert an RGBA-ordered pixel to the native SkPMColor layout.
// A no-op when this build's PMColor layout is already RGBA.
134 static inline SkPMColor SkSwizzle_RGBA_to_PMColor(uint32_t c) {
135 #ifdef SK_PMCOLOR_IS_RGBA
136  return c;
137 #else
138  return SkSwizzle_RB(c);
139 #endif
140 }
141 
// Convert a BGRA-ordered pixel to the native SkPMColor layout.
// A no-op when this build's PMColor layout is already BGRA.
142 static inline SkPMColor SkSwizzle_BGRA_to_PMColor(uint32_t c) {
143 #ifdef SK_PMCOLOR_IS_BGRA
144  return c;
145 #else
146  return SkSwizzle_RB(c);
147 #endif
148 }
149 
151 
153 
154 #define SK_ITU_BT709_LUM_COEFF_R (0.2126f)
155 #define SK_ITU_BT709_LUM_COEFF_G (0.7152f)
156 #define SK_ITU_BT709_LUM_COEFF_B (0.0722f)
157 
160 
161 #define SK_LUM_COEFF_R SK_ITU_BT709_LUM_COEFF_R
162 #define SK_LUM_COEFF_G SK_ITU_BT709_LUM_COEFF_G
163 #define SK_LUM_COEFF_B SK_ITU_BT709_LUM_COEFF_B
164 
169 static inline U8CPU SkComputeLuminance(U8CPU r, U8CPU g, U8CPU b) {
170  //The following is
171  //r * SK_LUM_COEFF_R + g * SK_LUM_COEFF_G + b * SK_LUM_COEFF_B
172  //with SK_LUM_COEFF_X in 1.8 fixed point (rounding adjusted to sum to 256).
173  return (r * 54 + g * 183 + b * 19) >> 8;
174 }
175 
182 static inline unsigned SkAlpha255To256(U8CPU alpha) {
183  SkASSERT(SkToU8(alpha) == alpha);
184  // this one assues that blending on top of an opaque dst keeps it that way
185  // even though it is less accurate than a+(a>>7) for non-opaque dsts
186  return alpha + 1;
187 }
188 
193 static inline unsigned Sk255To256(U8CPU value) {
194  SkASSERT(SkToU8(value) == value);
195  return value + (value >> 7);
196 }
197 
201 #define SkAlphaMul(value, alpha256) (((value) * (alpha256)) >> 8)
202 
// Return roughly 256 - value * alpha256 / 256, mapped onto 0..256 — i.e.
// the "inverse" weight left for the destination after scaling value by
// alpha256 (both in 0..256). NOTE(review): the non-legacy branch uses the
// (x + (x >> 8)) >> 8 rounding trick on 0xFFFF - value*alpha256.
206 static inline U16CPU SkAlphaMulInv256(U16CPU value, U16CPU alpha256) {
207 #ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
208  return SkAlpha255To256(255 - SkAlphaMul(value, alpha256));
209 #else
210  unsigned prod = 0xFFFF - value * alpha256;
211  return (prod + (prod >> 8)) >> 8;
212 #endif
213 }
214 
215 // The caller may want negative values, so keep all params signed (int)
216 // so we don't accidentally slip into unsigned math and lose the sign
217 // extension when we shift (in SkAlphaMul)
218 static inline int SkAlphaBlend(int src, int dst, int scale256) {
219  SkASSERT((unsigned)scale256 <= 256);
220  return dst + SkAlphaMul(src - dst, scale256);
221 }
222 
228 static inline int SkAlphaBlend255(S16CPU src, S16CPU dst, U8CPU alpha) {
229  SkASSERT((int16_t)src == src);
230  SkASSERT((int16_t)dst == dst);
231  SkASSERT((uint8_t)alpha == alpha);
232 
233  int prod = (src - dst) * alpha + 128;
234  prod = (prod + (prod >> 8)) >> 8;
235  return dst + prod;
236 }
237 
238 static inline U8CPU SkUnitScalarClampToByte(SkScalar x) {
239  return static_cast<U8CPU>(SkScalarPin(x, 0, 1) * 255 + 0.5);
240 }
241 
242 #define SK_R16_BITS 5
243 #define SK_G16_BITS 6
244 #define SK_B16_BITS 5
245 
246 #define SK_R16_SHIFT (SK_B16_BITS + SK_G16_BITS)
247 #define SK_G16_SHIFT (SK_B16_BITS)
248 #define SK_B16_SHIFT 0
249 
250 #define SK_R16_MASK ((1 << SK_R16_BITS) - 1)
251 #define SK_G16_MASK ((1 << SK_G16_BITS) - 1)
252 #define SK_B16_MASK ((1 << SK_B16_BITS) - 1)
253 
254 #define SkGetPackedR16(color) (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
255 #define SkGetPackedG16(color) (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
256 #define SkGetPackedB16(color) (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
257 
258 #define SkR16Assert(r) SkASSERT((unsigned)(r) <= SK_R16_MASK)
259 #define SkG16Assert(g) SkASSERT((unsigned)(g) <= SK_G16_MASK)
260 #define SkB16Assert(b) SkASSERT((unsigned)(b) <= SK_B16_MASK)
261 
262 static inline uint16_t SkPackRGB16(unsigned r, unsigned g, unsigned b) {
263  SkASSERT(r <= SK_R16_MASK);
264  SkASSERT(g <= SK_G16_MASK);
265  SkASSERT(b <= SK_B16_MASK);
266 
267  return SkToU16((r << SK_R16_SHIFT) | (g << SK_G16_SHIFT) | (b << SK_B16_SHIFT));
268 }
269 
270 #define SK_R16_MASK_IN_PLACE (SK_R16_MASK << SK_R16_SHIFT)
271 #define SK_G16_MASK_IN_PLACE (SK_G16_MASK << SK_G16_SHIFT)
272 #define SK_B16_MASK_IN_PLACE (SK_B16_MASK << SK_B16_SHIFT)
273 
277 static inline uint32_t SkExpand_rgb_16(U16CPU c) {
278  SkASSERT(c == (uint16_t)c);
279 
280  return ((c & SK_G16_MASK_IN_PLACE) << 16) | (c & ~SK_G16_MASK_IN_PLACE);
281 }
282 
289 static inline U16CPU SkCompact_rgb_16(uint32_t c) {
290  return ((c >> 16) & SK_G16_MASK_IN_PLACE) | (c & ~SK_G16_MASK_IN_PLACE);
291 }
292 
298 static inline U16CPU SkAlphaMulRGB16(U16CPU c, unsigned scale) {
299  return SkCompact_rgb_16(SkExpand_rgb_16(c) * (scale >> 3) >> 5);
300 }
301 
302 // this helper explicitly returns a clean 16bit value (but slower)
303 #define SkAlphaMulRGB16_ToU16(c, s) (uint16_t)SkAlphaMulRGB16(c, s)
304 
310 static inline U16CPU SkBlend32_RGB16(uint32_t src_expand, uint16_t dst, unsigned scale) {
311  uint32_t dst_expand = SkExpand_rgb_16(dst) * scale;
312  return SkCompact_rgb_16((src_expand + dst_expand) >> 5);
313 }
314 
320 static inline U16CPU SkBlendRGB16(U16CPU src, U16CPU dst, int srcScale) {
321  SkASSERT((unsigned)srcScale <= 256);
322 
323  srcScale >>= 3;
324 
325  uint32_t src32 = SkExpand_rgb_16(src);
326  uint32_t dst32 = SkExpand_rgb_16(dst);
327  return SkCompact_rgb_16(dst32 + ((src32 - dst32) * srcScale >> 5));
328 }
329 
330 static inline void SkBlendRGB16(const uint16_t src[], uint16_t dst[],
331  int srcScale, int count) {
332  SkASSERT(count > 0);
333  SkASSERT((unsigned)srcScale <= 256);
334 
335  srcScale >>= 3;
336 
337  do {
338  uint32_t src32 = SkExpand_rgb_16(*src++);
339  uint32_t dst32 = SkExpand_rgb_16(*dst);
340  *dst++ = static_cast<uint16_t>(
341  SkCompact_rgb_16(dst32 + ((src32 - dst32) * srcScale >> 5)));
342  } while (--count > 0);
343 }
344 
// Add two 565 pixels channel-wise. In debug builds, assert that no channel
// overflows its field; in release builds this is a bare add, so the caller
// must guarantee that.
345 #ifdef SK_DEBUG
346  static inline U16CPU SkRGB16Add(U16CPU a, U16CPU b) {
347  SkASSERT(SkGetPackedR16(a) + SkGetPackedR16(b) <= SK_R16_MASK);
348  SkASSERT(SkGetPackedG16(a) + SkGetPackedG16(b) <= SK_G16_MASK);
349  SkASSERT(SkGetPackedB16(a) + SkGetPackedB16(b) <= SK_B16_MASK);
350 
351  return a + b;
352  }
353 #else
354  #define SkRGB16Add(a, b) ((a) + (b))
355 #endif
356 
358 
359 #define SK_A32_BITS 8
360 #define SK_R32_BITS 8
361 #define SK_G32_BITS 8
362 #define SK_B32_BITS 8
363 
364 #define SK_A32_MASK ((1 << SK_A32_BITS) - 1)
365 #define SK_R32_MASK ((1 << SK_R32_BITS) - 1)
366 #define SK_G32_MASK ((1 << SK_G32_BITS) - 1)
367 #define SK_B32_MASK ((1 << SK_B32_BITS) - 1)
368 
369 #define SkGetPackedA32(packed) ((uint32_t)((packed) << (24 - SK_A32_SHIFT)) >> 24)
370 #define SkGetPackedR32(packed) ((uint32_t)((packed) << (24 - SK_R32_SHIFT)) >> 24)
371 #define SkGetPackedG32(packed) ((uint32_t)((packed) << (24 - SK_G32_SHIFT)) >> 24)
372 #define SkGetPackedB32(packed) ((uint32_t)((packed) << (24 - SK_B32_SHIFT)) >> 24)
373 
374 #define SkA32Assert(a) SkASSERT((unsigned)(a) <= SK_A32_MASK)
375 #define SkR32Assert(r) SkASSERT((unsigned)(r) <= SK_R32_MASK)
376 #define SkG32Assert(g) SkASSERT((unsigned)(g) <= SK_G32_MASK)
377 #define SkB32Assert(b) SkASSERT((unsigned)(b) <= SK_B32_MASK)
378 
379 #ifdef SK_DEBUG
380  #define SkPMColorAssert(color_value) \
381  do { \
382  SkPMColor pm_color_value = (color_value); \
383  uint32_t alpha_color_value = SkGetPackedA32(pm_color_value); \
384  SkA32Assert(alpha_color_value); \
385  SkASSERT(SkGetPackedR32(pm_color_value) <= alpha_color_value); \
386  SkASSERT(SkGetPackedG32(pm_color_value) <= alpha_color_value); \
387  SkASSERT(SkGetPackedB32(pm_color_value) <= alpha_color_value); \
388  } while (false)
389 #else
390  #define SkPMColorAssert(c)
391 #endif
392 
393 static inline bool SkPMColorValid(SkPMColor c) {
394  auto a = SkGetPackedA32(c);
395  bool valid = a <= SK_A32_MASK
396  && SkGetPackedR32(c) <= a
397  && SkGetPackedG32(c) <= a
398  && SkGetPackedB32(c) <= a;
399  if (valid) {
400  SkPMColorAssert(c); // Make sure we're consistent when it counts.
401  }
402  return valid;
403 }
404 
409 static inline SkPMColor SkPackARGB32(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
410  SkA32Assert(a);
411  SkASSERT(r <= a);
412  SkASSERT(g <= a);
413  SkASSERT(b <= a);
414 
415  return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
416  (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
417 }
418 
419 static inline uint32_t SkPackPMColor_as_RGBA(SkPMColor c) {
420  return SkPackARGB_as_RGBA(SkGetPackedA32(c), SkGetPackedR32(c),
421  SkGetPackedG32(c), SkGetPackedB32(c));
422 }
423 
424 static inline uint32_t SkPackPMColor_as_BGRA(SkPMColor c) {
425  return SkPackARGB_as_BGRA(SkGetPackedA32(c), SkGetPackedR32(c),
426  SkGetPackedG32(c), SkGetPackedB32(c));
427 }
428 
436 static inline SkPMColor SkFourByteInterp256(SkPMColor src, SkPMColor dst,
437  unsigned scale) {
438  unsigned a = SkAlphaBlend(SkGetPackedA32(src), SkGetPackedA32(dst), scale);
439  unsigned r = SkAlphaBlend(SkGetPackedR32(src), SkGetPackedR32(dst), scale);
440  unsigned g = SkAlphaBlend(SkGetPackedG32(src), SkGetPackedG32(dst), scale);
441  unsigned b = SkAlphaBlend(SkGetPackedB32(src), SkGetPackedB32(dst), scale);
442 
443  return SkPackARGB32(a, r, g, b);
444 }
445 
452 static inline SkPMColor SkFourByteInterp(SkPMColor src, SkPMColor dst,
453  U8CPU srcWeight) {
454  unsigned scale = SkAlpha255To256(srcWeight);
455  return SkFourByteInterp256(src, dst, scale);
456 }
457 
/** Split a 32-bit pixel into two words: *ag receives the bytes from the odd
    slots (0x00AA00GG) and *rb the bytes from the even slots (0x00RR00BB),
    each component left with 8 bits of headroom for parallel multiplies.
*/
static inline void SkSplay(uint32_t color, uint32_t* ag, uint32_t* rb) {
    const uint32_t mask = 0x00FF00FF;
    uint32_t hi = (color >> 8) & mask;
    uint32_t lo = color & mask;
    *ag = hi;
    *rb = lo;
}
466 
/** Splay a 32-bit pixel into one 64-bit word, 0x00AA00GG00RR00BB, giving
    every component 8 bits of headroom for parallel multiplies.
*/
static inline uint64_t SkSplay(uint32_t color) {
    const uint32_t mask = 0x00FF00FF;
    uint64_t hi = (color >> 8) & mask;  // 0x0000000000AA00GG
    uint64_t lo = color & mask;         // 0x0000000000RR00BB
    return (hi << 32) | lo;             // 0x00AA00GG00RR00BB
}
478 
/** Recombine splayed ag/rb words — each lane now 16 bits wide after a *256
    multiply — into a 32-bit pixel, keeping the high byte of each lane.
*/
static inline uint32_t SkUnsplay(uint32_t ag, uint32_t rb) {
    const uint32_t mask = 0xFF00FF00;
    uint32_t hi = ag & mask;         // AAxxGGxx -> AA00GG00
    uint32_t lo = (rb & mask) >> 8;  // RRxxBBxx -> 00RR00BB
    return hi | lo;
}
486 
/** Recombine a splayed 64-bit word — each lane 16 bits wide after a *256
    multiply — into a 32-bit premultiplied pixel, keeping the high byte of
    each lane.
*/
static inline uint32_t SkUnsplay(uint64_t agrb) {
    const uint32_t mask = 0xFF00FF00;
    uint32_t rb = (uint32_t)((agrb & mask) >> 8);   // 0x00RR00BB
    uint32_t ag = (uint32_t)((agrb >> 32) & mask);  // 0xAA00GG00
    return ag | rb;                                 // 0xAARRGGBB
}
497 
498 static inline SkPMColor SkFastFourByteInterp256_32(SkPMColor src, SkPMColor dst, unsigned scale) {
499  SkASSERT(scale <= 256);
500 
501  // Two 8-bit blends per two 32-bit registers, with space to make sure the math doesn't collide.
502  uint32_t src_ag, src_rb, dst_ag, dst_rb;
503  SkSplay(src, &src_ag, &src_rb);
504  SkSplay(dst, &dst_ag, &dst_rb);
505 
506  const uint32_t ret_ag = src_ag * scale + (256 - scale) * dst_ag;
507  const uint32_t ret_rb = src_rb * scale + (256 - scale) * dst_rb;
508 
509  return SkUnsplay(ret_ag, ret_rb);
510 }
511 
512 static inline SkPMColor SkFastFourByteInterp256_64(SkPMColor src, SkPMColor dst, unsigned scale) {
513  SkASSERT(scale <= 256);
514  // Four 8-bit blends in one 64-bit register, with space to make sure the math doesn't collide.
515  return SkUnsplay(SkSplay(src) * scale + (256-scale) * SkSplay(dst));
516 }
517 
518 // TODO(mtklein): Replace slow versions with fast versions, using scale + (scale>>7) everywhere.
519 
523 static inline SkPMColor SkFastFourByteInterp256(SkPMColor src, SkPMColor dst, unsigned scale) {
524  // On a 64-bit machine, _64 is about 10% faster than _32, but ~40% slower on a 32-bit machine.
525  if (sizeof(void*) == 4) {
526  return SkFastFourByteInterp256_32(src, dst, scale);
527  } else {
528  return SkFastFourByteInterp256_64(src, dst, scale);
529  }
530 }
531 
536 static inline SkPMColor SkFastFourByteInterp(SkPMColor src,
537  SkPMColor dst,
538  U8CPU srcWeight) {
539  SkASSERT(srcWeight <= 255);
540  // scale = srcWeight + (srcWeight >> 7) is more accurate than
541  // scale = srcWeight + 1, but 7% slower
542  return SkFastFourByteInterp256(src, dst, srcWeight + (srcWeight >> 7));
543 }
544 
549 static inline SkPMColor SkPackARGB32NoCheck(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
550  return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
551  (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
552 }
553 
554 static inline
555 SkPMColor SkPremultiplyARGBInline(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
556  SkA32Assert(a);
557  SkR32Assert(r);
558  SkG32Assert(g);
559  SkB32Assert(b);
560 
561  if (a != 255) {
562  r = SkMulDiv255Round(r, a);
563  g = SkMulDiv255Round(g, a);
564  b = SkMulDiv255Round(b, a);
565  }
566  return SkPackARGB32(a, r, g, b);
567 }
568 
569 // When Android is compiled optimizing for size, SkAlphaMulQ doesn't get
570 // inlined; forcing inlining significantly improves performance.
571 static SK_ALWAYS_INLINE uint32_t SkAlphaMulQ(uint32_t c, unsigned scale) {
572  uint32_t mask = 0xFF00FF;
573 
574  uint32_t rb = ((c & mask) * scale) >> 8;
575  uint32_t ag = ((c >> 8) & mask) * scale;
576  return (rb & mask) | (ag & ~mask);
577 }
578 
579 static inline SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst) {
580  return src + SkAlphaMulQ(dst, SkAlpha255To256(255 - SkGetPackedA32(src)));
581 }
582 
// Interpolate src toward dst by scale in [0, 256]: 256 yields src, 0 yields
// dst.
586 static inline SkPMColor SkPMLerp(SkPMColor src, SkPMColor dst, unsigned scale) {
587 #ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
588  return SkAlphaMulQ(src, scale) + SkAlphaMulQ(dst, 256 - scale);
589 #else
590  return SkFastFourByteInterp256(src, dst, scale);
591 #endif
592 }
593 
// Src-over blend of premultiplied src into dst, with src first scaled by
// the 8-bit coverage aa (0..255).
594 static inline SkPMColor SkBlendARGB32(SkPMColor src, SkPMColor dst, U8CPU aa) {
595  SkASSERT((unsigned)aa <= 255);
596 
597  unsigned src_scale = SkAlpha255To256(aa);
598 #ifdef SK_SUPPORT_LEGACY_BROKEN_LERP
599  unsigned dst_scale = SkAlpha255To256(255 - SkAlphaMul(SkGetPackedA32(src), src_scale));
600 
601  return SkAlphaMulQ(src, src_scale) + SkAlphaMulQ(dst, dst_scale);
602 #else
// dst keeps the weight left over after src's (coverage-scaled) alpha.
603  unsigned dst_scale = SkAlphaMulInv256(SkGetPackedA32(src), src_scale);
604 
// Blend r/b and a/g lane pairs with 8 bits of headroom per channel.
605  const uint32_t mask = 0xFF00FF;
606 
607  uint32_t src_rb = (src & mask) * src_scale;
608  uint32_t src_ag = ((src >> 8) & mask) * src_scale;
609 
610  uint32_t dst_rb = (dst & mask) * dst_scale;
611  uint32_t dst_ag = ((dst >> 8) & mask) * dst_scale;
612 
613  return (((src_rb + dst_rb) >> 8) & mask) | ((src_ag + dst_ag) & ~mask);
614 #endif
615 }
616 
618 // Convert a 32bit pixel to a 16bit pixel (no dither)
619 
620 #define SkR32ToR16_MACRO(r) ((unsigned)(r) >> (SK_R32_BITS - SK_R16_BITS))
621 #define SkG32ToG16_MACRO(g) ((unsigned)(g) >> (SK_G32_BITS - SK_G16_BITS))
622 #define SkB32ToB16_MACRO(b) ((unsigned)(b) >> (SK_B32_BITS - SK_B16_BITS))
623 
// Truncating 8-bit -> 565-width channel conversions. Debug builds wrap the
// macros in functions that range-check their input first.
624 #ifdef SK_DEBUG
625  static inline unsigned SkR32ToR16(unsigned r) {
626  SkR32Assert(r);
627  return SkR32ToR16_MACRO(r);
628  }
629  static inline unsigned SkG32ToG16(unsigned g) {
630  SkG32Assert(g);
631  return SkG32ToG16_MACRO(g);
632  }
633  static inline unsigned SkB32ToB16(unsigned b) {
634  SkB32Assert(b);
635  return SkB32ToB16_MACRO(b);
636  }
637 #else
638  #define SkR32ToR16(r) SkR32ToR16_MACRO(r)
639  #define SkG32ToG16(g) SkG32ToG16_MACRO(g)
640  #define SkB32ToB16(b) SkB32ToB16_MACRO(b)
641 #endif
642 
643 #define SkPacked32ToR16(c) (((unsigned)(c) >> (SK_R32_SHIFT + SK_R32_BITS - SK_R16_BITS)) & SK_R16_MASK)
644 #define SkPacked32ToG16(c) (((unsigned)(c) >> (SK_G32_SHIFT + SK_G32_BITS - SK_G16_BITS)) & SK_G16_MASK)
645 #define SkPacked32ToB16(c) (((unsigned)(c) >> (SK_B32_SHIFT + SK_B32_BITS - SK_B16_BITS)) & SK_B16_MASK)
646 
647 static inline U16CPU SkPixel32ToPixel16(SkPMColor c) {
648  unsigned r = ((c >> (SK_R32_SHIFT + (8 - SK_R16_BITS))) & SK_R16_MASK) << SK_R16_SHIFT;
649  unsigned g = ((c >> (SK_G32_SHIFT + (8 - SK_G16_BITS))) & SK_G16_MASK) << SK_G16_SHIFT;
650  unsigned b = ((c >> (SK_B32_SHIFT + (8 - SK_B16_BITS))) & SK_B16_MASK) << SK_B16_SHIFT;
651  return r | g | b;
652 }
653 
654 static inline U16CPU SkPack888ToRGB16(U8CPU r, U8CPU g, U8CPU b) {
655  return (SkR32ToR16(r) << SK_R16_SHIFT) |
656  (SkG32ToG16(g) << SK_G16_SHIFT) |
657  (SkB32ToB16(b) << SK_B16_SHIFT);
658 }
659 
660 #define SkPixel32ToPixel16_ToU16(src) SkToU16(SkPixel32ToPixel16(src))
661 
663 // Fast dither from 32->16
664 
665 #define SkShouldDitherXY(x, y) (((x) ^ (y)) & 1)
666 
// Fast 888 -> 565 conversion with per-channel dithering. Each channel is
// doubled, then its truncate-and-replicate 565 expansion is subtracted
// before the final truncation — NOTE(review): this appears to round toward
// the representable value whose expansion is nearest; confirm against the
// non-dithering SkPack888ToRGB16 before relying on exact values.
667 static inline uint16_t SkDitherPack888ToRGB16(U8CPU r, U8CPU g, U8CPU b) {
668  r = ((r << 1) - ((r >> (8 - SK_R16_BITS) << (8 - SK_R16_BITS)) | (r >> SK_R16_BITS))) >> (8 - SK_R16_BITS);
669  g = ((g << 1) - ((g >> (8 - SK_G16_BITS) << (8 - SK_G16_BITS)) | (g >> SK_G16_BITS))) >> (8 - SK_G16_BITS);
670  b = ((b << 1) - ((b >> (8 - SK_B16_BITS) << (8 - SK_B16_BITS)) | (b >> SK_B16_BITS))) >> (8 - SK_B16_BITS);
671 
672  return SkPackRGB16(r, g, b);
673 }
674 
675 static inline uint16_t SkDitherPixel32ToPixel16(SkPMColor c) {
676  return SkDitherPack888ToRGB16(SkGetPackedR32(c), SkGetPackedG32(c), SkGetPackedB32(c));
677 }
678 
679 /* Return c in expanded_rgb_16 format, but also scaled up by 32 (5 bits)
680  It is now suitable for combining with a scaled expanded_rgb_16 color
681  as in SkSrcOver32To16().
682  We must do this 565 high-bit replication, in order for the subsequent add
683  to saturate properly (and not overflow). If we take the 8 bits as is, it is
684  possible to overflow.
685 */
686 static inline uint32_t SkPMColorToExpanded16x5(SkPMColor c) {
// Truncate each 8-bit channel to its 565 width.
687  unsigned sr = SkPacked32ToR16(c);
688  unsigned sg = SkPacked32ToG16(c);
689  unsigned sb = SkPacked32ToB16(c);
690 
// Scale up by 32 while replicating the channel's high bits below it (the
// same replication SkR16ToR32 et al. perform), so the later add in
// SkSrcOver32To16 saturates instead of overflowing.
691  sr = (sr << 5) | sr;
692  sg = (sg << 5) | (sg >> 1);
693  sb = (sb << 5) | sb;
694  return (sr << 11) | (sg << 21) | (sb << 0);
695 }
696 
697 /* SrcOver the 32bit src color with the 16bit dst, returning a 16bit value
698  (with dirt in the high 16bits, so caller beware).
699 */
700 static inline U16CPU SkSrcOver32To16(SkPMColor src, uint16_t dst) {
701  unsigned sr = SkGetPackedR32(src);
702  unsigned sg = SkGetPackedG32(src);
703  unsigned sb = SkGetPackedB32(src);
704 
705  unsigned dr = SkGetPackedR16(dst);
706  unsigned dg = SkGetPackedG16(dst);
707  unsigned db = SkGetPackedB16(dst);
708 
709  unsigned isa = 255 - SkGetPackedA32(src);
710 
711  dr = (sr + SkMul16ShiftRound(dr, isa, SK_R16_BITS)) >> (8 - SK_R16_BITS);
712  dg = (sg + SkMul16ShiftRound(dg, isa, SK_G16_BITS)) >> (8 - SK_G16_BITS);
713  db = (sb + SkMul16ShiftRound(db, isa, SK_B16_BITS)) >> (8 - SK_B16_BITS);
714 
715  return SkPackRGB16(dr, dg, db);
716 }
717 
719 // Convert a 16bit pixel to a 32bit pixel
720 
721 static inline unsigned SkR16ToR32(unsigned r) {
722  return (r << (8 - SK_R16_BITS)) | (r >> (2 * SK_R16_BITS - 8));
723 }
724 
725 static inline unsigned SkG16ToG32(unsigned g) {
726  return (g << (8 - SK_G16_BITS)) | (g >> (2 * SK_G16_BITS - 8));
727 }
728 
729 static inline unsigned SkB16ToB32(unsigned b) {
730  return (b << (8 - SK_B16_BITS)) | (b >> (2 * SK_B16_BITS - 8));
731 }
732 
733 #define SkPacked16ToR32(c) SkR16ToR32(SkGetPackedR16(c))
734 #define SkPacked16ToG32(c) SkG16ToG32(SkGetPackedG16(c))
735 #define SkPacked16ToB32(c) SkB16ToB32(SkGetPackedB16(c))
736 
737 static inline SkPMColor SkPixel16ToPixel32(U16CPU src) {
738  SkASSERT(src == SkToU16(src));
739 
740  unsigned r = SkPacked16ToR32(src);
741  unsigned g = SkPacked16ToG32(src);
742  unsigned b = SkPacked16ToB32(src);
743 
744  SkASSERT((r >> (8 - SK_R16_BITS)) == SkGetPackedR16(src));
745  SkASSERT((g >> (8 - SK_G16_BITS)) == SkGetPackedG16(src));
746  SkASSERT((b >> (8 - SK_B16_BITS)) == SkGetPackedB16(src));
747 
748  return SkPackARGB32(0xFF, r, g, b);
749 }
750 
751 // similar to SkPixel16ToPixel32, but returns SkColor instead of SkPMColor
752 static inline SkColor SkPixel16ToColor(U16CPU src) {
753  SkASSERT(src == SkToU16(src));
754 
755  unsigned r = SkPacked16ToR32(src);
756  unsigned g = SkPacked16ToG32(src);
757  unsigned b = SkPacked16ToB32(src);
758 
759  SkASSERT((r >> (8 - SK_R16_BITS)) == SkGetPackedR16(src));
760  SkASSERT((g >> (8 - SK_G16_BITS)) == SkGetPackedG16(src));
761  SkASSERT((b >> (8 - SK_B16_BITS)) == SkGetPackedB16(src));
762 
763  return SkColorSetRGB(r, g, b);
764 }
765 
767 
768 typedef uint16_t SkPMColor16;
769 
770 // Put in OpenGL order (r g b a)
771 #define SK_A4444_SHIFT 0
772 #define SK_R4444_SHIFT 12
773 #define SK_G4444_SHIFT 8
774 #define SK_B4444_SHIFT 4
775 
776 #define SkA32To4444(a) ((unsigned)(a) >> 4)
777 #define SkR32To4444(r) ((unsigned)(r) >> 4)
778 #define SkG32To4444(g) ((unsigned)(g) >> 4)
779 #define SkB32To4444(b) ((unsigned)(b) >> 4)
780 
781 static inline U8CPU SkReplicateNibble(unsigned nib) {
782  SkASSERT(nib <= 0xF);
783  return (nib << 4) | nib;
784 }
785 
786 #define SkA4444ToA32(a) SkReplicateNibble(a)
787 #define SkR4444ToR32(r) SkReplicateNibble(r)
788 #define SkG4444ToG32(g) SkReplicateNibble(g)
789 #define SkB4444ToB32(b) SkReplicateNibble(b)
790 
791 #define SkGetPackedA4444(c) (((unsigned)(c) >> SK_A4444_SHIFT) & 0xF)
792 #define SkGetPackedR4444(c) (((unsigned)(c) >> SK_R4444_SHIFT) & 0xF)
793 #define SkGetPackedG4444(c) (((unsigned)(c) >> SK_G4444_SHIFT) & 0xF)
794 #define SkGetPackedB4444(c) (((unsigned)(c) >> SK_B4444_SHIFT) & 0xF)
795 
796 #define SkPacked4444ToA32(c) SkReplicateNibble(SkGetPackedA4444(c))
797 #define SkPacked4444ToR32(c) SkReplicateNibble(SkGetPackedR4444(c))
798 #define SkPacked4444ToG32(c) SkReplicateNibble(SkGetPackedG4444(c))
799 #define SkPacked4444ToB32(c) SkReplicateNibble(SkGetPackedB4444(c))
800 
// Debug check that a 4444 pixel is a legal premultiplied color: alpha in
// range and every color channel <= alpha. A no-op in release builds.
801 #ifdef SK_DEBUG
802 static inline void SkPMColor16Assert(U16CPU c) {
803  unsigned a = SkGetPackedA4444(c);
804  unsigned r = SkGetPackedR4444(c);
805  unsigned g = SkGetPackedG4444(c);
806  unsigned b = SkGetPackedB4444(c);
807 
808  SkASSERT(a <= 0xF);
809  SkASSERT(r <= a);
810  SkASSERT(g <= a);
811  SkASSERT(b <= a);
812 }
813 #else
814 #define SkPMColor16Assert(c)
815 #endif
816 
817 static inline unsigned SkAlpha15To16(unsigned a) {
818  SkASSERT(a <= 0xF);
819  return a + (a >> 3);
820 }
821 
// Scale value by scale/16 (scale in [0, 16]); debug builds range-check the
// scale first.
822 #ifdef SK_DEBUG
823  static inline int SkAlphaMul4(int value, int scale) {
824  SkASSERT((unsigned)scale <= 0x10);
825  return value * scale >> 4;
826  }
827 #else
828  #define SkAlphaMul4(value, scale) ((value) * (scale) >> 4)
829 #endif
830 
831 static inline unsigned SkR4444ToR565(unsigned r) {
832  SkASSERT(r <= 0xF);
833  return (r << (SK_R16_BITS - 4)) | (r >> (8 - SK_R16_BITS));
834 }
835 
836 static inline unsigned SkG4444ToG565(unsigned g) {
837  SkASSERT(g <= 0xF);
838  return (g << (SK_G16_BITS - 4)) | (g >> (8 - SK_G16_BITS));
839 }
840 
841 static inline unsigned SkB4444ToB565(unsigned b) {
842  SkASSERT(b <= 0xF);
843  return (b << (SK_B16_BITS - 4)) | (b >> (8 - SK_B16_BITS));
844 }
845 
846 static inline SkPMColor16 SkPackARGB4444(unsigned a, unsigned r,
847  unsigned g, unsigned b) {
848  SkASSERT(a <= 0xF);
849  SkASSERT(r <= a);
850  SkASSERT(g <= a);
851  SkASSERT(b <= a);
852 
853  return (SkPMColor16)((a << SK_A4444_SHIFT) | (r << SK_R4444_SHIFT) |
854  (g << SK_G4444_SHIFT) | (b << SK_B4444_SHIFT));
855 }
856 
// Scale all four channels of a 4444 pixel by scale/16 (scale in [0, 16]).
857 static inline SkPMColor16 SkAlphaMulQ4(SkPMColor16 c, int scale) {
858  SkASSERT(scale <= 16);
859 
860  const unsigned mask = 0xF0F; //gMask_0F0F;
861 
862 #if 0
// Alternate two-register formulation, kept for reference.
863  unsigned rb = ((c & mask) * scale) >> 4;
864  unsigned ag = ((c >> 4) & mask) * scale;
865  return (rb & mask) | (ag & ~mask);
866 #else
// Spread the four nibbles across one 32-bit word with gaps, scale them all
// in a single multiply, then fold the word back into 16 bits.
867  unsigned expanded_c = (c & mask) | ((c & (mask << 4)) << 12);
868  unsigned scaled_c = (expanded_c * scale) >> 4;
869  return (scaled_c & mask) | ((scaled_c >> 12) & (mask << 4));
870 #endif
871 }
872 
876 static inline uint32_t SkExpand_4444(U16CPU c) {
877  SkASSERT(c == (uint16_t)c);
878 
879  const unsigned mask = 0xF0F; //gMask_0F0F;
880  return (c & mask) | ((c & ~mask) << 12);
881 }
882 
// Src-over a premultiplied 4444 src onto a 565 dst, returning 565.
883 static inline uint16_t SkSrcOver4444To16(SkPMColor16 s, uint16_t d) {
884  unsigned sa = SkGetPackedA4444(s);
// Expand src's color channels from 4 bits to 565 widths up front.
885  unsigned sr = SkR4444ToR565(SkGetPackedR4444(s));
886  unsigned sg = SkG4444ToG565(SkGetPackedG4444(s));
887  unsigned sb = SkB4444ToB565(SkGetPackedB4444(s));
888 
889  // To avoid overflow, we have to clear the low bit of the synthetic sg
890  // if the src alpha is <= 7.
891  // to see why, try blending 0x4444 on top of 565-white and watch green
892  // overflow (sum == 64)
893  sg &= ~(~(sa >> 3) & 1);
894 
// dst keeps (15 - sa)/15 of itself, computed at 0..16 scale.
895  unsigned scale = SkAlpha15To16(15 - sa);
896  unsigned dr = SkAlphaMul4(SkGetPackedR16(d), scale);
897  unsigned dg = SkAlphaMul4(SkGetPackedG16(d), scale);
898  unsigned db = SkAlphaMul4(SkGetPackedB16(d), scale);
899 
900 #if 0
901  if (sg + dg > 63) {
902  SkDebugf("---- SkSrcOver4444To16 src=%x dst=%x scale=%d, sg=%d dg=%d\n", s, d, scale, sg, dg);
903  }
904 #endif
905  return SkPackRGB16(sr + dr, sg + dg, sb + db);
906 }
907 
908 static inline uint16_t SkBlend4444To16(SkPMColor16 src, uint16_t dst, int scale16) {
909  SkASSERT((unsigned)scale16 <= 16);
910 
911  return SkSrcOver4444To16(SkAlphaMulQ4(src, scale16), dst);
912 }
913 
914 static inline SkPMColor SkPixel4444ToPixel32(U16CPU c) {
915  uint32_t d = (SkGetPackedA4444(c) << SK_A32_SHIFT) |
916  (SkGetPackedR4444(c) << SK_R32_SHIFT) |
917  (SkGetPackedG4444(c) << SK_G32_SHIFT) |
918  (SkGetPackedB4444(c) << SK_B32_SHIFT);
919  return d | (d << 4);
920 }
921 
922 static inline SkPMColor16 SkPixel32ToPixel4444(SkPMColor c) {
923  return (((c >> (SK_A32_SHIFT + 4)) & 0xF) << SK_A4444_SHIFT) |
924  (((c >> (SK_R32_SHIFT + 4)) & 0xF) << SK_R4444_SHIFT) |
925  (((c >> (SK_G32_SHIFT + 4)) & 0xF) << SK_G4444_SHIFT) |
926  (((c >> (SK_B32_SHIFT + 4)) & 0xF) << SK_B4444_SHIFT);
927 }
928 
929 // cheap 2x2 dither
// Convert 8888 components to 4444 with a dither on each channel, while
// preserving premultiplied validity (each color channel <= alpha).
930 static inline SkPMColor16 SkDitherARGB32To4444(U8CPU a, U8CPU r,
931  U8CPU g, U8CPU b) {
932  // to ensure that we stay a legal premultiplied color, we take the max()
933  // of the truncated and dithered alpha values. If we didn't, cases like
934  // SkDitherARGB32To4444(0x31, 0x2E, ...) would generate SkPackARGB4444(2, 3, ...)
935  // which is not legal premultiplied, since a < color
936  unsigned dithered_a = ((a << 1) - ((a >> 4 << 4) | (a >> 4))) >> 4;
937  a = SkMax32(a >> 4, dithered_a);
938  // these we just dither in place
939  r = ((r << 1) - ((r >> 4 << 4) | (r >> 4))) >> 4;
940  g = ((g << 1) - ((g >> 4 << 4) | (g >> 4))) >> 4;
941  b = ((b << 1) - ((b >> 4 << 4) | (b >> 4))) >> 4;
942 
943  return SkPackARGB4444(a, r, g, b);
944 }
945 
946 static inline SkPMColor16 SkDitherPixel32To4444(SkPMColor c) {
947  return SkDitherARGB32To4444(SkGetPackedA32(c), SkGetPackedR32(c),
948  SkGetPackedG32(c), SkGetPackedB32(c));
949 }
950 
951 /* Assumes 16bit is in standard RGBA order.
952  Transforms a normal ARGB_8888 into the same byte order as
953  expanded ARGB_4444, but keeps each component 8bits
954 */
955 static inline uint32_t SkExpand_8888(SkPMColor c) {
956  return (((c >> SK_R32_SHIFT) & 0xFF) << 24) |
957  (((c >> SK_G32_SHIFT) & 0xFF) << 8) |
958  (((c >> SK_B32_SHIFT) & 0xFF) << 16) |
959  (((c >> SK_A32_SHIFT) & 0xFF) << 0);
960 }
961 
962 /* Undo the operation of SkExpand_8888, turning the argument back into
963  a SkPMColor.
964 */
965 static inline SkPMColor SkCompact_8888(uint32_t c) {
966  return (((c >> 24) & 0xFF) << SK_R32_SHIFT) |
967  (((c >> 8) & 0xFF) << SK_G32_SHIFT) |
968  (((c >> 16) & 0xFF) << SK_B32_SHIFT) |
969  (((c >> 0) & 0xFF) << SK_A32_SHIFT);
970 }
971 
972 /* Like SkExpand_8888, this transforms a pmcolor into the expanded 4444 format,
973  but this routine just keeps the high 4bits of each component in the low
974  4bits of the result (just like a newly expanded PMColor16).
975 */
976 static inline uint32_t SkExpand32_4444(SkPMColor c) {
977  return (((c >> (SK_R32_SHIFT + 4)) & 0xF) << 24) |
978  (((c >> (SK_G32_SHIFT + 4)) & 0xF) << 8) |
979  (((c >> (SK_B32_SHIFT + 4)) & 0xF) << 16) |
980  (((c >> (SK_A32_SHIFT + 4)) & 0xF) << 0);
981 }
982 
983 // takes two values and alternates them as part of a memset16
984 // used for cheap 2x2 dithering when the colors are opaque
985 void sk_dither_memset16(uint16_t dst[], uint16_t value, uint16_t other, int n);
986 
988 
989 static inline int SkUpscale31To32(int value) {
990  SkASSERT((unsigned)value <= 31);
991  return value + (value >> 4);
992 }
993 
994 static inline int SkBlend32(int src, int dst, int scale) {
995  SkASSERT((unsigned)src <= 0xFF);
996  SkASSERT((unsigned)dst <= 0xFF);
997  SkASSERT((unsigned)scale <= 32);
998  return dst + ((src - dst) * scale >> 5);
999 }
1000 
1001 static inline SkPMColor SkBlendLCD16(int srcA, int srcR, int srcG, int srcB,
1002  SkPMColor dst, uint16_t mask) {
1003  if (mask == 0) {
1004  return dst;
1005  }
1006 
1007  /* We want all of these in 5bits, hence the shifts in case one of them
1008  * (green) is 6bits.
1009  */
1010  int maskR = SkGetPackedR16(mask) >> (SK_R16_BITS - 5);
1011  int maskG = SkGetPackedG16(mask) >> (SK_G16_BITS - 5);
1012  int maskB = SkGetPackedB16(mask) >> (SK_B16_BITS - 5);
1013 
1014  // Now upscale them to 0..32, so we can use blend32
1015  maskR = SkUpscale31To32(maskR);
1016  maskG = SkUpscale31To32(maskG);
1017  maskB = SkUpscale31To32(maskB);
1018 
1019  // srcA has been upscaled to 256 before passed into this function
1020  maskR = maskR * srcA >> 8;
1021  maskG = maskG * srcA >> 8;
1022  maskB = maskB * srcA >> 8;
1023 
1024  int dstR = SkGetPackedR32(dst);
1025  int dstG = SkGetPackedG32(dst);
1026  int dstB = SkGetPackedB32(dst);
1027 
1028  // LCD blitting is only supported if the dst is known/required
1029  // to be opaque
1030  return SkPackARGB32(0xFF,
1031  SkBlend32(srcR, dstR, maskR),
1032  SkBlend32(srcG, dstG, maskG),
1033  SkBlend32(srcB, dstB, maskB));
1034 }
1035 
1036 static inline SkPMColor SkBlendLCD16Opaque(int srcR, int srcG, int srcB,
1037  SkPMColor dst, uint16_t mask,
1038  SkPMColor opaqueDst) {
1039  if (mask == 0) {
1040  return dst;
1041  }
1042 
1043  if (0xFFFF == mask) {
1044  return opaqueDst;
1045  }
1046 
1047  /* We want all of these in 5bits, hence the shifts in case one of them
1048  * (green) is 6bits.
1049  */
1050  int maskR = SkGetPackedR16(mask) >> (SK_R16_BITS - 5);
1051  int maskG = SkGetPackedG16(mask) >> (SK_G16_BITS - 5);
1052  int maskB = SkGetPackedB16(mask) >> (SK_B16_BITS - 5);
1053 
1054  // Now upscale them to 0..32, so we can use blend32
1055  maskR = SkUpscale31To32(maskR);
1056  maskG = SkUpscale31To32(maskG);
1057  maskB = SkUpscale31To32(maskB);
1058 
1059  int dstR = SkGetPackedR32(dst);
1060  int dstG = SkGetPackedG32(dst);
1061  int dstB = SkGetPackedB32(dst);
1062 
1063  // LCD blitting is only supported if the dst is known/required
1064  // to be opaque
1065  return SkPackARGB32(0xFF,
1066  SkBlend32(srcR, dstR, maskR),
1067  SkBlend32(srcG, dstG, maskG),
1068  SkBlend32(srcB, dstB, maskB));
1069 }
1070 
1071 static inline void SkBlitLCD16Row(SkPMColor dst[], const uint16_t mask[],
1072  SkColor src, int width, SkPMColor) {
1073  int srcA = SkColorGetA(src);
1074  int srcR = SkColorGetR(src);
1075  int srcG = SkColorGetG(src);
1076  int srcB = SkColorGetB(src);
1077 
1078  srcA = SkAlpha255To256(srcA);
1079 
1080  for (int i = 0; i < width; i++) {
1081  dst[i] = SkBlendLCD16(srcA, srcR, srcG, srcB, dst[i], mask[i]);
1082  }
1083 }
1084 
1085 static inline void SkBlitLCD16OpaqueRow(SkPMColor dst[], const uint16_t mask[],
1086  SkColor src, int width,
1087  SkPMColor opaqueDst) {
1088  int srcR = SkColorGetR(src);
1089  int srcG = SkColorGetG(src);
1090  int srcB = SkColorGetB(src);
1091 
1092  for (int i = 0; i < width; i++) {
1093  dst[i] = SkBlendLCD16Opaque(srcR, srcG, srcB, dst[i], mask[i],
1094  opaqueDst);
1095  }
1096 }
1097 
1098 #endif
unsigned U16CPU
Fast type for unsigned 16 bits.
Definition: SkTypes.h:263
#define SkColorGetG(color)
return the green byte from a SkColor value
Definition: SkColor.h:66
uint32_t SkPMColor
32 bit ARGB color value, premultiplied.
Definition: SkColor.h:147
uint32_t SkColor
32 bit ARGB color value, not premultiplied.
Definition: SkColor.h:28
#define SkColorGetB(color)
return the blue byte from a SkColor value
Definition: SkColor.h:68
#define SkColorGetR(color)
return the red byte from a SkColor value
Definition: SkColor.h:64
#define SkColorSetRGB(r, g, b)
Return a SkColor value from 8 bit component values, with an implied value of 0xFF for alpha (fully op...
Definition: SkColor.h:59
#define SkColorGetA(color)
return the alpha byte from a SkColor value
Definition: SkColor.h:62
unsigned U8CPU
Fast type for unsigned 8 bits.
Definition: SkTypes.h:251
Types and macros for colors.
int S16CPU
Fast type for signed 16 bits.
Definition: SkTypes.h:257