Merge branch 'development' of https://github.com/arendst/Tasmota into development

This commit is contained in:
Theo Arends 2025-10-02 17:32:46 +02:00
commit ee9eabb414
11 changed files with 1637 additions and 2366 deletions

View File

@ -53,7 +53,7 @@ class BeaconAnimation : animation.animation
# Fill background if not transparent
if back_color != 0xFF000000
frame.fill_pixels(back_color)
frame.fill_pixels(frame.pixels, back_color)
end
# Calculate beacon boundaries

View File

@ -59,7 +59,7 @@ class CrenelPositionAnimation : animation.animation
# Fill background if not transparent
if back_color != 0xFF000000
frame.fill_pixels(back_color)
frame.fill_pixels(frame.pixels, back_color)
end
# Ensure we have a meaningful period

View File

@ -0,0 +1,41 @@
/********************************************************************
* Berry class `FrameBufferNtv`
*
*******************************************************************/
#ifdef USE_BERRY
#include "be_constobj.h"
#ifdef USE_WS2812
#ifdef USE_BERRY_ANIMATION
extern int be_animation_ntv_blend(bvm *vm);
extern int be_animation_ntv_blend_linear(bvm *vm);
extern int be_animation_ntv_blend_pixels(bvm *vm);
extern int be_animation_ntv_gradient_fill(bvm *vm);
extern int be_animation_ntv_blend_color(bvm *vm);
extern int be_animation_ntv_apply_opacity(bvm *vm);
extern int be_animation_ntv_apply_brightness(bvm *vm);
extern int be_animation_ntv_fill_pixels(bvm *vm);
BE_EXPORT_VARIABLE extern const bclass be_class_bytes;
/* @const_object_info_begin
class be_class_FrameBufferNtv (scope: global, name: FrameBufferNtv, strings: weak) {
// the following are on buffers
blend, static_func(be_animation_ntv_blend)
blend_linear, static_func(be_animation_ntv_blend_linear)
blend_pixels, static_func(be_animation_ntv_blend_pixels)
gradient_fill, static_func(be_animation_ntv_gradient_fill)
blend_color, static_func(be_animation_ntv_blend_color)
apply_opacity, static_func(be_animation_ntv_apply_opacity)
apply_brightness, static_func(be_animation_ntv_apply_brightness)
fill_pixels, static_func(be_animation_ntv_fill_pixels)
}
@const_object_info_end */
#include "be_fixed_be_class_FrameBufferNtv.h"
#endif // USE_BERRY_ANIMATION
#endif // USE_WS2812
#endif // USE_BERRY
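When compiled in, this mapping exposes the natives as static methods of a global `FrameBufferNtv` class in Berry. A minimal sketch (illustrative only, assuming a build with USE_WS2812 and USE_BERRY_ANIMATION):

# Berry console sketch - buffer of 4 pixels x 4 bytes, zero-filled
var buf = bytes(-16)
FrameBufferNtv.fill_pixels(buf, 0xFF00FF00)            # fill all pixels with opaque green
var c = FrameBufferNtv.blend(0xFF0000FF, 0x80FF0000)   # 50%-alpha red over opaque blue, roughly 0xFF80007F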

View File

@ -0,0 +1,610 @@
/*
xdrv_52_3_berry_leds.ino - Berry scripting language, native functions
Copyright (C) 2021 Stephan Hadinger, Berry language by Guan Wenliang https://github.com/Skiars/berry
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef USE_BERRY
#include <berry.h>
#ifdef USE_WS2812
#ifdef USE_BERRY_ANIMATION
extern uint16_t changeUIntScale(uint16_t inum, uint16_t ifrom_min, uint16_t ifrom_max, uint16_t ito_min, uint16_t ito_max);
extern uint32_t ApplyBriGamma(uint32_t color_a /* 0xRRGGBB */, uint32_t bri /* 0..255 */, bool gamma);
extern "C" {
// frame_buffer_ntv.blend(color1:int, color2:int) -> int
// Blend two colors using color2's alpha channel
// color1: destination color (ARGB format - 0xAARRGGBB)
// color2: source color (ARGB format - 0xAARRGGBB)
int32_t be_animation_ntv_blend(bvm *vm);
int32_t be_animation_ntv_blend(bvm *vm) {
uint32_t color1 = be_toint(vm, 1);
uint32_t color2 = be_toint(vm, 2);
// Extract components from color1 (destination)
uint32_t a1 = (color1 >> 24) & 0xFF;
uint32_t r1 = (color1 >> 16) & 0xFF;
uint32_t g1 = (color1 >> 8) & 0xFF;
uint32_t b1 = (color1 ) & 0xFF;
// Extract components from color2 (source)
uint32_t a2 = (color2 >> 24) & 0xFF;
uint32_t r2 = (color2 >> 16) & 0xFF;
uint32_t g2 = (color2 >> 8) & 0xFF;
uint32_t b2 = (color2 ) & 0xFF;
// Fast path: if source is fully transparent, return destination unchanged
if (a2 == 0) {
be_pushint(vm, color1);
be_return(vm);
}
// Blend RGB channels using source alpha
uint8_t r = changeUIntScale(255 - a2, 0, 255, 0, r1) + changeUIntScale(a2, 0, 255, 0, r2);
uint8_t g = changeUIntScale(255 - a2, 0, 255, 0, g1) + changeUIntScale(a2, 0, 255, 0, g2);
uint8_t b = changeUIntScale(255 - a2, 0, 255, 0, b1) + changeUIntScale(a2, 0, 255, 0, b2);
// Blend alpha channels: a = a1 + (255 - a1) * a2 / 255
uint32_t a = a1 + changeUIntScale((255 - a1) * a2, 0, 255 * 255, 0, 255);
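// Example (illustrative): a1 = 128, a2 = 128 gives a ≈ 128 + 64 = 192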
// Clamp alpha to valid range
if (a > 255) { a = 255; }
// Combine components into result
uint32_t result = (a << 24) | (r << 16) | (g << 8) | b;
be_pushint(vm, result);
be_return(vm);
}
// frame_buffer_ntv.blend_linear(color1:int, color2:int, alpha:int) -> int
// Linear interpolation between color1 and color2: alpha 0 = full color2, alpha 255 = full color1
int32_t be_animation_ntv_blend_linear(bvm *vm);
int32_t be_animation_ntv_blend_linear(bvm *vm) {
int32_t top = be_top(vm); // Get the number of arguments
// we skip argument type testing since we're in a controlled environment
uint32_t color_a = be_toint(vm, 1);
uint32_t color_b = be_toint(vm, 2);
uint32_t alpha = be_toint(vm, 3);
uint32_t r = (color_a >> 16) & 0xFF;
uint32_t g = (color_a >> 8) & 0xFF;
uint32_t b = (color_a ) & 0xFF;
uint32_t a = (color_a >> 24) & 0xFF;
uint32_t r2 = (color_b >> 16) & 0xFF;
uint32_t g2 = (color_b >> 8) & 0xFF;
uint32_t b2 = (color_b ) & 0xFF;
uint32_t a2 = (color_b >> 24) & 0xFF;
uint8_t r3 = changeUIntScale(alpha, 0, 255, r2, r);
uint8_t g3 = changeUIntScale(alpha, 0, 255, g2, g);
uint8_t b3 = changeUIntScale(alpha, 0, 255, b2, b);
uint8_t a3 = changeUIntScale(alpha, 0, 255, a2, a);
uint32_t rgb = (a3 << 24) | (r3 << 16) | (g3 << 8) | b3;
be_pushint(vm, rgb);
be_return(vm);
}
// frame_buffer_ntv.blend_pixels(dest_bytes:bytes(), src_bytes:bytes(), region_start:int, region_end:int) -> nil
// Blend source buffer into destination buffer using per-pixel alpha
// Standard ARGB convention: alpha 0 = transparent, 255 = opaque
int32_t be_animation_ntv_blend_pixels(bvm *vm);
int32_t be_animation_ntv_blend_pixels(bvm *vm) {
int32_t top = be_top(vm); // Get the number of arguments
size_t dest_len = 0;
uint32_t * dest_buf = (uint32_t*) be_tobytes(vm, 1, &dest_len);
size_t src_len = 0;
const uint32_t * src_buf = (const uint32_t*) be_tobytes(vm, 2, &src_len);
if (dest_buf == NULL || src_buf == NULL) {
be_raise(vm, "argument_error", "needs bytes() arguments");
}
int32_t region_start = 0;
int32_t region_end = -1;
if (top >= 3 && be_isint(vm, 3)) {
region_start = be_toint(vm, 3);
}
if (top >= 4 && be_isint(vm, 4)) {
region_end = be_toint(vm, 4);
}
// Calculate pixel counts
size_t dest_pixels = dest_len / 4;
size_t src_pixels = src_len / 4;
// Clamp to the smaller of both buffers to avoid out-of-bounds access
if (src_pixels < dest_pixels) { dest_pixels = src_pixels; }
if (dest_pixels < src_pixels) { src_pixels = dest_pixels; }
// Validate region bounds
if (region_start < 0) { region_start += dest_pixels; }
if (region_end < 0) { region_end += dest_pixels; }
if (region_start < 0) { region_start = 0; }
if (region_end < 0) { region_end = 0; }
if (region_start >= dest_pixels) { be_return_nil(vm); }
if (region_end >= dest_pixels) { region_end = dest_pixels - 1; }
if (region_end < region_start) { be_return_nil(vm); }
// Blend pixels in the specified region
for (int32_t i = region_start; i <= region_end; i++) {
uint32_t color2 = src_buf[i];
uint32_t a2 = (color2 >> 24) & 0xFF;
// Only blend if source has some alpha (standard ARGB: 0=transparent, 255=opaque)
if (a2 > 0) {
if (a2 == 255) {
// Fully opaque source, just copy it
dest_buf[i] = color2;
} else {
// Partially transparent, need to blend
uint32_t color1 = dest_buf[i];
// Extract components from color1 (destination)
uint32_t a1 = (color1 >> 24) & 0xFF;
uint32_t r1 = (color1 >> 16) & 0xFF;
uint32_t g1 = (color1 >> 8) & 0xFF;
uint32_t b1 = (color1 ) & 0xFF;
// Extract components from color2 (source) - already have a2
uint32_t r2 = (color2 >> 16) & 0xFF;
uint32_t g2 = (color2 >> 8) & 0xFF;
uint32_t b2 = (color2 ) & 0xFF;
// Blend RGB channels using source alpha
uint8_t r = changeUIntScale(255 - a2, 0, 255, 0, r1) + changeUIntScale(a2, 0, 255, 0, r2);
uint8_t g = changeUIntScale(255 - a2, 0, 255, 0, g1) + changeUIntScale(a2, 0, 255, 0, g2);
uint8_t b = changeUIntScale(255 - a2, 0, 255, 0, b1) + changeUIntScale(a2, 0, 255, 0, b2);
// Blend alpha channels: a = a1 + (255 - a1) * a2 / 255
uint32_t a = a1 + changeUIntScale((255 - a1) * a2, 0, 255 * 255, 0, 255);
if (a > 255) { a = 255; }
// Write blended result
dest_buf[i] = (a << 24) | (r << 16) | (g << 8) | b;
}
}
// If a2 == 0 (fully transparent), leave destination unchanged
}
be_return_nil(vm);
}
// frame_buffer_ntv.gradient_fill(pixels:bytes(), color1:int, color2:int, start_pos:int, end_pos:int) -> nil
// Create a gradient fill in the buffer
int32_t be_animation_ntv_gradient_fill(bvm *vm);
int32_t be_animation_ntv_gradient_fill(bvm *vm) {
int32_t top = be_top(vm); // Get the number of arguments
size_t pixels_len = 0;
uint32_t * pixels_buf = (uint32_t*) be_tobytes(vm, 1, &pixels_len);
if (pixels_buf == NULL) {
be_raise(vm, "argument_error", "needs bytes() argument");
}
uint32_t color1 = be_toint(vm, 2);
uint32_t color2 = be_toint(vm, 3);
int32_t start_pos = 0;
int32_t end_pos = -1;
if (top >= 4 && be_isint(vm, 4)) {
start_pos = be_toint(vm, 4);
}
if (top >= 5 && be_isint(vm, 5)) {
end_pos = be_toint(vm, 5);
}
// Calculate pixel count
size_t width = pixels_len / 4;
// Handle negative indices (Python-style)
if (start_pos < 0) { start_pos += width; }
if (end_pos < 0) { end_pos += width; }
// Clamp to valid range
if (start_pos < 0) { start_pos = 0; }
if (end_pos < 0) { end_pos = 0; }
if (start_pos >= width) { be_return_nil(vm); }
if (end_pos >= width) { end_pos = width - 1; }
if (end_pos < start_pos) { be_return_nil(vm); }
// Set first pixel directly
pixels_buf[start_pos] = color1;
// If only one pixel, we're done
if (start_pos == end_pos) {
be_return_nil(vm);
}
// Set last pixel directly
pixels_buf[end_pos] = color2;
// If only two pixels, we're done
if (end_pos - start_pos <= 1) {
be_return_nil(vm);
}
// Extract components from color1 (ARGB format)
uint32_t a1 = (color1 >> 24) & 0xFF;
uint32_t r1 = (color1 >> 16) & 0xFF;
uint32_t g1 = (color1 >> 8) & 0xFF;
uint32_t b1 = (color1 ) & 0xFF;
// Extract components from color2 (ARGB format)
uint32_t a2 = (color2 >> 24) & 0xFF;
uint32_t r2 = (color2 >> 16) & 0xFF;
uint32_t g2 = (color2 >> 8) & 0xFF;
uint32_t b2 = (color2 ) & 0xFF;
// Calculate the total number of steps
int32_t steps = end_pos - start_pos;
// Fill the gradient for intermediate pixels
for (int32_t i = start_pos + 1; i < end_pos; i++) {
int32_t pos = i - start_pos;
// Linear interpolation using changeUIntScale
uint8_t r = changeUIntScale(pos, 0, steps, r1, r2);
uint8_t g = changeUIntScale(pos, 0, steps, g1, g2);
uint8_t b = changeUIntScale(pos, 0, steps, b1, b2);
uint8_t a = changeUIntScale(pos, 0, steps, a1, a2);
// Combine components into a 32-bit value (ARGB format)
pixels_buf[i] = (a << 24) | (r << 16) | (g << 8) | b;
}
be_return_nil(vm);
}
// frame_buffer_ntv.blend_color(pixels:bytes(), color:int, start_pos:int, end_pos:int) -> nil
// Blend a specific region with a solid color using the color's alpha channel
int32_t be_animation_ntv_blend_color(bvm *vm);
int32_t be_animation_ntv_blend_color(bvm *vm) {
int32_t top = be_top(vm); // Get the number of arguments
size_t pixels_len = 0;
uint32_t * pixels_buf = (uint32_t*) be_tobytes(vm, 1, &pixels_len);
if (pixels_buf == NULL) {
be_raise(vm, "argument_error", "needs bytes() argument");
}
uint32_t color = be_toint(vm, 2);
int32_t start_pos = 0;
int32_t end_pos = -1;
if (top >= 3 && be_isint(vm, 3)) {
start_pos = be_toint(vm, 3);
}
if (top >= 4 && be_isint(vm, 4)) {
end_pos = be_toint(vm, 4);
}
// Calculate pixel count
size_t width = pixels_len / 4;
// Handle negative indices (Python-style)
if (start_pos < 0) { start_pos += width; }
if (end_pos < 0) { end_pos += width; }
// Clamp to valid range
if (start_pos < 0) { start_pos = 0; }
if (end_pos < 0) { end_pos = 0; }
if (start_pos >= width) { be_return_nil(vm); }
if (end_pos >= width) { end_pos = width - 1; }
if (end_pos < start_pos) { be_return_nil(vm); }
// Extract alpha from color
uint32_t a2 = (color >> 24) & 0xFF;
// Only blend if the color has some alpha
if (a2 == 0) {
be_return_nil(vm); // Fully transparent, nothing to do
}
// Extract components from color (source)
uint32_t r2 = (color >> 16) & 0xFF;
uint32_t g2 = (color >> 8) & 0xFF;
uint32_t b2 = (color ) & 0xFF;
// Blend the pixels in the specified region
for (int32_t i = start_pos; i <= end_pos; i++) {
uint32_t color1 = pixels_buf[i];
// Extract components from color1 (destination)
uint32_t a1 = (color1 >> 24) & 0xFF;
uint32_t r1 = (color1 >> 16) & 0xFF;
uint32_t g1 = (color1 >> 8) & 0xFF;
uint32_t b1 = (color1 ) & 0xFF;
// Blend RGB channels using source alpha
uint8_t r = changeUIntScale(255 - a2, 0, 255, 0, r1) + changeUIntScale(a2, 0, 255, 0, r2);
uint8_t g = changeUIntScale(255 - a2, 0, 255, 0, g1) + changeUIntScale(a2, 0, 255, 0, g2);
uint8_t b = changeUIntScale(255 - a2, 0, 255, 0, b1) + changeUIntScale(a2, 0, 255, 0, b2);
// Blend alpha channels: a = a1 + (255 - a1) * a2 / 255
uint32_t a = a1 + changeUIntScale((255 - a1) * a2, 0, 255 * 255, 0, 255);
if (a > 255) { a = 255; }
// Write blended result
pixels_buf[i] = (a << 24) | (r << 16) | (g << 8) | b;
}
be_return_nil(vm);
}
// frame_buffer_ntv.apply_opacity(pixels:bytes(), opacity:int|bytes(), start_pos:int, end_pos:int) -> nil
// Apply an opacity adjustment to a region of the buffer
// opacity can be an int (0-511) or bytes() buffer for mask mode
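// Example (illustrative): apply_opacity(buf, 128) roughly halves every pixel's alpha, while apply_opacity(buf, mask) scales each alpha by the mask pixel's alpha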
int32_t be_animation_ntv_apply_opacity(bvm *vm);
int32_t be_animation_ntv_apply_opacity(bvm *vm) {
int32_t top = be_top(vm); // Get the number of arguments
size_t pixels_len = 0;
uint32_t * pixels_buf = (uint32_t*) be_tobytes(vm, 1, &pixels_len);
if (pixels_buf == NULL) {
be_raise(vm, "argument_error", "needs bytes() argument");
}
int32_t start_pos = 0;
int32_t end_pos = -1;
if (top >= 3 && be_isint(vm, 3)) {
start_pos = be_toint(vm, 3);
}
if (top >= 4 && be_isint(vm, 4)) {
end_pos = be_toint(vm, 4);
}
// Calculate pixel count
size_t width = pixels_len / 4;
// Handle negative indices (Python-style)
if (start_pos < 0) { start_pos += width; }
if (end_pos < 0) { end_pos += width; }
// Clamp to valid range
if (start_pos < 0) { start_pos = 0; }
if (end_pos < 0) { end_pos = 0; }
if (start_pos >= width) { be_return_nil(vm); }
if (end_pos >= width) { end_pos = width - 1; }
if (end_pos < start_pos) { be_return_nil(vm); }
// Check if opacity is bytes (mask mode) or int (value mode)
if (top >= 2 && be_isbytes(vm, 2)) {
// Mask mode: use another buffer as opacity mask
size_t mask_len = 0;
const uint32_t * mask_buf = (const uint32_t*) be_tobytes(vm, 2, &mask_len);
if (mask_buf == NULL) {
be_raise(vm, "argument_error", "mask needs bytes() argument");
}
size_t mask_width = mask_len / 4;
// Validate mask size - use smaller width
if (mask_width < width) { width = mask_width; }
if (end_pos >= width) { end_pos = width - 1; }
// Apply mask opacity
for (int32_t i = start_pos; i <= end_pos; i++) {
uint32_t color = pixels_buf[i];
uint32_t mask_color = mask_buf[i];
// Extract alpha from mask as opacity factor (0-255)
uint32_t mask_opacity = (mask_color >> 24) & 0xFF;
// Extract components from color (ARGB format)
uint32_t a = (color >> 24) & 0xFF;
uint32_t r = (color >> 16) & 0xFF;
uint32_t g = (color >> 8) & 0xFF;
uint32_t b = (color ) & 0xFF;
// Apply mask opacity to alpha channel
a = changeUIntScale(mask_opacity, 0, 255, 0, a);
// Write result
pixels_buf[i] = (a << 24) | (r << 16) | (g << 8) | b;
}
} else {
// Number mode: uniform opacity adjustment
int32_t opacity_value = 255;
if (top >= 2 && be_isint(vm, 2)) {
opacity_value = be_toint(vm, 2);
}
// Ensure opacity is in valid range (0-511)
if (opacity_value < 0) { opacity_value = 0; }
if (opacity_value > 511) { opacity_value = 511; }
// Apply opacity adjustment
for (int32_t i = start_pos; i <= end_pos; i++) {
uint32_t color = pixels_buf[i];
// Extract components (ARGB format)
uint32_t a = (color >> 24) & 0xFF;
uint32_t r = (color >> 16) & 0xFF;
uint32_t g = (color >> 8) & 0xFF;
uint32_t b = (color ) & 0xFF;
// Adjust alpha
// For opacity 0-255: scale down alpha
// For opacity 256-511: scale up alpha (but cap at 255)
if (opacity_value <= 255) {
a = changeUIntScale(opacity_value, 0, 255, 0, a);
} else {
// Scale up alpha: map 256-511 to 1.0-2.0 multiplier
a = a + changeUIntScale(a * (opacity_value - 255), 0, 255 * 256, 0, 255);
if (a > 255) { a = 255; } // Cap at maximum alpha
}
// Write result
pixels_buf[i] = (a << 24) | (r << 16) | (g << 8) | b;
}
}
be_return_nil(vm);
}
// frame_buffer_ntv.apply_brightness(pixels:bytes(), brightness:int|bytes(), start_pos:int, end_pos:int) -> nil
// Apply a brightness adjustment to a region of the buffer
// brightness can be an int (0-511) or bytes() buffer for mask mode
int32_t be_animation_ntv_apply_brightness(bvm *vm);
int32_t be_animation_ntv_apply_brightness(bvm *vm) {
int32_t top = be_top(vm); // Get the number of arguments
size_t pixels_len = 0;
uint32_t * pixels_buf = (uint32_t*) be_tobytes(vm, 1, &pixels_len);
if (pixels_buf == NULL) {
be_raise(vm, "argument_error", "needs bytes() argument");
}
int32_t start_pos = 0;
int32_t end_pos = -1;
if (top >= 3 && be_isint(vm, 3)) {
start_pos = be_toint(vm, 3);
}
if (top >= 4 && be_isint(vm, 4)) {
end_pos = be_toint(vm, 4);
}
// Calculate pixel count
size_t width = pixels_len / 4;
// Handle negative indices (Python-style)
if (start_pos < 0) { start_pos += width; }
if (end_pos < 0) { end_pos += width; }
// Clamp to valid range
if (start_pos < 0) { start_pos = 0; }
if (end_pos < 0) { end_pos = 0; }
if (start_pos >= width) { be_return_nil(vm); }
if (end_pos >= width) { end_pos = width - 1; }
if (end_pos < start_pos) { be_return_nil(vm); }
// Check if brightness is bytes (mask mode) or int (value mode)
if (top >= 2 && be_isbytes(vm, 2)) {
// Mask mode: use another buffer as brightness mask
size_t mask_len = 0;
const uint32_t * mask_buf = (const uint32_t*) be_tobytes(vm, 2, &mask_len);
size_t mask_width = mask_len / 4;
// Validate mask size - use smaller width
if (mask_width < width) { width = mask_width; }
if (end_pos >= width) { end_pos = width - 1; }
// Apply mask brightness
for (int32_t i = start_pos; i <= end_pos; i++) {
uint32_t color = pixels_buf[i];
uint32_t mask_color = mask_buf[i];
// Extract alpha from mask as brightness factor (0-255)
uint32_t mask_brightness = (mask_color >> 24) & 0xFF;
// Extract components from color (ARGB format)
uint32_t a = (color >> 24) & 0xFF;
uint32_t r = (color >> 16) & 0xFF;
uint32_t g = (color >> 8) & 0xFF;
uint32_t b = (color ) & 0xFF;
// Apply mask brightness to RGB channels
r = changeUIntScale(mask_brightness, 0, 255, 0, r);
g = changeUIntScale(mask_brightness, 0, 255, 0, g);
b = changeUIntScale(mask_brightness, 0, 255, 0, b);
// Write result
pixels_buf[i] = (a << 24) | (r << 16) | (g << 8) | b;
}
} else {
// Number mode: uniform brightness adjustment
int32_t brightness_value = 255;
if (top >= 2 && be_isint(vm, 2)) {
brightness_value = be_toint(vm, 2);
}
// Ensure brightness is in valid range (0-511)
if (brightness_value < 0) { brightness_value = 0; }
if (brightness_value > 511) { brightness_value = 511; }
// Apply brightness adjustment
for (int32_t i = start_pos; i <= end_pos; i++) {
uint32_t color = pixels_buf[i];
// Extract components (ARGB format)
uint32_t a = (color >> 24) & 0xFF;
uint32_t r = (color >> 16) & 0xFF;
uint32_t g = (color >> 8) & 0xFF;
uint32_t b = (color ) & 0xFF;
// Adjust brightness
// For brightness 0-255: scale down RGB
// For brightness 256-511: scale up RGB (but cap at 255)
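// Example (illustrative): brightness 128 roughly halves r (128 -> ~64), brightness 384 scales it by ~1.5x (128 -> ~192)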
if (brightness_value <= 255) {
r = changeUIntScale(r, 0, 255, 0, brightness_value);
g = changeUIntScale(g, 0, 255, 0, brightness_value);
b = changeUIntScale(b, 0, 255, 0, brightness_value);
} else {
// Scale up RGB: map 256-511 to 1.0-2.0 multiplier
int32_t multiplier = brightness_value - 255; // 0-256 range
r = r + changeUIntScale(r * multiplier, 0, 255 * 256, 0, 255);
g = g + changeUIntScale(g * multiplier, 0, 255 * 256, 0, 255);
b = b + changeUIntScale(b * multiplier, 0, 255 * 256, 0, 255);
if (r > 255) { r = 255; } // Cap at maximum
if (g > 255) { g = 255; }
if (b > 255) { b = 255; }
}
// Write result
pixels_buf[i] = (a << 24) | (r << 16) | (g << 8) | b;
}
}
be_return_nil(vm);
}
// frame_buffer_ntv.fill_pixels(pixels:bytes(), color:int, start_pos:int, end_pos:int) -> nil
// Fill a region of the buffer with a specific color
int32_t be_animation_ntv_fill_pixels(bvm *vm);
int32_t be_animation_ntv_fill_pixels(bvm *vm) {
int32_t top = be_top(vm); // Get the number of arguments
size_t pixels_len = 0;
uint32_t * pixels_buf = (uint32_t*) be_tobytes(vm, 1, &pixels_len);
if (pixels_buf == NULL) {
be_raise(vm, "argument_error", "needs bytes() argument");
}
uint32_t color = be_toint(vm, 2);
int32_t start_pos = 0;
int32_t end_pos = -1;
if (top >= 3 && be_isint(vm, 3)) {
start_pos = be_toint(vm, 3);
}
if (top >= 4 && be_isint(vm, 4)) {
end_pos = be_toint(vm, 4);
}
// Calculate pixel count
size_t width = pixels_len / 4;
// Handle negative indices (Python-style)
if (start_pos < 0) { start_pos += width; }
if (end_pos < 0) { end_pos += width; }
// Clamp to valid range
if (start_pos < 0) { start_pos = 0; }
if (end_pos < 0) { end_pos = 0; }
if (start_pos >= width) { be_return_nil(vm); }
if (end_pos >= width) { end_pos = width - 1; }
if (end_pos < start_pos) { be_return_nil(vm); }
// Fill the region with the color
for (int32_t i = start_pos; i <= end_pos; i++) {
pixels_buf[i] = color;
}
be_return_nil(vm);
}
}
#endif // USE_BERRY_ANIMATION
#endif // USE_WS2812
#endif // USE_BERRY
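On the Berry side, the FrameBuffer methods now pass their raw bytes buffers explicitly so these natives can do the heavy lifting (see the animation engine and unit test diffs below). A short sketch of the calling convention, assuming two frame buffers of equal width:

var dest = animation.frame_buffer(30)        # main buffer
var src  = animation.frame_buffer(30)        # per-animation temp buffer
src.fill_pixels(src.pixels, 0x80FF0000)      # 50%-alpha red layer
dest.blend_pixels(dest.pixels, src.pixels)   # alpha-composite src over dest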

View File

@ -92,7 +92,7 @@ class Animation : animation.parameterized_object
# Fill the entire frame with the current color if not transparent
if (current_color != 0x00000000)
frame.fill_pixels(current_color)
frame.fill_pixels(frame.pixels, current_color)
end
return true
@ -138,10 +138,10 @@ class Animation : animation.parameterized_object
opacity_animation.render(self.opacity_frame, time_ms)
# Use rendered frame buffer as opacity mask
frame.apply_opacity(self.opacity_frame)
frame.apply_opacity(frame.pixels, self.opacity_frame.pixels)
elif type(opacity) == 'int' && opacity < 255
# Number mode: apply uniform opacity
frame.apply_opacity(opacity)
frame.apply_opacity(frame.pixels, opacity)
end
# If opacity is 255 (full opacity), do nothing
end

View File

@ -302,7 +302,7 @@ class AnimationEngine
if rendered
anim.post_render(self.temp_buffer, time_ms)
# Blend temp buffer into main buffer
self.frame_buffer.blend_pixels(self.temp_buffer)
self.frame_buffer.blend_pixels(self.frame_buffer.pixels, self.temp_buffer.pixels)
end
i += 1
end

View File

@ -12,7 +12,12 @@
#
# The class is optimized for performance and minimal memory usage.
class FrameBuffer
# Special import for FrameBufferNtv, which is pure Berry here but replaced
# by native code in Tasmota; we don't register it in the 'animation' module
# so that it is not solidified
import "./core/frame_buffer_ntv" as FrameBufferNtv
class FrameBuffer : FrameBufferNtv
var pixels # Pixel data (bytes object)
var width # Number of pixels
@ -65,7 +70,9 @@ class FrameBuffer
# Clear the frame buffer (set all pixels to transparent black)
def clear()
self.pixels.clear() # clear buffer
self.pixels.resize(self.width * 4) # resize to full size filled with transparent black (all zeroes)
if (size(self.pixels) != self.width * 4)
self.pixels.resize(self.width * 4) # resize to full size filled with transparent black (all zeroes)
end
end
# Resize the frame buffer to a new width
@ -86,176 +93,27 @@ class FrameBuffer
self.clear()
end
# Convert separate a, r, g, b components to a 32-bit color value
# r: red component (0-255)
# g: green component (0-255)
# b: blue component (0-255)
# a: alpha component (0-255, default 255 = fully opaque)
# Returns: 32-bit color value in ARGB format (0xAARRGGBB)
static def to_color(r, g, b, a)
# Default alpha to fully opaque if not specified
if a == nil
a = 255
end
# # Convert separate a, r, g, b components to a 32-bit color value
# # r: red component (0-255)
# # g: green component (0-255)
# # b: blue component (0-255)
# # a: alpha component (0-255, default 255 = fully opaque)
# # Returns: 32-bit color value in ARGB format (0xAARRGGBB)
# static def to_color(r, g, b, a)
# # Default alpha to fully opaque if not specified
# if a == nil
# a = 255
# end
# Ensure values are in valid range
r = r & 0xFF
g = g & 0xFF
b = b & 0xFF
a = a & 0xFF
# # Ensure values are in valid range
# r = r & 0xFF
# g = g & 0xFF
# b = b & 0xFF
# a = a & 0xFF
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
return (a << 24) | (r << 16) | (g << 8) | b
end
# Fill the frame buffer with a specific color using a bytes object
# This is an optimization for filling with a pre-computed color
def fill_pixels(color)
var i = 0
while i < self.width
self.pixels.set(i * 4, color, 4)
i += 1
end
end
# Blend two colors using their alpha channels
# Returns the blended color as a 32-bit integer (ARGB format - 0xAARRGGBB)
# color1: destination color (ARGB format - 0xAARRGGBB)
# color2: source color (ARGB format - 0xAARRGGBB)
def blend(color1, color2)
# Extract components from color1 (ARGB format - 0xAARRGGBB)
var a1 = (color1 >> 24) & 0xFF
var r1 = (color1 >> 16) & 0xFF
var g1 = (color1 >> 8) & 0xFF
var b1 = color1 & 0xFF
# Extract components from color2 (ARGB format - 0xAARRGGBB)
var a2 = (color2 >> 24) & 0xFF
var r2 = (color2 >> 16) & 0xFF
var g2 = (color2 >> 8) & 0xFF
var b2 = color2 & 0xFF
# Fast path for common cases
if a2 == 0
# Source is fully transparent, no blending needed
return color1
end
# Use the source alpha directly for blending
var effective_opacity = a2
# Normal alpha blending
# Use tasmota.scale_uint for ratio conversion instead of integer arithmetic
var r = tasmota.scale_uint(255 - effective_opacity, 0, 255, 0, r1) + tasmota.scale_uint(effective_opacity, 0, 255, 0, r2)
var g = tasmota.scale_uint(255 - effective_opacity, 0, 255, 0, g1) + tasmota.scale_uint(effective_opacity, 0, 255, 0, g2)
var b = tasmota.scale_uint(255 - effective_opacity, 0, 255, 0, b1) + tasmota.scale_uint(effective_opacity, 0, 255, 0, b2)
# More accurate alpha blending using tasmota.scale_uint
var a = a1 + tasmota.scale_uint((255 - a1) * a2, 0, 255 * 255, 0, 255)
# Ensure values are in valid range
r = r < 0 ? 0 : (r > 255 ? 255 : r)
g = g < 0 ? 0 : (g > 255 ? 255 : g)
b = b < 0 ? 0 : (b > 255 ? 255 : b)
a = a < 0 ? 0 : (a > 255 ? 255 : a)
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
return (int(a) << 24) | (int(r) << 16) | (int(g) << 8) | int(b)
end
# Linear interpolation between two colors using explicit blend factor
# Returns the blended color as a 32-bit integer (ARGB format - 0xAARRGGBB)
#
# This function matches the original berry_animate frame.blend(color1, color2, blend_factor) behavior
# Used for creating smooth gradients like beacon slew regions
#
# color1: destination/background color (ARGB format - 0xAARRGGBB)
# color2: source/foreground color (ARGB format - 0xAARRGGBB)
# blend_factor: blend factor (0-255 integer)
# - 0 = full color2 (foreground)
# - 255 = full color1 (background)
def blend_linear(color1, color2, blend_factor)
# Extract components from color1 (background/destination)
var back_a = (color1 >> 24) & 0xFF
var back_r = (color1 >> 16) & 0xFF
var back_g = (color1 >> 8) & 0xFF
var back_b = color1 & 0xFF
# Extract components from color2 (foreground/source)
var fore_a = (color2 >> 24) & 0xFF
var fore_r = (color2 >> 16) & 0xFF
var fore_g = (color2 >> 8) & 0xFF
var fore_b = color2 & 0xFF
# Linear interpolation: result = fore + (back - fore) * blend_factor / 255
var result_a = fore_a + (back_a - fore_a) * blend_factor / 255
var result_r = fore_r + (back_r - fore_r) * blend_factor / 255
var result_g = fore_g + (back_g - fore_g) * blend_factor / 255
var result_b = fore_b + (back_b - fore_b) * blend_factor / 255
# Ensure values are in valid range
result_a = result_a < 0 ? 0 : (result_a > 255 ? 255 : result_a)
result_r = result_r < 0 ? 0 : (result_r > 255 ? 255 : result_r)
result_g = result_g < 0 ? 0 : (result_g > 255 ? 255 : result_g)
result_b = result_b < 0 ? 0 : (result_b > 255 ? 255 : result_b)
# Combine components into a 32-bit value (ARGB format)
return (int(result_a) << 24) | (int(result_r) << 16) | (int(result_g) << 8) | int(result_b)
end
# Blend this frame buffer with another frame buffer using per-pixel alpha
# other_buffer: the other frame buffer to blend with
# region_start: start index for blending (default: 0)
# region_end: end index for blending (default: width-1)
def blend_pixels(other_buffer, region_start, region_end)
# Default parameters
if region_start == nil
region_start = 0
end
if region_end == nil
region_end = self.width - 1
end
# Validate parameters
if self.width != other_buffer.width
raise "value_error", "frame buffers must have the same width"
end
if region_start < 0 || region_start >= self.width
raise "index_error", "region_start out of range"
end
if region_end < region_start || region_end >= self.width
raise "index_error", "region_end out of range"
end
# Blend each pixel using the blend function
var i = region_start
while i <= region_end
var color2 = other_buffer.get_pixel_color(i)
var a2 = (color2 >> 24) & 0xFF
# Only blend if the source pixel has some alpha
if a2 > 0
if a2 == 255
# Fully opaque source pixel, just copy it
self.pixels.set(i * 4, color2, 4)
else
# Partially transparent source pixel, need to blend
var color1 = self.get_pixel_color(i)
var blended = self.blend(color1, color2)
self.pixels.set(i * 4, blended, 4)
end
end
i += 1
end
end
# # Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
# return (a << 24) | (r << 16) | (g << 8) | b
# end
# Convert the frame buffer to a hexadecimal string (for debugging)
def tohex()
@ -273,395 +131,15 @@ class FrameBuffer
self.set_pixel_color(i, v)
end
# Create a gradient fill in the frame buffer
# color1: start color (ARGB format - 0xAARRGGBB)
# color2: end color (ARGB format - 0xAARRGGBB)
# start_pos: start position (default: 0)
# end_pos: end position (default: width-1)
def gradient_fill(color1, color2, start_pos, end_pos)
if start_pos == nil
start_pos = 0
end
if end_pos == nil
end_pos = self.width - 1
end
# Validate parameters
if start_pos < 0 || start_pos >= self.width
raise "index_error", "start_pos out of range"
end
if end_pos < start_pos || end_pos >= self.width
raise "index_error", "end_pos out of range"
end
# Set first pixel directly
self.set_pixel_color(start_pos, color1)
# If only one pixel, we're done
if start_pos == end_pos
return
end
# Set last pixel directly
self.set_pixel_color(end_pos, color2)
# If only two pixels, we're done
if end_pos - start_pos <= 1
return
end
# Extract components from color1 (ARGB format - 0xAARRGGBB)
var a1 = (color1 >> 24) & 0xFF
var r1 = (color1 >> 16) & 0xFF
var g1 = (color1 >> 8) & 0xFF
var b1 = color1 & 0xFF
# Extract components from color2 (ARGB format - 0xAARRGGBB)
var a2 = (color2 >> 24) & 0xFF
var r2 = (color2 >> 16) & 0xFF
var g2 = (color2 >> 8) & 0xFF
var b2 = color2 & 0xFF
# Calculate the total number of steps
var steps = end_pos - start_pos
# Fill the gradient for intermediate pixels
var i = start_pos + 1
while (i < end_pos)
var pos = i - start_pos
# Use tasmota.scale_uint for ratio conversion instead of floating point arithmetic
var r = tasmota.scale_uint(pos, 0, steps, r1, r2)
var g = tasmota.scale_uint(pos, 0, steps, g1, g2)
var b = tasmota.scale_uint(pos, 0, steps, b1, b2)
var a = tasmota.scale_uint(pos, 0, steps, a1, a2)
# Ensure values are in valid range
r = r < 0 ? 0 : (r > 255 ? 255 : r)
g = g < 0 ? 0 : (g > 255 ? 255 : g)
b = b < 0 ? 0 : (b > 255 ? 255 : b)
a = a < 0 ? 0 : (a > 255 ? 255 : a)
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
var color = (a << 24) | (r << 16) | (g << 8) | b
self.set_pixel_color(i, color)
i += 1
end
end
# Apply a mask to this frame buffer
# mask_buffer: the mask frame buffer (alpha channel is used as mask)
# invert: if true, invert the mask (default: false)
def apply_mask(mask_buffer, invert)
if invert == nil
invert = false
end
if self.width != mask_buffer.width
raise "value_error", "frame buffers must have the same width"
end
var i = 0
while i < self.width
var color = self.get_pixel_color(i)
var mask_color = mask_buffer.get_pixel_color(i)
# Extract alpha from mask (0-255)
var mask_alpha = (mask_color >> 24) & 0xFF
# Invert mask if requested
if invert
mask_alpha = 255 - mask_alpha
end
# Extract components from color (ARGB format - 0xAARRGGBB)
var a = (color >> 24) & 0xFF
var r = (color >> 16) & 0xFF
var g = (color >> 8) & 0xFF
var b = color & 0xFF
# Apply mask to alpha channel using tasmota.scale_uint
a = tasmota.scale_uint(mask_alpha, 0, 255, 0, a)
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
var new_color = (a << 24) | (r << 16) | (g << 8) | b
# Update the pixel
self.set_pixel_color(i, new_color)
i += 1
end
end
# Create a copy of this frame buffer
def copy()
return animation.frame_buffer(self) # return using the self copying constructor
end
# Blend a specific region with a solid color using the color's alpha channel
# color: the color to blend (ARGB)
# start_pos: start position (default: 0)
# end_pos: end position (default: width-1)
def blend_color(color, start_pos, end_pos)
if start_pos == nil
start_pos = 0
end
if end_pos == nil
end_pos = self.width - 1
end
# Validate parameters
if start_pos < 0 || start_pos >= self.width
raise "index_error", "start_pos out of range"
end
if end_pos < start_pos || end_pos >= self.width
raise "index_error", "end_pos out of range"
end
# Extract components from color (ARGB format - 0xAARRGGBB)
var a2 = (color >> 24) & 0xFF
var r2 = (color >> 16) & 0xFF
var g2 = (color >> 8) & 0xFF
var b2 = color & 0xFF
# Blend the pixels in the specified region
var i = start_pos
while i <= end_pos
var color1 = self.get_pixel_color(i)
# Only blend if the color has some alpha
if a2 > 0
var blended = self.blend(color1, color)
self.pixels.set(i * 4, blended, 4)
end
i += 1
end
end
# Apply an opacity adjustment to the frame buffer
# opacity: opacity factor (0-511) or another FrameBuffer to use as mask
# - Number: 0 is fully transparent, 255 is original, 511 is maximum opaque
# - FrameBuffer: uses alpha channel as opacity mask
def apply_opacity(opacity)
if opacity == nil
opacity = 255
end
# Check if opacity is a FrameBuffer (mask mode)
if isinstance(opacity, animation.frame_buffer)
# Mask mode: use another frame buffer as opacity mask
var mask_buffer = opacity
if self.width != mask_buffer.width
raise "value_error", "frame buffers must have the same width"
end
var i = 0
while i < self.width
var color = self.get_pixel_color(i)
var mask_color = mask_buffer.get_pixel_color(i)
# Extract alpha from mask as opacity factor (0-255)
var mask_opacity = (mask_color >> 24) & 0xFF
# Extract components from color (ARGB format - 0xAARRGGBB)
var a = (color >> 24) & 0xFF
var r = (color >> 16) & 0xFF
var g = (color >> 8) & 0xFF
var b = color & 0xFF
# Apply mask opacity to alpha channel using tasmota.scale_uint
a = tasmota.scale_uint(mask_opacity, 0, 255, 0, a)
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
var new_color = (a << 24) | (r << 16) | (g << 8) | b
# Update the pixel
self.set_pixel_color(i, new_color)
i += 1
end
else
# Number mode: uniform opacity adjustment
var opacity_value = int(opacity)
# Ensure opacity is in valid range (0-511)
opacity_value = opacity_value < 0 ? 0 : (opacity_value > 511 ? 511 : opacity_value)
# Apply opacity adjustment
var i = 0
while i < self.width
var color = self.get_pixel_color(i)
# Extract components (ARGB format - 0xAARRGGBB)
var a = (color >> 24) & 0xFF
var r = (color >> 16) & 0xFF
var g = (color >> 8) & 0xFF
var b = color & 0xFF
# Adjust alpha using tasmota.scale_uint
# For opacity 0-255: scale down alpha
# For opacity 256-511: scale up alpha (but cap at 255)
if opacity_value <= 255
a = tasmota.scale_uint(opacity_value, 0, 255, 0, a)
else
# Scale up alpha: map 256-511 to 1.0-2.0 multiplier
a = tasmota.scale_uint(a * opacity_value, 0, 255 * 255, 0, 255)
a = a > 255 ? 255 : a # Cap at maximum alpha
end
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
color = (a << 24) | (r << 16) | (g << 8) | b
# Update the pixel
self.set_pixel_color(i, color)
i += 1
end
end
end
# Apply a brightness adjustment to the frame buffer
# brightness: brightness factor (0-511) or another FrameBuffer to use as mask
# - Number: 0 is black, 255 is original, 511 is maximum bright
# - FrameBuffer: uses alpha channel as brightness mask
# start_pos: start position (default: 0)
# end_pos: end position (default: width-1)
def apply_brightness(brightness, start_pos, end_pos)
if brightness == nil
brightness = 255
end
if start_pos == nil
start_pos = 0
end
if end_pos == nil
end_pos = self.width - 1
end
# Validate parameters
if start_pos < 0 || start_pos >= self.width
raise "index_error", "start_pos out of range"
end
if end_pos < start_pos || end_pos >= self.width
raise "index_error", "end_pos out of range"
end
# Check if brightness is a FrameBuffer (mask mode)
if isinstance(brightness, animation.frame_buffer)
# Mask mode: use another frame buffer as brightness mask
var mask_buffer = brightness
if self.width != mask_buffer.width
raise "value_error", "frame buffers must have the same width"
end
var i = start_pos
while i <= end_pos
var color = self.get_pixel_color(i)
var mask_color = mask_buffer.get_pixel_color(i)
# Extract alpha from mask as brightness factor (0-255)
var mask_brightness = (mask_color >> 24) & 0xFF
# Extract components from color (ARGB format - 0xAARRGGBB)
var a = (color >> 24) & 0xFF
var r = (color >> 16) & 0xFF
var g = (color >> 8) & 0xFF
var b = color & 0xFF
# Apply mask brightness to RGB channels using tasmota.scale_uint
r = tasmota.scale_uint(mask_brightness, 0, 255, 0, r)
g = tasmota.scale_uint(mask_brightness, 0, 255, 0, g)
b = tasmota.scale_uint(mask_brightness, 0, 255, 0, b)
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
var new_color = (a << 24) | (r << 16) | (g << 8) | b
# Update the pixel
self.set_pixel_color(i, new_color)
i += 1
end
else
# Number mode: uniform brightness adjustment
var brightness_value = int(brightness)
# Ensure brightness is in valid range (0-511)
brightness_value = brightness_value < 0 ? 0 : (brightness_value > 511 ? 511 : brightness_value)
# Apply brightness adjustment
var i = start_pos
while i <= end_pos
var color = self.get_pixel_color(i)
# Extract components (ARGB format - 0xAARRGGBB)
var a = (color >> 24) & 0xFF
var r = (color >> 16) & 0xFF
var g = (color >> 8) & 0xFF
var b = color & 0xFF
# Adjust brightness using tasmota.scale_uint
# For brightness 0-255: scale down RGB
# For brightness 256-511: scale up RGB (but cap at 255)
if brightness_value <= 255
r = tasmota.scale_uint(r, 0, 255, 0, brightness_value)
g = tasmota.scale_uint(g, 0, 255, 0, brightness_value)
b = tasmota.scale_uint(b, 0, 255, 0, brightness_value)
else
# Scale up RGB: map 256-511 to 1.0-2.0 multiplier
var multiplier = brightness_value - 255 # 0-256 range
r = r + tasmota.scale_uint(r * multiplier, 0, 255 * 256, 0, 255)
g = g + tasmota.scale_uint(g * multiplier, 0, 255 * 256, 0, 255)
b = b + tasmota.scale_uint(b * multiplier, 0, 255 * 256, 0, 255)
r = r > 255 ? 255 : r # Cap at maximum
g = g > 255 ? 255 : g # Cap at maximum
b = b > 255 ? 255 : b # Cap at maximum
end
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
color = (a << 24) | (r << 16) | (g << 8) | b
# Update the pixel
self.set_pixel_color(i, color)
i += 1
end
end
end
# String representation of the frame buffer
def tostring()
return f"FrameBuffer(width={self.width}, pixels={self.pixels})"
end
# Dump the pixels into AARRGGBB string separated with '|'
def dump()
var s = ""
var i = 0
while i < self.width
var color = self.get_pixel_color(i)
# Extract components from color (ARGB format - 0xAARRGGBB)
var a = (color >> 24) & 0xFF
var r = (color >> 16) & 0xFF
var g = (color >> 8) & 0xFF
var b = color & 0xFF
s += f"{a:02X}{r:02X}{g:02X}{b:02X}|"
i += 1
end
s = s[0..-2] # remove last character
return s
end
end
return {'frame_buffer': FrameBuffer}
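A short usage sketch of the refactored API (matching the updated unit tests further below): methods now take the pixel buffer explicitly, and opacity/brightness accept either an int (0-511) or a bytes mask:

var fb = animation.frame_buffer(5)
fb.fill_pixels(fb.pixels, 0xFFFF0000)        # opaque red
fb.apply_brightness(fb.pixels, 128)          # roughly halve RGB
var mask = animation.frame_buffer(5)
mask.fill_pixels(mask.pixels, 0x80000000)    # alpha-only mask at ~50%
fb.apply_opacity(fb.pixels, mask.pixels)     # per-pixel mask mode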

View File

@ -0,0 +1,517 @@
# FrameBufferNtv class for Berry Animation Framework
#
# This class provides a placeholder for the native implementation of some
# static methods.
#
# Below is a pure Berry implementation for the emulator; it is replaced
# by C++ code on Tasmota devices
class FrameBufferNtv
# Blend two colors using their alpha channels
# Returns the blended color as a 32-bit integer (ARGB format - 0xAARRGGBB)
# color1: destination color (ARGB format - 0xAARRGGBB)
# color2: source color (ARGB format - 0xAARRGGBB)
static def blend(color1, color2)
# Extract components from color1 (ARGB format - 0xAARRGGBB)
var a1 = (color1 >> 24) & 0xFF
var r1 = (color1 >> 16) & 0xFF
var g1 = (color1 >> 8) & 0xFF
var b1 = color1 & 0xFF
# Extract components from color2 (ARGB format - 0xAARRGGBB)
var a2 = (color2 >> 24) & 0xFF
var r2 = (color2 >> 16) & 0xFF
var g2 = (color2 >> 8) & 0xFF
var b2 = color2 & 0xFF
# Fast path for common cases
if a2 == 0
# Source is fully transparent, no blending needed
return color1
end
# Use the source alpha directly for blending
var effective_opacity = a2
# Normal alpha blending
# Use tasmota.scale_uint for ratio conversion instead of integer arithmetic
var r = tasmota.scale_uint(255 - effective_opacity, 0, 255, 0, r1) + tasmota.scale_uint(effective_opacity, 0, 255, 0, r2)
var g = tasmota.scale_uint(255 - effective_opacity, 0, 255, 0, g1) + tasmota.scale_uint(effective_opacity, 0, 255, 0, g2)
var b = tasmota.scale_uint(255 - effective_opacity, 0, 255, 0, b1) + tasmota.scale_uint(effective_opacity, 0, 255, 0, b2)
# More accurate alpha blending using tasmota.scale_uint
var a = a1 + tasmota.scale_uint((255 - a1) * a2, 0, 255 * 255, 0, 255)
# Ensure values are in valid range
r = r < 0 ? 0 : (r > 255 ? 255 : r)
g = g < 0 ? 0 : (g > 255 ? 255 : g)
b = b < 0 ? 0 : (b > 255 ? 255 : b)
a = a < 0 ? 0 : (a > 255 ? 255 : a)
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
return (int(a) << 24) | (int(r) << 16) | (int(g) << 8) | int(b)
end
# Linear interpolation between two colors using explicit blend factor
# Returns the blended color as a 32-bit integer (ARGB format - 0xAARRGGBB)
#
# This function matches the original berry_animate frame.blend(color1, color2, blend_factor) behavior
# Used for creating smooth gradients like beacon slew regions
#
# color1: destination/background color (ARGB format - 0xAARRGGBB)
# color2: source/foreground color (ARGB format - 0xAARRGGBB)
# blend_factor: blend factor (0-255 integer)
# - 0 = full color2 (foreground)
# - 255 = full color1 (background)
static def blend_linear(color1, color2, blend_factor)
# Extract components from color1 (background/destination)
var back_a = (color1 >> 24) & 0xFF
var back_r = (color1 >> 16) & 0xFF
var back_g = (color1 >> 8) & 0xFF
var back_b = color1 & 0xFF
# Extract components from color2 (foreground/source)
var fore_a = (color2 >> 24) & 0xFF
var fore_r = (color2 >> 16) & 0xFF
var fore_g = (color2 >> 8) & 0xFF
var fore_b = color2 & 0xFF
# Linear interpolation using tasmota.scale_uint instead of integer mul/div
# Maps blend_factor (0-255) to interpolate between fore and back colors
var result_a = tasmota.scale_uint(blend_factor, 0, 255, fore_a, back_a)
var result_r = tasmota.scale_uint(blend_factor, 0, 255, fore_r, back_r)
var result_g = tasmota.scale_uint(blend_factor, 0, 255, fore_g, back_g)
var result_b = tasmota.scale_uint(blend_factor, 0, 255, fore_b, back_b)
# Combine components into a 32-bit value (ARGB format)
return (int(result_a) << 24) | (int(result_r) << 16) | (int(result_g) << 8) | int(result_b)
end
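# Example (illustrative) for blend_linear: blend_linear(c1, c2, 0) returns c2 and
# blend_linear(c1, c2, 255) returns c1, since tasmota.scale_uint maps the endpoints
# of the 0..255 factor exactly onto the fore/back components.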
# Fill a region of the buffer with a specific color
# pixels: destination bytes buffer
# color: the color to fill (ARGB format - 0xAARRGGBB)
# start_pos: start position (default: 0)
# end_pos: end position (default: -1 = last pixel)
static def fill_pixels(pixels, color, start_pos, end_pos)
# Default parameters
if (start_pos == nil) start_pos = 0 end
if (end_pos == nil) end_pos = -1 end
# Validate region bounds
var width = size(pixels) / 4
# Handle negative indices (Python-style)
if (start_pos < 0) start_pos += width end
if (end_pos < 0) end_pos += width end
# Clamp to valid range
if (start_pos < 0) start_pos = 0 end
if (end_pos < 0) end_pos = 0 end
if (start_pos >= width) return end
if (end_pos >= width) end_pos = width - 1 end
if (end_pos < start_pos) return end
# Fill the region with the color
var i = start_pos
while i <= end_pos
pixels.set(i * 4, color, 4)
i += 1
end
end
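# Example (illustrative) for fill_pixels: with a 10-pixel buffer,
#   FrameBufferNtv.fill_pixels(buf, 0xFFFF0000, -3)
# fills only the last 3 pixels, since negative indices wrap Python-style.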
# Blend destination buffer with source buffer using per-pixel alpha
# dest_pixels: destination bytes buffer
# src_pixels: source bytes buffer
# region_start: start index for blending
# region_end: end index for blending
static def blend_pixels(dest_pixels, src_pixels, region_start, region_end)
# Default parameters
if (region_start == nil) region_start = 0 end
if (region_end == nil) region_end = -1 end
# Validate region bounds
var dest_width = size(dest_pixels) / 4
var src_width = size(src_pixels) / 4
# Clamp to the smaller of both buffers to avoid out-of-range access
if (src_width < dest_width) dest_width = src_width end
if (dest_width < src_width) src_width = dest_width end
if (region_start < 0) region_start += dest_width end
if (region_end < 0) region_end += dest_width end
if (region_start < 0) region_start = 0 end
if (region_end < 0) region_end = 0 end
if (region_start >= dest_width) return end
if (region_end >= dest_width) region_end = dest_width - 1 end
if (region_end < region_start) return end
# Blend each pixel using the blend function
var i = region_start
while i <= region_end
var color2 = src_pixels.get(i * 4, 4)
var a2 = (color2 >> 24) & 0xFF
# Only blend if the source pixel has some alpha
if a2 > 0
if a2 == 255
# Fully opaque source pixel, just copy it
dest_pixels.set(i * 4, color2, 4)
else
# Partially transparent source pixel, need to blend
var color1 = dest_pixels.get(i * 4, 4)
var blended = _class.blend(color1, color2)
dest_pixels.set(i * 4, blended, 4)
end
end
i += 1
end
end
# Create a gradient fill in the buffer
# pixels: destination bytes buffer
# color1: start color (ARGB format - 0xAARRGGBB)
# color2: end color (ARGB format - 0xAARRGGBB)
# start_pos: start position (default: 0)
# end_pos: end position (default: -1 = last pixel)
static def gradient_fill(pixels, color1, color2, start_pos, end_pos)
# Default parameters
if (start_pos == nil) start_pos = 0 end
if (end_pos == nil) end_pos = -1 end
# Validate region bounds
var width = size(pixels) / 4
# Handle negative indices (Python-style)
if (start_pos < 0) start_pos += width end
if (end_pos < 0) end_pos += width end
# Clamp to valid range
if (start_pos < 0) start_pos = 0 end
if (end_pos < 0) end_pos = 0 end
if (start_pos >= width) return end
if (end_pos >= width) end_pos = width - 1 end
if (end_pos < start_pos) return end
# Set first pixel directly
pixels.set(start_pos * 4, color1, 4)
# If only one pixel, we're done
if start_pos == end_pos
return
end
# Set last pixel directly
pixels.set(end_pos * 4, color2, 4)
# If only two pixels, we're done
if end_pos - start_pos <= 1
return
end
# Extract components from color1 (ARGB format - 0xAARRGGBB)
var a1 = (color1 >> 24) & 0xFF
var r1 = (color1 >> 16) & 0xFF
var g1 = (color1 >> 8) & 0xFF
var b1 = color1 & 0xFF
# Extract components from color2 (ARGB format - 0xAARRGGBB)
var a2 = (color2 >> 24) & 0xFF
var r2 = (color2 >> 16) & 0xFF
var g2 = (color2 >> 8) & 0xFF
var b2 = color2 & 0xFF
# Calculate the total number of steps
var steps = end_pos - start_pos
# Fill the gradient for intermediate pixels
var i = start_pos + 1
while (i < end_pos)
var pos = i - start_pos
# Use tasmota.scale_uint for ratio conversion instead of floating point arithmetic
var r = tasmota.scale_uint(pos, 0, steps, r1, r2)
var g = tasmota.scale_uint(pos, 0, steps, g1, g2)
var b = tasmota.scale_uint(pos, 0, steps, b1, b2)
var a = tasmota.scale_uint(pos, 0, steps, a1, a2)
# Ensure values are in valid range
r = r < 0 ? 0 : (r > 255 ? 255 : r)
g = g < 0 ? 0 : (g > 255 ? 255 : g)
b = b < 0 ? 0 : (b > 255 ? 255 : b)
a = a < 0 ? 0 : (a > 255 ? 255 : a)
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
var color = (a << 24) | (r << 16) | (g << 8) | b
pixels.set(i * 4, color, 4)
i += 1
end
end
# Blend a specific region with a solid color using the color's alpha channel
# pixels: destination bytes buffer
# color: the color to blend (ARGB format - 0xAARRGGBB)
# start_pos: start position (default: 0)
# end_pos: end position (default: -1 = last pixel)
static def blend_color(pixels, color, start_pos, end_pos)
# Default parameters
if (start_pos == nil) start_pos = 0 end
if (end_pos == nil) end_pos = -1 end
# Validate region bounds
var width = size(pixels) / 4
# Handle negative indices (Python-style)
if (start_pos < 0) start_pos += width end
if (end_pos < 0) end_pos += width end
# Clamp to valid range
if (start_pos < 0) start_pos = 0 end
if (end_pos < 0) end_pos = 0 end
if (start_pos >= width) return end
if (end_pos >= width) end_pos = width - 1 end
if (end_pos < start_pos) return end
# Extract alpha from color
var a2 = (color >> 24) & 0xFF
# Only blend if the color has some alpha
if a2 == 0
return # Fully transparent, nothing to do
end
# Blend the pixels in the specified region
var i = start_pos
while i <= end_pos
var color1 = pixels.get(i * 4, 4)
var blended = _class.blend(color1, color)
pixels.set(i * 4, blended, 4)
i += 1
end
end
# Apply an opacity adjustment to a region of the buffer
# pixels: destination bytes buffer
# opacity: opacity factor (0-511) OR mask_pixels (bytes buffer to use as mask)
# - Number: 0 is fully transparent, 255 is original, 511 is maximum opaque
# - bytes(): uses alpha channel as opacity mask
# start_pos: start position (default: 0)
# end_pos: end position (default: -1 = last pixel)
static def apply_opacity(pixels, opacity, start_pos, end_pos)
if opacity == nil opacity = 255 end
# Default parameters
if (start_pos == nil) start_pos = 0 end
if (end_pos == nil) end_pos = -1 end
# Validate region bounds
var width = size(pixels) / 4
# Handle negative indices (Python-style)
if (start_pos < 0) start_pos += width end
if (end_pos < 0) end_pos += width end
# Clamp to valid range
if (start_pos < 0) start_pos = 0 end
if (end_pos < 0) end_pos = 0 end
if (start_pos >= width) return end
if (end_pos >= width) end_pos = width - 1 end
if (end_pos < start_pos) return end
# Check if opacity is a bytes buffer (mask mode)
if isinstance(opacity, bytes)
# Mask mode: use another buffer as opacity mask
var mask_pixels = opacity
var mask_width = size(mask_pixels) / 4
# Validate mask size
if mask_width < width
width = mask_width
end
if end_pos >= width
end_pos = width - 1
end
# Apply mask opacity
var i = start_pos
while i <= end_pos
var color = pixels.get(i * 4, 4)
var mask_color = mask_pixels.get(i * 4, 4)
# Extract alpha from mask as opacity factor (0-255)
var mask_opacity = (mask_color >> 24) & 0xFF
# Extract components from color (ARGB format - 0xAARRGGBB)
var a = (color >> 24) & 0xFF
var r = (color >> 16) & 0xFF
var g = (color >> 8) & 0xFF
var b = color & 0xFF
# Apply mask opacity to alpha channel using tasmota.scale_uint
a = tasmota.scale_uint(mask_opacity, 0, 255, 0, a)
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
var new_color = (a << 24) | (r << 16) | (g << 8) | b
# Update the pixel
pixels.set(i * 4, new_color, 4)
i += 1
end
else
# Number mode: uniform opacity adjustment
var opacity_value = int(opacity == nil ? 255 : opacity)
# Ensure opacity is in valid range (0-511)
opacity_value = opacity_value < 0 ? 0 : (opacity_value > 511 ? 511 : opacity_value)
# Apply opacity adjustment
var i = start_pos
while i <= end_pos
var color = pixels.get(i * 4, 4)
# Extract components (ARGB format - 0xAARRGGBB)
var a = (color >> 24) & 0xFF
var r = (color >> 16) & 0xFF
var g = (color >> 8) & 0xFF
var b = color & 0xFF
# Adjust alpha using tasmota.scale_uint
# For opacity 0-255: scale down alpha
# For opacity 256-511: scale up alpha (but cap at 255)
if opacity_value <= 255
a = tasmota.scale_uint(opacity_value, 0, 255, 0, a)
else
# Scale up alpha: map 256-511 to 1.0-2.0 multiplier
a = tasmota.scale_uint(a * opacity_value, 0, 255 * 255, 0, 255)
a = a > 255 ? 255 : a # Cap at maximum alpha
end
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
color = (a << 24) | (r << 16) | (g << 8) | b
# Update the pixel
pixels.set(i * 4, color, 4)
i += 1
end
end
end
# Apply a brightness adjustment to a region of the buffer
# pixels: destination bytes buffer
# brightness: brightness factor (0-511) OR mask_pixels (bytes buffer to use as mask)
# - Number: 0 is black, 255 is original, 511 is maximum bright
# - bytes(): uses alpha channel as brightness mask
# start_pos: start position (default: 0)
# end_pos: end position (default: -1 = last pixel)
static def apply_brightness(pixels, brightness, start_pos, end_pos)
# Default parameters
if (start_pos == nil) start_pos = 0 end
if (end_pos == nil) end_pos = -1 end
# Validate region bounds
var width = size(pixels) / 4
# Handle negative indices (Python-style)
if (start_pos < 0) start_pos += width end
if (end_pos < 0) end_pos += width end
# Clamp to valid range
if (start_pos < 0) start_pos = 0 end
if (end_pos < 0) end_pos = 0 end
if (start_pos >= width) return end
if (end_pos >= width) end_pos = width - 1 end
if (end_pos < start_pos) return end
# Check if brightness is a bytes buffer (mask mode)
if isinstance(brightness, bytes)
# Mask mode: use another buffer as brightness mask
var mask_pixels = brightness
var mask_width = size(mask_pixels) / 4
# Validate mask size
if mask_width < width
width = mask_width
end
if end_pos >= width
end_pos = width - 1
end
# Apply mask brightness
var i = start_pos
while i <= end_pos
var color = pixels.get(i * 4, 4)
var mask_color = mask_pixels.get(i * 4, 4)
# Extract alpha from mask as brightness factor (0-255)
var mask_brightness = (mask_color >> 24) & 0xFF
# Extract components from color (ARGB format - 0xAARRGGBB)
var a = (color >> 24) & 0xFF
var r = (color >> 16) & 0xFF
var g = (color >> 8) & 0xFF
var b = color & 0xFF
# Apply mask brightness to RGB channels using tasmota.scale_uint
r = tasmota.scale_uint(mask_brightness, 0, 255, 0, r)
g = tasmota.scale_uint(mask_brightness, 0, 255, 0, g)
b = tasmota.scale_uint(mask_brightness, 0, 255, 0, b)
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
var new_color = (a << 24) | (r << 16) | (g << 8) | b
# Update the pixel
pixels.set(i * 4, new_color, 4)
i += 1
end
else
# Number mode: uniform brightness adjustment
var brightness_value = int(brightness == nil ? 255 : brightness)
# Ensure brightness is in valid range (0-511)
brightness_value = brightness_value < 0 ? 0 : (brightness_value > 511 ? 511 : brightness_value)
# Apply brightness adjustment
var i = start_pos
while i <= end_pos
var color = pixels.get(i * 4, 4)
# Extract components (ARGB format - 0xAARRGGBB)
var a = (color >> 24) & 0xFF
var r = (color >> 16) & 0xFF
var g = (color >> 8) & 0xFF
var b = color & 0xFF
# Adjust brightness using tasmota.scale_uint
# For brightness 0-255: scale down RGB
# For brightness 256-511: scale up RGB (but cap at 255)
if brightness_value <= 255
r = tasmota.scale_uint(r, 0, 255, 0, brightness_value)
g = tasmota.scale_uint(g, 0, 255, 0, brightness_value)
b = tasmota.scale_uint(b, 0, 255, 0, brightness_value)
else
# Scale up RGB: map 256-511 to 1.0-2.0 multiplier
var multiplier = brightness_value - 255 # 0-256 range
r = r + tasmota.scale_uint(r * multiplier, 0, 255 * 256, 0, 255)
g = g + tasmota.scale_uint(g * multiplier, 0, 255 * 256, 0, 255)
b = b + tasmota.scale_uint(b * multiplier, 0, 255 * 256, 0, 255)
r = r > 255 ? 255 : r # Cap at maximum
g = g > 255 ? 255 : g # Cap at maximum
b = b > 255 ? 255 : b # Cap at maximum
end
# Combine components into a 32-bit value (ARGB format - 0xAARRGGBB)
color = (a << 24) | (r << 16) | (g << 8) | b
# Update the pixel
pixels.set(i * 4, color, 4)
i += 1
end
end
end
end
return FrameBufferNtv

View File

@ -29,7 +29,7 @@ class TestAnimation : animation.animation
self.render_called = true
# Fill frame with red for testing
if frame != nil
frame.fill_pixels(0xFF0000FF)
frame.fill_pixels(frame.pixels, 0xFF0000FF)
end
return true
end

View File

@ -34,7 +34,7 @@ fb.set_pixel_color(4, 0x80FF00FF) # Set fifth pixel to purple with 50% alpha
assert(fb.get_pixel_color(4) == 0x80FF00FF, f"Fifth pixel should be purple with 50% alpha (0x{fb.get_pixel_color(4) :08x})")
# Test fill_pixels method
fb.fill_pixels(0xFFFFFFFF) # Fill with white
fb.fill_pixels(fb.pixels, 0xFFFFFFFF) # Fill with white
var all_white = true
for i: 0..9
@ -46,7 +46,7 @@ end
assert(all_white, "All pixels should be white")
# Test fill_pixels with color components
fb.fill_pixels(0xFF00FF00) # Fill with green
fb.fill_pixels(fb.pixels, 0xFF00FF00) # Fill with green
var all_green = true
for i: 0..9
@ -61,11 +61,11 @@ assert(all_green, "All pixels should be green")
var fb1 = animation.frame_buffer(10)
var fb2 = animation.frame_buffer(10)
fb1.fill_pixels(0xFF0000FF) # Fill fb1 with red (fully opaque)
fb2.fill_pixels(0x80FF0000) # Fill fb2 with blue at 50% alpha
fb1.fill_pixels(fb1.pixels, 0xFF0000FF) # Fill fb1 with red (fully opaque)
fb2.fill_pixels(fb2.pixels, 0x80FF0000) # Fill fb2 with blue at 50% alpha
# Blend fb2 into fb1 using per-pixel alpha
fb1.blend_pixels(fb2)
fb1.blend_pixels(fb1.pixels, fb2.pixels)
var all_blended = true
for i: 0..9
@ -99,8 +99,8 @@ end
assert(all_copied, "All pixels should be copied correctly")
# Test blend_color method
fb1.fill_pixels(0xFF0000FF) # Fill fb1 with red
fb1.blend_color(0x8000FF00) # Blend with green at 50% alpha
fb1.fill_pixels(fb1.pixels, 0xFF0000FF) # Fill fb1 with red
fb1.blend_color(fb1.pixels, 0x8000FF00) # Blend with green at 50% alpha
var still_red = true
for i: 0..9
@ -116,8 +116,8 @@ print("Testing apply_brightness method...")
# Test reducing brightness (0-255 range)
var brightness_test = animation.frame_buffer(5)
brightness_test.fill_pixels(0xFFFF0000) # Red with full brightness (255)
brightness_test.apply_brightness(128) # Apply 50% brightness
brightness_test.fill_pixels(brightness_test.pixels, 0xFFFF0000) # Red with full brightness (255)
brightness_test.apply_brightness(brightness_test.pixels, 128) # Apply 50% brightness
var reduced_pixel = brightness_test.get_pixel_color(0)
var reduced_r = (reduced_pixel >> 16) & 0xFF
@ -125,8 +125,8 @@ assert(reduced_r == 128, f"Red component should be reduced to 128, got {reduced_
# Test increasing brightness (256-511 range)
var increase_test = animation.frame_buffer(5)
increase_test.fill_pixels(0xFF008000) # Green with 50% brightness (128)
increase_test.apply_brightness(384) # Apply 1.5x brightness (384 = 256 + 128)
increase_test.fill_pixels(increase_test.pixels, 0xFF008000) # Green with 50% brightness (128)
increase_test.apply_brightness(increase_test.pixels, 384) # Apply 1.5x brightness (384 = 256 + 128)
var increased_pixel = increase_test.get_pixel_color(0)
var increased_g = (increased_pixel >> 8) & 0xFF
@ -137,8 +137,8 @@ assert(increased_g <= 255, f"Green component should not exceed 255, got {increas
# Test zero brightness (fully black)
var black_test = animation.frame_buffer(5)
black_test.fill_pixels(0xFFFF0000) # Red with full brightness
black_test.apply_brightness(0) # Make fully black
black_test.fill_pixels(black_test.pixels, 0xFFFF0000) # Red with full brightness
black_test.apply_brightness(black_test.pixels, 0) # Make fully black
var black_pixel = black_test.get_pixel_color(0)
var black_r = (black_pixel >> 16) & 0xFF
@ -150,8 +150,8 @@ assert(black_b == 0, f"Blue component should be 0 (black), got {black_b}")
# Test maximum brightness (should cap at 255)
var max_test = animation.frame_buffer(5)
max_test.fill_pixels(0xFF008000) # Green with 50% brightness
max_test.apply_brightness(511) # Apply maximum brightness
max_test.fill_pixels(max_test.pixels, 0xFF008000) # Green with 50% brightness
max_test.apply_brightness(max_test.pixels, 511) # Apply maximum brightness
var max_pixel = max_test.get_pixel_color(0)
var max_g = (max_pixel >> 8) & 0xFF
@ -159,8 +159,8 @@ assert(max_g == 255, f"Green component should be capped at 255, got {max_g}")
# Test that alpha channel is preserved
var alpha_test = animation.frame_buffer(5)
alpha_test.fill_pixels(0x80FF0000) # Red with 50% alpha
alpha_test.apply_brightness(128) # Apply 50% brightness
alpha_test.fill_pixels(alpha_test.pixels, 0x80FF0000) # Red with 50% alpha
alpha_test.apply_brightness(alpha_test.pixels, 128) # Apply 50% brightness
var alpha_pixel = alpha_test.get_pixel_color(0)
var alpha_a = (alpha_pixel >> 24) & 0xFF
@ -169,11 +169,11 @@ assert(alpha_a == 128, f"Alpha should be preserved at 128, got {alpha_a}")
assert(alpha_r == 128, f"Red should be reduced to 128, got {alpha_r}")
# Test blend_pixels with region
fb1.fill_pixels(0xFF0000FF) # Fill fb1 with red (fully opaque)
fb2.fill_pixels(0x8000FF00) # Fill fb2 with green at 50% alpha
fb1.fill_pixels(fb1.pixels, 0xFF0000FF) # Fill fb1 with red (fully opaque)
fb2.fill_pixels(fb2.pixels, 0x8000FF00) # Fill fb2 with green at 50% alpha
# Blend fb2 into fb1 using per-pixel alpha, but only for the first half
fb1.blend_pixels(fb2, 0, 4)
fb1.blend_pixels(fb1.pixels, fb2.pixels, 0, 4)
var first_half_blended = true
var second_half_original = true
@ -197,7 +197,7 @@ assert(second_half_original, "Second half should remain original")
# Test gradient_fill method
fb1.clear()
fb1.gradient_fill(0xFFFF0000, 0xFF00FF00) # Red to green gradient
fb1.gradient_fill(fb1.pixels, 0xFFFF0000, 0xFF00FF00) # Red to green gradient
var first_pixel_color = fb1.get_pixel_color(0)
var last_pixel_color = fb1.get_pixel_color(9)
@ -205,31 +205,13 @@ var last_pixel_color = fb1.get_pixel_color(9)
assert(first_pixel_color == 0xFFFF0000, f"First pixel should be red (0x{first_pixel_color :08x})")
assert(last_pixel_color == 0xFF00FF00, f"Last pixel should be green (0x{last_pixel_color :08x})")
# Test apply_mask method
fb1.fill_pixels(0xFF0000FF) # Fill fb1 with red
fb2.clear()
# Create a gradient mask
for i: 0..9
var alpha = tasmota.scale_uint(i, 0, 9, 0, 255)
fb2.set_pixel_color(i, animation.frame_buffer.to_color(255, 255, 255, alpha)) # White with varying alpha
end
fb1.apply_mask(fb2)
# First pixel should be fully transparent (alpha = 0)
assert((fb1.get_pixel_color(0) >> 24) & 0xFF == 0, "First pixel should be fully transparent")
# Last pixel should be fully opaque (alpha = 255)
assert((fb1.get_pixel_color(9) >> 24) & 0xFF == 255, "Last pixel should be fully opaque")
# Test apply_opacity method
print("Testing apply_opacity method...")
# Test reducing opacity (0-255 range)
var opacity_test = animation.frame_buffer(5)
opacity_test.fill_pixels(0xFF0000FF) # Red with full alpha (255)
opacity_test.apply_opacity(128) # Apply 50% opacity
opacity_test.fill_pixels(opacity_test.pixels, 0xFF0000FF) # Red with full alpha (255)
opacity_test.apply_opacity(opacity_test.pixels, 128) # Apply 50% opacity
var reduced_pixel = opacity_test.get_pixel_color(0)
var reduced_alpha = (reduced_pixel >> 24) & 0xFF
@ -237,8 +219,8 @@ assert(reduced_alpha == 128, f"Alpha should be reduced to 128, got {reduced_alph
# Test increasing opacity (256-511 range)
var increase_test = animation.frame_buffer(5)
increase_test.fill_pixels(0x800000FF) # Red with 50% alpha (128)
increase_test.apply_opacity(384) # Apply 1.5x opacity (384 = 256 + 128)
increase_test.fill_pixels(increase_test.pixels, 0x800000FF) # Red with 50% alpha (128)
increase_test.apply_opacity(increase_test.pixels, 384) # Apply 1.5x opacity (384 = 256 + 128)
var increased_pixel = increase_test.get_pixel_color(0)
@ -250,8 +232,8 @@ assert(increased_alpha <= 255, f"Alpha should not exceed 255, got {increased_alp
# Test zero opacity (fully transparent)
var transparent_test = animation.frame_buffer(5)
transparent_test.fill_pixels(0xFF0000FF) # Red with full alpha
transparent_test.apply_opacity(0) # Make fully transparent
transparent_test.fill_pixels(transparent_test.pixels, 0xFF0000FF) # Red with full alpha
transparent_test.apply_opacity(transparent_test.pixels, 0) # Make fully transparent
var transparent_pixel = transparent_test.get_pixel_color(0)
var transparent_alpha = (transparent_pixel >> 24) & 0xFF
@ -259,8 +241,8 @@ assert(transparent_alpha == 0, f"Alpha should be 0 (transparent), got {transpare
# Test maximum opacity (should cap at 255)
var max_test = animation.frame_buffer(5)
max_test.fill_pixels(0x800000FF) # Red with 50% alpha
max_test.apply_opacity(511) # Apply maximum opacity
max_test.fill_pixels(max_test.pixels, 0x800000FF) # Red with 50% alpha
max_test.apply_opacity(max_test.pixels, 511) # Apply maximum opacity
var max_pixel = max_test.get_pixel_color(0)
var max_alpha = (max_pixel >> 24) & 0xFF