ideas for dissolve effect?

Pixilang programming language
Post Reply
ainegil
Posts: 210
Joined: Thu Sep 22, 2022 11:37 pm

ideas for dissolve effect?

Post by ainegil »

I am looking for a dissolve effect for images similar to ink on wet paper.

A very simple solution would be to scale the image ( and blur, maybe) up and down
and add it, and then repeat with the new image and so on.

But that's too synthetic and regular, I think; I am looking for something more organic and irregular/random.
Also if you shrink the image the borders are not defined ( could be solved with mirrored copies maybe).

Maybe someone has some more ideas, or code even?
ainegil
Posts: 210
Joined: Thu Sep 22, 2022 11:37 pm

Re: ideas for dissolve effect?

Post by ainegil »



looks more like a lava lamp

Code: Select all

//
//  KNOWN ISSUES
//
//
// 1. does not do proper garbage collection, you may need to restart
// Pixilang if you do multiple renders / changes to the code
//



// --- Screen / timing setup ---
//set_pixel_size( WINDOW_XSIZE / 640 )

scr = get_screen()
xsize = 640//get_xsize( scr )
ysize = 360//get_ysize( scr )
resize( scr, xsize, ysize )
set_screen(scr)
fps = 25


// INPUT VIDEO file path
mpath = "/home/nancy/Videos/running woods 1.mp4"

//INPUT WAV file path
filename = "/home/nancy/Music/20231221 22B Conversation II.wav"
//

//OUTPUT MJPEG AVI video file:
avi_filename = "/home/nancy/Videos/pixilang/visualizer test 2 convers2 6d restore1.avi"

// ################################ Effect Settings: #########################################
// ###########################################################################################

preamp = dbtoa(-3)// amplify audio for analysis, amplifies effect amount only, not the audio
bands = 38 //8 // number of ERB bands, max 40
erbbandmin = 1.1 // minimum band ERB scale 1...40
erbbandmax = 38 // max band ERB scale 1...40

backframes = 0// opacity of past frames

blendmode = 0// 0|1|2 blending mode: 0-default, 1-src color, 2-color
backmode = 0// 0|1|2 blending mode for past frames: 0-default, 1-src color, 2-color

phasestep = -0.2 * 2* M_PI / fps // per-frame phase increment for the animation phase map (lmapph)

sscreen = 1//

bgain = 1// gain

inverse_a = 0// 0|1 inverse blending

spread = 7 // spread amount

tint = #FFFDFE // frame tint
backtint = #FBFCFD // past frames tint

lmapsumsmoo = 4 // smoothed overall loudness; initial value, updated every frame by anaudio()

duration = 10*60  // duration in seconds

// init:

include "../../lib/ffmpeg_video_export.pixi"
include "../../lib/ffmpeg_video_import.pixi"
include "../../lib/mjpeg.pixi"
include "../../lib/gl_primitives.pixi"
include "../../lib/gfx_primitives.pixi"

framecount = 0
startframe = 1
maxframe = duration * fps // total frames to render

// open the input video, scaled to the working resolution
vid_import = ffmpeg_video_import_open( 
mpath, 
xsize, ysize, 
startframe, maxframe )

wav = load( filename )
//Sound options:
sample_rate_scale = 1
logf( "WAV INFO:\n" )
logf( "  Sample Rate: %d\n", wav.sample_rate )
logf( "  Channels: %d\n", wav.channels )
logf( "  Loop Start (sample number): %d\n", wav.loop_start )
logf( "  Loop Length (number of samples): %d\n", wav.loop_len )
logf( "  Loop Type (0-none; 1-normal; 2-bidirectional): %d\n", wav.loop_type )
wav_ptr = 0
wav_size = get_size( wav ) //number of frames
wav_channels = wav.channels
wav_amp_max = 256 // full-scale amplitude for the sample type (corrected below)
samplerate = wav.sample_rate
type = get_type( wav )
if type == INT16 { wav_amp_max = 1 << 15 }
if type == INT32 { wav_amp_max = 1 << 30 }
if type == FLOAT32 { wav_amp_max = 1 }

// Offline render (write MJPEG AVI) when an output path is set; live playback otherwise.
// NOTE(review): 'vo' is only assigned in the first branch; the live branch relies
// on an unset variable reading as 0 for the later "if vo" checks.
if avi_filename != 0
{
    vo = 1
    vo_f = fopen( avi_filename, "wb" )
    if vo_f <= 0 { logf( "Can't open video file for writing\n" ) halt }
    vo_encoder = mjpeg_encoder_open(
	fps,
	xsize,
	ysize,
	90, //Quality
	wav_channels, //Audio channels
	wav.sample_rate * sample_rate_scale, //Audio frames per second
	get_type( wav ), //Audio sample type
	MJPEG_ENCODER_FLAG_USEINDEX | MJPEG_ENCODER_FLAG_HASSOUND, //Flags
	vo_f )
    vo_audio_buf_size = mjpeg_encoder_get_audio_size( vo_encoder ) //Number of frames per audio chunk
    vo_audio_buf = new( vo_audio_buf_size * wav_channels, 1, get_type( wav ) ) //interleaved chunk
    vo_audio_ch_bufs = new( wav_channels, 1, INT ) //per-channel container handles
    i = 0 while i < wav_channels { vo_audio_ch_bufs[ i ] = new( vo_audio_buf_size, 1, get_type( wav ) ) i + 1 }
    logf( "Audio buffer size: %d frames\n", vo_audio_buf_size )
}
else
{
    set_audio_callback( audio_callback, 0, wav.sample_rate * sample_rate_scale, get_type( wav ), wav_channels, AUDIO_FLAG_INTERP2 )

    rate1 = get_audio_sample_rate( 0 )
    rate2 = get_audio_sample_rate( 1 )
    logf( "Local (defined by the set_audio_callback()) sample rate: %d Hz\n", rate1 )
    logf( "Global (defined in the global Pixilang preferences) sample rate: %d Hz\n", rate2 )
    if rate1 != rate2
    {
	logf( "%d != %d, so resampling will be enabled\n", rate1, rate2 )
    }
}
//-------------------


gfx_init()

// Wrapping 0..1 phase derived from timer 0; assumes $d*1024 is a power of two — TODO confirm.
fn get_timer2( $d ){ $d * 1024 ret( ( get_timer( 0 ) & ( $d - 1 ) ) / $d ) }


//########################################################################################

fn gl_callback( $userdata )
{
// Per-frame OpenGL render callback (runs on the GL thread).
// Feeds the audio-analysis lookup maps to the shader, draws the feedback image
// imgW three times (two rotated/zoomed copies + one plain), reads the result
// back into imgW and blurs it — a classic video-feedback loop.
// Shared state with the main loop is protected by g_graphics_sync.

mutex_lock( g_graphics_sync )

    set_screen( GL_SCREEN ) 
   	if(framecount == 1) {clear()} // clear once on the first rendered frame




gl_use_prog( gl_prog )
// upload the 256-entry analysis maps built by anaudio()
gl_uniform( gl_prog.g_lookupcurve, lmapsmoo, 1, 0, 256 )
gl_uniform( gl_prog.g_lookup_p, lmappk, 1, 0, 256 )
gl_uniform( gl_prog.g_lookup_ph, lmapph, 1, 0, 256 )
gl_uniform( gl_prog.g_amount, lmapsumsmoo*2.618 + 0.382 )
gl_uniform( gl_prog.g_gain, maxbrightfac )
gl_uniform( gl_prog.g_lift, minbrights )
gl_uniform( gl_prog.g_time, 1.0 )
//set_alpha( imgW, img_A)
update_gl_data(imgW) // re-upload the CPU-modified feedback image as a texture


//gl_blend_func( GL_SRC_ALPHA, GL_ZERO)  


gl_blend_func() 
transp(127)
t_reset()
// zoom factor grows with the smoothed loudness (lmapsumsmoo)
tscal = (xsize + lmapsumsmoo*38.832 )/xsize
t_rotate(lmapsumsmoo*2.6, 0,0,1)
pixi( imgW, 0,0, tint, tscal,tscal )
tscal = (xsize + lmapsumsmoo*24 )/xsize
t_rotate(-lmapsumsmoo*5.2, 0,0,1)
pixi( imgW, 0,0, tint, tscal,tscal )
t_reset()
pixi( imgW, 0,0, tint, 1,1 )
copy( imgW, GL_SCREEN) // read back: imgW becomes the input for the next frame

// blur pass 1 (imgW -> imgW_clone, 3x3 weighted kernel)
conv_filter(
    imgW_clone,  //destination
    imgW,  //source
    krnl_x2,  //kernel
//OPTIONAL PARAMETERS:
    24,  //divisor (dividing the result)
    0,  //offset (addition after division)
    CONV_FILTER_COLOR,  //flags (options)
   1, 1,  //kernel center XY (relative to the upper left corner)
    0, 0,  //destination XY
    0, 0,  //source XY
    xsize, ysize,
    1, 1  //XY pitch (step length)
)
// blur pass 2 (imgW_clone -> imgW, 3x3 box blur)
conv_filter(
    imgW,  //destination
    imgW_clone,  //source
    krnl_boxblr3,  //kernel
//OPTIONAL PARAMETERS:
    9,  //divisor (dividing the result)
    0,  //offset (addition after division)
    CONV_FILTER_COLOR,  //flags (options)
   1, 1,  //kernel center XY (relative to the upper left corner)
    0, 0,  //destination XY
    0, 0,  //source XY
    xsize, ysize,
    1, 1  //XY pitch (step length)
)



    gl_use_prog() //Back to default GLSL program


/*
	transp(0)
    ts = ""
    sprintf( ts, "FPS:%u\nframe:%u", FPS, lmapsumsmoo )
    print( ts, -hxsize , -hysize , GREEN, TOP | LEFT )

*/


copy(scr, GL_SCREEN) // mirror the GL result to the software screen (used by the AVI writer)
//copy( imgW, GL_SCREEN)

	set_screen( scr ) //Back to the default screen
mutex_unlock( g_graphics_sync )
}

gl_vshader = GL_SHADER_TEX_RGB_SOLID  //Vertex shader 
gl_fshader = //Fragment shader (GLSL source string — runtime data, left byte-identical)
"PRECISION( MEDIUMP, float )
uniform sampler2D g_texture;
uniform float g_lookupcurve[256];
uniform float g_lookup_p[256];
uniform float g_lookup_ph[256];
uniform float g_amount;
uniform float g_time;
uniform vec4 g_color;
uniform float g_gain;
uniform float g_lift;
IN vec2 tex_coord_var;
void main()
{
    vec2 tc = tex_coord_var;
	vec4 pxl = texture2D( g_texture, tc );

 	vec4 sampl = pxl;
sampl *= g_gain;
sampl -= g_lift;
 sampl[3] =  sampl[0];
 sampl[3] +=  sampl[1];
 sampl[3] +=  sampl[2];
 //sampl[3] *= 0.3333;
float scal = 0.707;
scal /=g_amount;
tc.x += cos(sampl[0])*scal ;
tc.y += sin(sampl[0])*scal ;
tc.x += cos(sampl[1])*scal ;
tc.y += sin(sampl[1])*scal ;
tc.x += cos(sampl[2])*scal ;
tc.y += sin(sampl[2])*scal ;
tc.x = abs(tc.x);
tc.y = abs(tc.y);
//sampl[3] = 0.0;
	float fliy = 255.0 ;
	fliy *= tc.y;
	int liy = int(fliy);
	float flix = 255.0 ;
	flix *= tc.x;
	int lix = int(flix);
	float octa = 10.0;
	float r ;
	r = sqrt( (tc.x)* (tc.x) + (tc.y) * (tc.y));
	r *= 128.0;
	int ridx = int(r);
 	float modux ;
	float moduy ;
	float modua ;
	modua = g_lookupcurve[ ridx ] ;
	float modub ;
	float r2 ;
	r2 = sqrt( (1.0-tc.x) * (1.0-tc.x) + (1.0-tc.y) * (1.0-tc.y));
	r2 *= 128.0;
	int ridx2 = int(r2);
	modub = g_lookupcurve[ridx2 ] ;
	float sgn;
	if( tc.x >= 0.314159 ){
 	sgn = 1.0;
	}else{ 
	sgn = -1.0;
	}

	pxl[0] =(modua + modub)*1.1;
	pxl[1] =abs(modua - modub)*1.382;
	pxl[2] =(g_lookup_p[ ridx ] + g_lookup_p[ ridx2 ]+g_lookup_p[ 255-ridx ] 
+ g_lookup_p[ 255-ridx2 ]);
pxl[3] = sqrt(abs(pxl[1] * pxl[0] * pxl[2] ));
	sampl[3] /= 3.0;
sampl *= 2.618;
	pxl += sampl;
	pxl /= 3.618;
    gl_FragColor = pxl * g_color;
}
"

set_screen(scr)
gl_prog = gl_new_prog( gl_vshader, gl_fshader ) // compile the effect shader

g_graphics_sync = mutex_create() // guards state shared between main loop and GL thread

set_gl_callback( gl_callback, 0 ) 

// label




logf("INIT--------------------------done \n")

start_timer(0)

// ####################################################################################
// ######################   MAIN LOOP    ####################################################
// ####################################################################################
// Main render loop: analyze audio, let gl_callback() draw the feedback frame,
// derive brightness-normalization factors from the frame, then write the
// AVI chunk (offline mode) and handle quit events.
// NOTE(review): sample_loop is never assigned in this script (reads as 0),
// so the loop also ends when the WAV is exhausted.
while( framecount < maxframe  )
{


mutex_lock( g_graphics_sync )
anaudio(wav_ptr) // updates lmapsmoo/lmappk/lmapph/lmapsumsmoo from the audio
	//ffmpeg_video_import_read(vid_import,imgbuf)

mutex_unlock( g_graphics_sync )
framecount +1
frame() // triggers gl_callback()


// auto levels: track the frame's minimum and average brightness
split_rgb( 0, imgW, img_Y, img_Cb, img_Cr)
//op_cn(OP_MAX, img_Y, maxbright)
op_cn(OP_MIN, img_Y, minbright)
//maxbright = maxbright >> 15
//maxbrightfac = maxbrightfac -( maxbrightfac + 255/maxbright)*0.05
minbright = minbright >> 15
minbrights = minbrights -(minbrights + minbright/255)*0.08 // smoothed lift (g_lift)

//if(maxbrightfac > 1.07){ maxbrightfac = 1.07}
//if(maxbrightfac < 1.0){ maxbrightfac = 1.0}
copy( img_Y_clone, img_Y)
// FIX: the four lines below used the undefined name "im_Y_clone" while the
// buffer actually created in gfx_init() and filled above is "img_Y_clone".
convert_type( img_Y_clone, FLOAT)
op_cn( OP_MUL, img_Y_clone, 1/16777216)
op_cn(OP_SUM, img_Y_clone, brightsum)
brightsum = brightsum /get_size(img_Y_clone) // mean brightness of the frame
brightsum = brightsum *0.5
maxbrightfac = maxbrightfac -( maxbrightfac + 1/brightsum)*0.19 // smoothed gain (g_gain)
if(maxbrightfac > 1.1){ maxbrightfac = 1.1}
if(maxbrightfac < 1.0){ maxbrightfac = 1.0}
//
mutex_lock( g_graphics_sync )
    if vo 
    {
	//Video export: pull one audio chunk through audio_callback() and interleave it
	audio_callback( 0, 0, vo_audio_ch_bufs, vo_audio_buf_size, 0, -1, 0 )
	i = 0 while i < wav_channels 
	{
	    copy( vo_audio_buf, vo_audio_ch_bufs[ i ], i, 0, vo_audio_buf_size, wav_channels, 1 )
	    i + 1 
	}
	//mjpeg_encoder_write_image( vo_encoder, scr )
	mjpeg_encoder_write_image( vo_encoder, 0 )
	mjpeg_encoder_write_audio( vo_encoder, vo_audio_buf, 0, 0 )
	mjpeg_encoder_next_frame( vo_encoder )

	//frame()
    }
    else
    {
	//frame( 1000 / fps )
    }

    if !sample_loop && wav_ptr >= wav_size { breakall }
    //while( get_event() ) { if EVT[ EVT_TYPE ] == EVT_QUIT { halt } }
    while( get_event() ) { if EVT[ EVT_TYPE ] == EVT_QUIT { breakall } 
    }
    //framecount + 1;   
mutex_unlock( g_graphics_sync )  
}




//#######################################################################
//#######################################################################


//ffmpeg_video_import_close(vid_import)
// Cleanup. The per-channel audio buffers are removed exactly once, inside the
// "if vo" branch — they only exist in offline (AVI export) mode.
// FIX: removed a second, unconditional remove-loop that double-freed
// vo_audio_ch_bufs[i] and also ran in live mode where those containers
// were never created.
if vo
{
    //Close Video Export:
    mjpeg_encoder_close( vo_encoder )
    fclose( vo_f )
    i = 0 while i < wav_channels { remove( vo_audio_ch_bufs[ i ] ) i + 1 }
    remove( vo_audio_ch_bufs )
    remove( vo_audio_buf )
}

//
mutex_destroy( g_graphics_sync )
remove( gl_prog )
frame(2000)
// end, no proper cleanup
// ####################################################################
// FUNCTIONS ##########################################################
fn gfx_init()
{
// One-time setup of all global buffers, convolution kernels and lookup tables
// used by gl_callback(), anaudio() and the main loop. Everything created here
// is global (no $ prefix).

 	// convenience fractions of the screen size
 	hxsize = xsize /2
	hysize = ysize /2
 	thrdxsize = xsize /3
	thrdysize = ysize /3
 	qxsize = xsize /4
	qysize = ysize /4
 	fxsize = xsize /5
	fysize = ysize /5
	preampdivwavmax = preamp / wav_amp_max  	// combined scale: preamp / sample full-scale
	ftsize = 4096 // FFT size
	ftbinf = wav.sample_rate/ftsize // Hz per FFT bin
	ftbinfrez = 1 / ftbinf // bins per Hz
	hftsize = ftsize / 2
	ftbufr = new( ftsize, 1, FLOAT ) // FFT real part
 	ftbufi = new( ftsize, 1, FLOAT ) // FFT imaginary part
 	ftmag = new( hftsize, 1, FLOAT ) // magnitude buffers (not referenced in this file view)
 	ftmagsmoo = new( hftsize, 1, FLOAT )

	// raised-cosine analysis window, one full period over winlen
	winlen = 4*ftsize  // 2* wav.sample_rate/fps 
	wintable = new( winlen, 1, FLOAT )
	// FIX: loop condition was "$i <= winlen", which writes one element past
	// the end of wintable (size winlen).
	$i = 0 while $i < winlen{
	wintable[$i] = cos(2*M_PI*$i/winlen)*0.5 + 0.5
	$i + 1
	}	

krnl_blr = new(3,3, INT) // blur kernel (cross-weighted)
clean(krnl_blr,1)
krnl_blr[1] = 2
krnl_blr[3] = 2
krnl_blr[5] = 2
krnl_blr[7] = 2
krnl_blr[4] = 4

krnl_x = new(3,3, INT) // blur kernel (vertical emphasis)
clean(krnl_x,1)
krnl_x[1] = 4
krnl_x[3] = 2
krnl_x[5] = 2
krnl_x[7] = 4
krnl_x[4] = 4

krnl_x2 = new(3,3, INT) // blur kernel (diagonal emphasis, divisor 24 in gl_callback)
clean(krnl_x2,1)
krnl_x2[0] = 3
krnl_x2[1] = 2
krnl_x2[2] = 3
krnl_x2[3] = 2
krnl_x2[4] = 4
krnl_x2[5] = 2
krnl_x2[6] = 3
krnl_x2[7] = 2
krnl_x2[8] = 3



krnl_boxblr3 = new(3,3, INT) // 3x3 box blur kernel (all ones)
clean(krnl_boxblr3,1)

	
	// ERB scale: band edges and FFT-bin lookup
	erbsize = bands * 2
	erbbuf = new( erbsize, 1, FLOAT)
	erbiftlookup = new( erbsize, 1, INT32)
	
	// ERB filter lookup: map band index -> first FFT bin of that band
	//
	$size = erbsize
	$ERBmin = erbbandmin //1
	$ERBrange = erbbandmax -$ERBmin  //40 -$ERBmin

	$i = 0 while $i < erbsize
	{
	// inverse of the ERB-number formula: ERB -> frequency in Hz
	$erb = $ERBrange*$i/(erbsize) + $ERBmin
	$erbf = ($erb) * 0.046729
	$erbf = pow(10,$erbf) -1
	$erbf = $erbf * 228.833 

	erbiftlookup[$i] = ($erbf * ftbinfrez) div 1
	$i +1
	}
	
	// create triangular cross-fade weights between adjacent bands
	// NOTE(review): at $i = erbsize-1 this reads erbiftlookup[erbsize], one
	// past the end of the table — verify Pixilang's out-of-range read
	// behavior for the top band.
	ftweight = new( hftsize, 1, FLOAT )
	$i = 0 while $i < erbsize{
		$fromidx = erbiftlookup[$i ]
		$toidx = erbiftlookup[$i + 1 ] -1	
		$d = $toidx - $fromidx 
		$j = 0 while $j < $d{
			$ii = $fromidx +$j
			$w = $j / ($d -1)	
		ftweight[$ii] = $w
			$j = $j +1
		}
 	$i +1
 	}
	
	//----------
	
	// 256-entry lookup maps derived from the spectrum (shader uniforms)
	lmap = new( 256, 1, FLOAT) 
	lmapsmoo = new( 1, 256, FLOAT) // smoothed version
	lmappk = new( 1, 256, FLOAT) // peaks, map - smoothed
	lmapph = new( 1, 256, FLOAT) // animation phase
	C_GREY = get_color(127,127,127) // neutral grey
	C_erb = WHITE
	
	pxl32 = new(3, 1, INT32)// working pixel 32 bit
	
offsety = offsetmax*9/16 // NOTE(review): offsetmax is not defined in this file view (reads as 0)

histogramme = new(256,1,FLOAT)
eqtable = new(256,1,FLOAT)
deeqtable = new(256,1,FLOAT)

imgbuf = new(xsize,ysize)// frame buffer for the video importer
clean(imgbuf, RED)
screenclone = new(xsize,ysize)// screen copy
screenclone_copy = new(xsize,ysize)// screen copy 2
canvas1 = new(hxsize, hysize)
canvas2 = new(hxsize, hysize)
canvas3 = new(hxsize, hysize)
imgW = new(xsize,ysize) // the GL feedback image
imgW_clone = new(xsize,ysize) // scratch buffer for the blur passes
img_Y = clone(imgW)
img_Y_clone = clone(imgW)
img_Cb = clone(imgW)	
img_Cr = clone(imgW)
img_A = new(xsize,ysize, INT8)// alpha channel buffer, fully opaque
for($i =0; $i < get_size(img_A); $i +1){
	img_A[$i] = 255
}
set_alpha( imgW, img_A)
set_flags( imgW, GL_MIN_LINEAR | GL_MAG_LINEAR | GL_NICEST )

}

fn anaudio(){
// Analyze the audio at the current playback position (global wav_ptr):
// windowed FFT -> ERB-band energies -> the 256-entry lookup maps
// (lmapsmoo/lmappk/lmapph) and the smoothed loudness lmapsumsmoo used by the shader.
// NOTE(review): called as anaudio(wav_ptr) but declared without parameters;
// the argument is ignored and the global wav_ptr is read directly.

// FFT
	clean(ftbufi)
    	clean(ftbufr)

    $p = wav_ptr
    $t = $p / wav_size
    // NOTE(review): winlen = 4*ftsize, so "$i <= winlen" is always true here,
    // the else branch is dead, and only the first quarter of the raised-cosine
    // window is applied — confirm whether that is intended.
    $i = 0 while $i < ftsize
    {
	if $i <=  winlen{
	ftbufi[ $i ] = 0
	ftbufr[ $i ] = wav[ $p + $i * wav_channels ] * preampdivwavmax * wintable[$i]
	}else{
	ftbufi[ $i ] = 0
	ftbufr[ $i ] = 0
	}
	$i + 1
    }	
fft(1, ftbufi,ftbufr,ftsize)
	
	// map on ERB scale 
	// for color
	$erbgrav = 0
	$erbvmax = 0
	$erbsum = 0
	$i = 0 while $i < erbsize
	{
	erbbuf[$i]=0
	// pass 1 up: rising half of the triangular band filter
	$fromidx = erbiftlookup[$i ]
	$toidx = erbiftlookup[$i + 1 ] -1	
	$d = $toidx - $fromidx 
	$j = 0 while $j < $d
		{
		$ii = $fromidx + $j
		$v = sqrt(ftbufr[$ii]*ftbufr[$ii]+ftbufi[$ii]*ftbufi[$ii])
			erbbuf[$i] = erbbuf[$i] + $v * ftweight[$ii]		
		$j = $j +1 
		}
	erbbuf[$i] = erbbuf[$i] *80/$d
	// pass 2 down: falling half, taken from the next band's bin range.
	// NOTE(review): for the top bands this reads erbiftlookup[$i+1] and
	// erbiftlookup[$i+2] past the end of the table (size erbsize) — verify
	// Pixilang's out-of-range read behavior or clamp the index.
	$fromidx = erbiftlookup[$i + 1 ]
	$toidx = erbiftlookup[$i +2 ] -1	
	$d = $toidx - $fromidx 
	$j = 0 while $j < $d
		{
		$ii = $fromidx + $j
		$v = sqrt(ftbufr[$ii]*ftbufr[$ii]+ftbufi[$ii]*ftbufi[$ii]) 
			erbbuf[$i] = erbbuf[$i] + $v * (1-ftweight[$ii])		
		$j = $j +1 
		}
	erbbuf[$i] = erbbuf[$i] *80/$d
	$erbsum = $erbsum + erbbuf[$i]
	$erbgrav = $erbgrav + erbbuf[$i] * ($i -erbsize/2)
	$i + 1
	}
	
	// NOTE(review): $erbvmax is never updated above, so this clamp is dead code.
	if $erbvmax > 1 { $erbvmax = 1 } 
	
	//------------------------------------------
	
	// create light lookup map: resample erbbuf (with linear interpolation)
	// to 256 entries, convert to a 0..1 dB scale, then track smoothed value,
	// peak and animation phase per entry.
	lmapsum=0
	clipping = 0;
	$size = 256
	$i = 0 while $i < $size 
	{ 	
	$erbi = ($i * erbsize / $size ) 
	$frac = mod($erbi,1) 
	
	$erbi = $erbi div 1
	$v = erbbuf[$erbi] 
	if $erbi < $size -1 {
	$v2 = erbbuf[$erbi +1] 	
	$v = $v + ($v2 - $v) * $frac}
		
	//lmap[$i] = (atodb($v) +54 )/54 
	lmap[$i] = (atodb($v) +36 )/36 
	if lmap[$i] > lmapsmoo[0,$i] {

		// attack: follow the new value immediately, remember the jump as peak
		tmp = lmap[$i]-lmapsmoo[0,$i]
		lmapsmoo[0,$i] = lmap[$i]
		if tmp > lmappk[0,$i] {
		lmappk[0,$i] = tmp
		lmapph[0,$i] = 0
		}else{ 
		lmappk[0,$i] = lmappk[0,$i] *0.97 
		lmapph[0,$i] = lmapph[0,$i] + phasestep
		}
	}else{ 
		// release: exponential decay, phase keeps advancing
		lmapsmoo[0,$i] = lmapsmoo[0,$i]*0.97
		lmappk[0,$i] = lmappk[0,$i] *0.97
		lmapph[0,$i] = lmapph[0,$i] + phasestep
	}//96}
	
	if lmapsmoo[0,$i] > 1 { lmapsmoo[0,$i] = 1 clipping = 1}
	if lmapsmoo[0,$i] < 0 { lmapsmoo[0,$i] = 0}

	//lmapsmoo[$i] = lmapsmoo[$i] * lmapsmoo[$i]
	lmapsum = lmapsum + lmapsmoo[0,$i]/256

	$i + 1
	}
lmapsumsmoo = 	lmapsumsmoo +( lmapsum  - lmapsumsmoo )*0.01
ret(1)
}


// Audio callback: copies the next $frames frames of the interleaved WAV into
// the per-channel output containers, advancing the global wav_ptr.
// Also used directly by the offline exporter to pull audio chunks.
// Returns 0 (silence) when the WAV is exhausted and sample_loop is off, 1 otherwise.
fn audio_callback(
    $stream, 
    $userdata, 
    $channels, 
    $frames, 
    $output_time_in_system_ticks, 
    $in_channels, 
    $latency_in_frames )
{
    if wav_ptr >= wav_size
    {
	if !sample_loop
	{
	    ret( 0 )
	}
    }
    // de-interleave: channel $c is every wav_channels-th sample starting at offset $c
    $c = 0 while( $c < wav_channels )
    {
	copy( $channels[ $c ], wav, 0, wav_ptr + $c, $frames, 1, wav_channels )
	$c + 1
    }
    wav_ptr + $frames * wav_channels
    if sample_loop
    {
	// wrap around: fill the remainder of the chunk from the start of the WAV
	if wav_ptr >= wav_size
	{
	    $ff = ( wav_ptr - wav_size ) / wav_channels
	    $p = $frames - $ff
	    wav_ptr = 0
	    $c = 0 while( $c < wav_channels )
	    {
		copy( $channels[ $c ], wav, $p, wav_ptr + $c, $ff, 1, wav_channels )
    		$c + 1
	    }
	    wav_ptr + $ff * wav_channels
	}
    }
    ret( 1 )
}

fn atodb($a){
// Linear amplitude -> decibels: dB = 20*log10(a).
ret( 20 * log10( $a ) )
}
fn dbtoa($db){
// Decibels -> linear amplitude: a = 10^(dB/20).
ret( pow( 10, $db * 0.05 ) )
}

fn hsvrgb($h,$s,$v){
// HSV to RGB: $h in degrees, $s and $v in 0..1; returns a Pixilang color.
$h = $h % 360
$hi = 255*$v // value ceiling
$lo = $hi*(1- $s) // value floor
$md = 255*($v*$s)*(1-abs(mod(($h/60.0),2)-1)) + $lo // ramp between floor and ceiling
// pick the channel permutation for the 60-degree sector
if $h < 60 { $r = $hi $g = $md $b = $lo }else{
if $h < 120 { $r = $md $g = $hi $b = $lo }else{
if $h < 180 { $r = $lo $g = $hi $b = $md }else{
if $h < 240 { $r = $lo $g = $md $b = $hi }else{
if $h < 300 { $r = $md $g = $lo $b = $hi }else{
$r = $hi $g = $lo $b = $md }}}}}
ret(get_color($r ,$g ,$b ))
}

fn get_hist_red($img){
// Build a normalized red-channel histogram of $img in the global 'histogramme'
// (256 FLOAT bins; values sum to 1).
	clean(histogramme)
	$n = get_size($img)
	$w = 1/( $n )
	$i = 0
	while $i < $n{
	$bin = get_red($img[$i])
	histogramme[$bin] = histogramme[$bin] + $w
	$i + 1
	}
ret(1)
}

fn get_hist($img){
// Build a normalized histogram of a single-channel container $img in the
// global 'histogramme' (values are used directly as 0..255 bin indices).
	clean(histogramme)
	$n = get_size($img)
	$w = 1/( $n )
	$i = 0
	while $i < $n{
	$bin = $img[$i]
	histogramme[$bin] = histogramme[$bin] + $w
	$i + 1
	}
ret(1)
}


fn eq_red($img, $amount){
// Histogram-equalize the red channel of $img in place (result stored in red,
// green/blue zeroed). Uses the globals 'histogramme' and 'eqtable'.
// NOTE(review): the lookup index $r*$amount + $r*(1-$amount) algebraically
// reduces to $r, so $amount has no effect as written — confirm intent.
	get_hist_red($img)
	copy(eqtable, histogramme)
	op_cn(OP_H_INTEGRAL, eqtable, 0) // cumulative distribution
	op_cn(OP_SMUL, eqtable, 255) // scale CDF to 0..255
	$keep = 1 - $amount
	$n = get_size($img)
	$i = 0
	while $i < $n{
		$r = get_red($img[$i])
		$img[$i] = get_color( eqtable[$r*$amount + $r*$keep] , 0, 0)
		$i + 1
}
ret($img)
}

fn eq_onechannel($img, $amount){
// Histogram-equalize a single-channel container in place via the globals
// 'histogramme' and 'eqtable'.
// NOTE(review): as in eq_red, the blended index reduces to the original
// value, so $amount has no effect as written — confirm intent.
	get_hist($img)
	copy(eqtable, histogramme)
	op_cn(OP_H_INTEGRAL, eqtable, 0) // cumulative distribution
	op_cn(OP_SMUL, eqtable, 255) // scale CDF to 0..255
	$keep = 1 - $amount
	$n = get_size($img)
	$i = 0
	while $i < $n{
		$v = $img[$i]
		$img[$i] =  eqtable[$v*$amount + $v*$keep] 
		$i + 1
}
ret($img)
}

fn get_eqtable($img){
// Fill the global 'eqtable' with the cumulative red-channel histogram of $img,
// scaled to 0..255 — the forward mapping later inverted by deeq_red().
	get_hist_red($img)
	copy(eqtable, histogramme)
	op_cn(OP_H_INTEGRAL, eqtable, 0) // in-place cumulative sum
	op_cn(OP_SMUL, eqtable, 255) // scale to 0..255
ret(1)
}

fn deeq_red($img, $amount){
// Apply the inverse of the equalization mapping (global 'eqtable', built by
// get_eqtable()) to the red channel of $img, writing into red only.
// NOTE(review): as in eq_red, the blended lookup index reduces to the plain
// red value, so $amount has no effect as written — confirm intent.
	$am1m = 1 - $amount
	clean(deeqtable)
	//for ($i = 0; $i < 256;$i +1){ 
	//deeqtable[$i] =  $i
	//}
	// invert the forward table: deeqtable[eqtable[i]] = i
	for ($i = 0; $i < 256;$i +1){ 
	$ti = eqtable[$i]
	deeqtable[$ti] =  $i //+ ($i - $ti)
	}
	// fill holes left by the non-surjective inverse with previous value + 1
	for ($i = 1; $i < 256;$i +1){ 
	if deeqtable[$i] == 0 {deeqtable[$i] = (deeqtable[$i-1] + 1) & 255}
	}

	for( $i=0; $i < get_size($img); $i + 1){
		$img[$i] = get_color( deeqtable[get_red($img[$i])*$amount + get_red($img[$i])*$am1m] , 0, 0)
}
ret($img)
}
// NOTE(review): older fragment shader kept for reference — assigned here but
// not compiled anywhere in this file view. The GLSL string is runtime data
// and is left byte-identical.
gl_fshader_old = //Fragment shader
"PRECISION( MEDIUMP, float )
uniform sampler2D g_texture;
uniform float g_time;
uniform float g_amount;
uniform float g_bgain;
uniform int g_inverse;
uniform vec4 g_color;
uniform float g_lookupcurve[256];
IN vec2 tex_coord_var;
void main()
{
    vec2 tc = tex_coord_var;
	vec2 tc2 = tc;
	vec2 tc3 = tc;
	vec2 tc4 = tc;
    vec2 offxy ;
offxy[0] = texture2D( g_texture, tc )[0] + texture2D( g_texture, tc )[1] + texture2D( g_texture, tc )[2] ;
offxy[0] /= 3.0;
offxy[1] = 0.0;//offxy[0];
float na; 
//na = 1.0 - offxy[0];
na =  offxy[0];
float ca = 255.0 *  na;
int li =  int( ca );
if(li > 255){ li = 255 ;}
if( li < 0 ){ li = 0;}
li = 255 - li;
float liv = g_lookupcurve[li];
vec4 gnewa;
//float m_pi = 3.14159;
//float m_pi_dv2 = 1.570796;
offxy[0] = liv*liv  *na;
offxy[1] = offxy[0] ;

offxy[0] *= g_amount ;
offxy[1] *= g_amount ;
na = cos(offxy[0] * 1.570796 ) ;
	gnewa[ 0 ] = 1.0 - ( 1.0-na)*(1.0-na);
    tc.x += offxy[0];
    tc.y += offxy[1];
	gnewa[ 0 ] = gnewa[ 0 ] + texture2D( g_texture, tc )[0]+ texture2D( g_texture, tc )[1] + texture2D( g_texture, tc )[2];
    tc2.x -= offxy[0];
    tc2.y -= offxy[1];
	gnewa[ 0 ] = gnewa[ 0 ] + texture2D( g_texture, tc2 )[0]+ texture2D( g_texture, tc2 )[1] + texture2D( g_texture, tc2 )[2];
    //tc3.x += offxy[0];
    tc3.y -= offxy[1];
	gnewa[ 0 ] = gnewa[ 0 ] + texture2D( g_texture, tc3 )[0]+ texture2D( g_texture, tc3 )[1] + texture2D( g_texture, tc3 )[2];
   // tc4.x -= offxy[0];
    tc4.y += offxy[1];
	gnewa[ 0 ] = gnewa[ 0 ] + texture2D( g_texture, tc4 )[0]+ texture2D( g_texture, tc4 )[1] + texture2D( g_texture, tc4 )[2];
	//gnewa[ 0 ] /= 5.0;
	gnewa[ 0 ] = 1.0 -  gnewa[ 0 ];
	gnewa[ 0 ] = 1.0 - (  gnewa[ 0 ] * gnewa[ 0 ] * gnewa[ 0] * gnewa[ 0]);
	gnewa[ 0 ] += liv * liv;
	gnewa[ 0 ] *= g_bgain;
	gnewa[ 1 ] = gnewa[ 0 ];
	gnewa[ 2 ] = gnewa[ 0 ];
	gnewa[ 3 ] = gnewa[ 0 ]*gnewa[ 0 ];
float av = ( gnewa[ 0 ] + gnewa[ 1 ] + gnewa[ 2 ] )/3;
float liv1 =  0.29+ 0.71*liv;
	liv1 *= liv1;
	gnewa[ 0 ] -= av;
	gnewa[ 0 ] *= liv1;
	gnewa[ 0 ] += av;
	gnewa[ 1 ] -= av;
	gnewa[ 1 ] *= liv1;
	gnewa[ 1 ] += av;
	gnewa[ 2 ] -= av;
	gnewa[ 2 ] *= liv1;
	gnewa[ 2 ] += av;
if( g_inverse){
	liv = 1.0 -liv;
	liv *= liv;
	gnewa[ 3 ] = 1.0- gnewa[ 3 ];
	gnewa[ 3 ] *= liv;
}else{
	liv *= liv;
	gnewa[ 3 ] *= liv;
}




	//gnewa[ 3 ] += liv;
    gl_FragColor = gnewa * g_color;
}
"

ainegil
Posts: 210
Joined: Thu Sep 22, 2022 11:37 pm

Re: ideas for dissolve effect?

Post by ainegil »

yes good idea.
Random alpha channels or alpha channels based on the image result.
I totally forgot about this idea, have to try this.
ainegil
Posts: 210
Joined: Thu Sep 22, 2022 11:37 pm

Re: ideas for dissolve effect?

Post by ainegil »

Maybe this can also be combined with this effect:
grain extract.jpg
grain extract.jpg (14.01 KiB) Viewed 607 times
This is made from two layers (not in Pixilang but KDEnlive), the layers are converted to contrast greyscale,
and the upper layer is blurred very much, and the result is:
lower_layer - upper_layer + 0.5, this blending mode is called "grain extract" mode in KDEnlive and GIMP.

So the filled areas are fading, and if this is repeated it might look interesting and enhance the dissolve impression.
Basically a high pass effect plus an offset.
ainegil
Posts: 210
Joined: Thu Sep 22, 2022 11:37 pm

Re: ideas for dissolve effect?

Post by ainegil »

Here is a quick test for stills, interestingly it gives a slight 3D effect with the attached test image:

Code: Select all


//################  description :  #####################################################
//
// Test for dissolve effect.
//
// Stacks random shifted copies, uses the (inverted) result as alpha channel, 
// applies soft "grain extract" mode.
//
//################  basic parameters :  ###############################################

img = load("test_small.jpg") // test image

radius = 36 		// spread radius pixels
nonlinearity = 1.5	// spread radius nonlinearity
squeeze = 1.75		// x/y squeeze
scalevariancex = 16	// random x scale pixels
scalevariancey = 24 // random y scale pixels
iterations = 24		// steps per frame
gravityoffset = 5	// y offset

basetransp = 255 / iterations 	// per-copy opacity so the stack sums to roughly full opacity


//################  init ###############################################################

// create angles to vector table to save a bit on trig operations:
// vectable[0,i] / vectable[1,i] = cos/sin of angle i mapped over -PI..PI

vectable = new( 2, 32768, FLOAT32)
for( i = 0; i < 32768; i + 1){

vectable[ 0, i ] = cos( i * 2 * M_PI / 32768 - M_PI) 
vectable[ 1, i ] = sin( i * 2 * M_PI / 32768 - M_PI) 
}



img_put = clone( img ) // working image that is stamped each frame
img_alpha = new( get_xsize( img ), get_ysize( img ), PIXEL )

set_pixel_size( WINDOW_XSIZE / 640 )
resize( get_screen(), 640, 360 )


scr = get_screen()
scr_clone = clone( scr )
scr_clone_cb = clone( scr ) 
scr_clone_cr = clone( scr ) 

xsize = 640//get_xsize( scr )
ysize = 360//get_ysize( scr )

//################  process   ##########################################################


loop:

// Stamp several randomly shifted/scaled translucent copies of img_put,
// derive an alpha channel from the (inverted, darkened) result, then merge
// with a soft "grain extract" for the next iteration.

transp(basetransp)

for( i = 0; i < iterations; i +1){
// random direction: index into the 32768-entry sin/cos table
rnda = rand()
// FIX: apply the nonlinearity to the normalized radius, not to the table
// index — the original "rnda = pow( rnda, nonlinearity )" produced indices
// up to 32767^1.5, far outside vectable (out-of-range reads). This matches
// the corrected later revision of this script.
rndr = radius * pow( rand() / 32768, nonlinearity )
rndscl = rand() / 32768
rndx = vectable[ 0, rnda ] * rndr
rndy = vectable[ 1, rnda ] * rndr * squeeze

scalex = ( xsize + scalevariancex * rndscl ) / xsize
scaley = ( ysize + scalevariancey * rndscl ) / ysize

pixi( img_put, rndx, rndy +  gravityoffset, WHITE, scalex, scaley )

}

frame(1000/25)

// set alpha:

set_alpha( img_put ) // detach the previous alpha before reuse
copy( scr_clone, scr )
convert_type( img_alpha, PIXEL )
split_ycbcr( 0,  scr_clone, img_alpha, scr_clone_cb, scr_clone_cr ) 
// darken alpha to compensate merge effect:
op_cn( OP_COLOR_SUB, img_alpha, #7F0000)
// multiply alpha:
op_cc( OP_COLOR_MUL, img_alpha, img_alpha)
// invert alpha:
op_cn( OP_COLOR_SUB2, img_alpha, #FF0000)

convert_type( img_alpha, INT8 )


set_alpha( img_put, img_alpha )

// merge result with "grain extract" for new iteration:
copy( img_put, scr_clone )
op_cn( OP_COLOR_MUL, scr_clone, #3F3F3F)
op_cc( OP_COLOR_SUB, img_put, scr_clone)
op_cn( OP_COLOR_ADD, img_put, #1F1F1F)


goto loop

test_small.jpg
test_small.jpg (58.9 KiB) Viewed 604 times
for some reason it's quite fast in the beginning but slows down quickly.

I am curious what this will look like with video.

Thanks for the tip.
ainegil
Posts: 210
Joined: Thu Sep 22, 2022 11:37 pm

Re: ideas for dissolve effect?

Post by ainegil »



Code will follow in minute, have to switch machines.
It's not the effect I wanted, but it's not completely bad either.
Also failed experiments are as important as successful ones.

Input was first rendered with the last code from the cartoon render thread.

EDIT:

Code: Select all





// --- Screen / file / effect settings for the video version of the dissolve test ---
set_pixel_size( WINDOW_XSIZE / 960 )
resize( get_screen(), 960, 540 )


scr = get_screen()
xsize = 960//get_xsize( scr )
ysize = 540//get_ysize( scr )
fps = 25

// INPUT VIDEO file path
mpath = "input video.mp4"

//INPUT WAV file path
filename = "input audio.wav"
//

//OUTPUT MJPEG AVI video file:
avi_filename = "output.avi"

// ################################ Effect Settings: #########################################

duration = 3*60 +40 // duration in seconds


img = new(xsize, ysize, PIXEL) // 

radius = 0 		// spread radius pixels
nonlinearity = 3	// spread radius nonlinearity
squeeze = 16/9	// x/y squeeze
scalevariancex = 27	// random x scale pixels
scalevariancey = 27 // random y scale pixels
iterations = 9		// steps per frame
gravityoffset = 5	// y offset


basetransp = 255 / iterations // per-copy opacity
	
sample_loop = 1

//################  init ###############################################################



// init:

include "../../lib/ffmpeg_video_export.pixi"
include "../../lib/ffmpeg_video_import.pixi"
include "../../lib/mjpeg.pixi"

framecount = 0
startframe = 1
maxframe = duration * fps // total frames to render

// create angles to vector table to save a bit on trig operations:
// vectable[0,i] / vectable[1,i] = cos/sin of angle i mapped over -PI..PI

vectable = new( 2, 32768, FLOAT32)
for( i = 0; i < 32768; i + 1){

vectable[ 0, i ] = cos( i * 2 * M_PI / 32768 - M_PI) 
vectable[ 1, i ] = sin( i * 2 * M_PI / 32768 - M_PI) 
}



img_put = clone( img ) // working image that gets stamped each frame
img_newframe = clone( img ) // latest decoded video frame
img_oldframe = clone( img )



//clean( img_put, WHITE )
img_alpha = new( get_xsize( img ), get_ysize( img ), INT8 )
img_delta1 = clone( img_alpha )
img_delta2 = clone( img_alpha )
clean( img_alpha, 127 )



scr = get_screen()
scr_clone = clone( scr )
scr_clone2 = clone( scr )
scr_clone_y = clone( scr ) 
scr_clone_cb = clone( scr ) 
scr_clone_cr = clone( scr ) 



mina = 0
maxa = #FF0000


// open the input video scaled to the working resolution
vid_import = ffmpeg_video_import_open( 
mpath, 
xsize, ysize, 
startframe, maxframe )

wav = load( filename )
//Sound options:
sample_rate_scale = 1
logf( "WAV INFO:\n" )
logf( "  Sample Rate: %d\n", wav.sample_rate )
logf( "  Channels: %d\n", wav.channels )
logf( "  Loop Start (sample number): %d\n", wav.loop_start )
logf( "  Loop Length (number of samples): %d\n", wav.loop_len )
logf( "  Loop Type (0-none; 1-normal; 2-bidirectional): %d\n", wav.loop_type )
wav_ptr = 0
wav_size = get_size( wav ) //number of frames
wav_channels = wav.channels
wav_amp_max = 256 // full-scale amplitude for the sample type (corrected below)
samplerate = wav.sample_rate
type = get_type( wav )
if type == INT16 { wav_amp_max = 1 << 15 }
if type == INT32 { wav_amp_max = 1 << 30 }
if type == FLOAT32 { wav_amp_max = 1 }

// Offline render (write MJPEG AVI) when an output path is set; live playback otherwise.
if avi_filename != 0
{
    vo = 1
    vo_f = fopen( avi_filename, "wb" )
    if vo_f <= 0 { logf( "Can't open video file for writing\n" ) halt }
    vo_encoder = mjpeg_encoder_open(
	fps,
	xsize,
	ysize,
	90, //Quality
	wav_channels, //Audio channels
	wav.sample_rate * sample_rate_scale, //Audio frames per second
	get_type( wav ), //Audio sample type
	MJPEG_ENCODER_FLAG_USEINDEX | MJPEG_ENCODER_FLAG_HASSOUND, //Flags
	vo_f )
    vo_audio_buf_size = mjpeg_encoder_get_audio_size( vo_encoder ) //Number of frames per audio chunk
    vo_audio_buf = new( vo_audio_buf_size * wav_channels, 1, get_type( wav ) )
    vo_audio_ch_bufs = new( wav_channels, 1, INT )
    i = 0 while i < wav_channels { vo_audio_ch_bufs[ i ] = new( vo_audio_buf_size, 1, get_type( wav ) ) i + 1 }
    logf( "Audio buffer size: %d frames\n", vo_audio_buf_size )
}
else
{
    set_audio_callback( audio_callback, 0, wav.sample_rate * sample_rate_scale, get_type( wav ), wav_channels, AUDIO_FLAG_INTERP2 )

    rate1 = get_audio_sample_rate( 0 )
    rate2 = get_audio_sample_rate( 1 )
    logf( "Local (defined by the set_audio_callback()) sample rate: %d Hz\n", rate1 )
    logf( "Global (defined in the global Pixilang preferences) sample rate: %d Hz\n", rate2 )
    if rate1 != rate2
    {
	logf( "%d != %d, so resampling will be enabled\n", rate1, rate2 )
    }
}
//-------------------



krnl_boxblr3 = new(3,3, INT) // 3x3 box blur kernel (all ones)
clean(krnl_boxblr3,1)

krnl_9x = new(9,1, INT) // 9x1 horizontal blur kernel
clean(krnl_9x,1)

krnl_9y = new(1,9, INT) // 1x9 vertical blur kernel
clean(krnl_9y,1)

//gfx_init()
start_timer(0)
clear(#888888)

// MAIN LOOP ####################################################
// Per frame: import a video frame, mix it with the blurred feedback of the
// previous frame, stamp it repeatedly with random offsets/scales (the
// "dissolve"), then blur the result for the next iteration and optionally
// write frame + audio to the MJPEG encoder.

while( framecount < maxframe  )
{

//anaudio(wav_ptr)

// Read the next input video frame into imgbuf
// (NOTE(review): presumably imgbuf is img_put's data container — confirm).
ffmpeg_video_import_read(vid_import,imgbuf)
if framecount == 0 {
clear()
}

// Snapshot the current (feedback) screen as the base for this frame.
copy( img_newframe, scr )

//premul
//op_cc( OP_COLOR_MUL, img_newframe, img_newframe)

// Draw the freshly imported frame fully opaque.
transp(255)
//clear()
pixi( img_put)


// Restore step: scr_clone2 holds the (halved) blurred screen from the
// previous iteration; subtract the snapshot, attenuate, and subtract back
// to counteract some of the accumulated blur.
op_cc( OP_COLOR_SUB, scr_clone2, img_newframe )
op_cn( OP_COLOR_MUL, scr_clone2, #1F1F1F)

op_cc( OP_COLOR_SUB, img_newframe, scr_clone2) // was scr_clone


//style
/*
split_ycbcr( 0, img_newframe, scr_clone_y, scr_clone_cb, scr_clone_cr)
copy( scr_clone2, scr_clone_y)
op_cn( OP_COLOR_SUB2, scr_clone_y, #FF0000)
op_cc( OP_COLOR_MUL, scr_clone_y, scr_clone_y)
op_cc( OP_COLOR_MUL, scr_clone_y, scr_clone_y)
op_cn( OP_COLOR_SUB2, scr_clone_y, #FF0000)
//op_cn( OP_COLOR_MUL, scr_clone_y, #7F0000)
op_cc( OP_COLOR_ADD, scr_clone_y, scr_clone_y)

op_cn( OP_COLOR_MUL, scr_clone2, #3F0000)
op_cc( OP_COLOR_ADD, scr_clone2, scr_clone2)
op_cc( OP_COLOR_ADD, scr_clone2, scr_clone2)
op_cc( OP_COLOR_MUL, scr_clone2, scr_clone2)
op_cc( OP_COLOR_MUL, scr_clone_y, scr_clone2)
op_cn( OP_SMUL, scr_clone_y, 4)

split_ycbcr( 1, img_newframe, scr_clone_y, scr_clone_cb, scr_clone_cr)
*/


// Redraw the restored frame, then build a per-pixel opacity map from
// img_alpha (the previous screen's luma, extracted further below):
// alpha = (255 - Y) / 2 + 127, i.e. darker areas become more opaque.
pixi( img_newframe)
op_cn( OP_SUB2, img_alpha, 255)
op_cn( OP_DIV, img_alpha, 2)
op_cn( OP_ADD, img_alpha, 127)
set_alpha( img_put, img_alpha )


// Dissolve pass: stamp img_put "iterations" times with random direction
// (vectable), random radius shaped by "nonlinearity", vertical "squeeze",
// a growing scale variance and a gravity drift proportional to i.
for( i = 0; i < iterations; i +1){
rnda = rand() 
rndr = radius * pow( rand() / 32768, nonlinearity ) 
rndscl = rand() / 32768
rndscl = pow( rndscl, nonlinearity )
rndx = vectable[ 0, rnda ] * rndr
rndy = vectable[ 1, rnda ] * rndr * squeeze

scalex = ( xsize + scalevariancex * i/iterations  ) / xsize
scaley = ( ysize + scalevariancey * i/iterations  ) / ysize


//transp(basetransp*i/iterations+ basetransp /2)
transp(basetransp)
pixi( img_put, rndx, rndy + gravityoffset * i / iterations , WHITE, scalex, scaley )

}



//frame(1000/25)
frame()
// set alpha:

// Detach the alpha map and keep copies of the composited screen:
// scr_clone for luma extraction, img_put as blur source.
set_alpha( img_put )
copy( scr_clone, scr )
//copy( scr_clone2, scr )
copy( img_put, scr )

// Separable 9-tap box blur, horizontal pass (into scr_clone_cb)...
conv_filter(
    scr_clone_cb,  //destination
    img_put,  //source
    krnl_9x,  //kernel
//OPTIONAL PARAMETERS:
    9,  //divisor (dividing the result)
    0,  //offset (addition after division)
    CONV_FILTER_COLOR,  //flags (options)
   1, 1,  //kernel center XY (relative to the upper left corner)
    0, 0,  //destination XY
    0, 0,  //source XY
    xsize, ysize,
    1, 1  //XY pitch (step length)
)

// ...then vertical pass (into scr_clone2), reused next iteration.
conv_filter(
    scr_clone2,  //destination
    scr_clone_cb,  //source
    krnl_9y,  //kernel
//OPTIONAL PARAMETERS:
    9,  //divisor (dividing the result)
    0,  //offset (addition after division)
    CONV_FILTER_COLOR,  //flags (options)
   1, 1,  //kernel center XY (relative to the upper left corner)
    0, 0,  //destination XY
    0, 0,  //source XY
    xsize, ysize,
    1, 1  //XY pitch (step length)
)

// Extract the luma (Y) of the pre-blur screen into img_alpha.
// split_ycbcr() needs PIXEL type; converted back to INT8 below so it can
// serve as an alpha map at the top of the next iteration.
convert_type( img_alpha, PIXEL )
split_ycbcr( 0,  scr_clone, img_alpha, scr_clone_cb, scr_clone_cr ) 


// mult
//op_cn( OP_COLOR_SUB2, img_alpha, #FF0000)
//op_cc( OP_COLOR_MUL, img_alpha, img_alpha)
//op_cc( OP_COLOR_MUL, img_alpha, img_alpha)

//op_cn( OP_COLOR_SUB2, img_alpha, #FF0000)

//op_cn( OP_COLOR_ADD, img_alpha, #7F0000)


// convert
convert_type( img_alpha, INT8 )

// merge result 
// Add half of the blurred image back and re-center brightness.
op_cn( OP_COLOR_MUL, scr_clone2, #7F7F7F)
op_cc( OP_COLOR_ADD, img_put, scr_clone2) // was SUB
op_cn( OP_COLOR_SUB, img_put, #404040)  // was ADD
//set_alpha( img_put, img_alpha )


//
    if vo 
    {
	//Video export:
	// Pull one chunk of audio through the same callback used for live
	// playback, re-interleave channel by channel, then hand the current
	// frame and the audio chunk to the MJPEG encoder.
	audio_callback( 0, 0, vo_audio_ch_bufs, vo_audio_buf_size, 0, -1, 0 )
	i = 0 while i < wav_channels 
	{
	    copy( vo_audio_buf, vo_audio_ch_bufs[ i ], i, 0, vo_audio_buf_size, wav_channels, 1 )
	    i + 1 
	}
	mjpeg_encoder_write_image( vo_encoder, scr )
        mjpeg_encoder_write_audio( vo_encoder, vo_audio_buf, 0, 0 )
	mjpeg_encoder_next_frame( vo_encoder )
	frame()
    }
    else
    {
	frame( 1000 / fps )
    }
    // Stop at the end of the audio (when not looping) or on window close.
    if !sample_loop && wav_ptr >= wav_size { breakall }
    //while( get_event() ) { if EVT[ EVT_TYPE ] == EVT_QUIT { halt } }
    while( get_event() ) { if EVT[ EVT_TYPE ] == EVT_QUIT { breakall } 
    }
    framecount + 1;     
}




//#######################################################################
//#######################################################################


// Release the video importer and, in export mode, the encoder, the output
// file and the audio buffers.
ffmpeg_video_import_close(vid_import)
if vo
{
    //Close Video Export:
    mjpeg_encoder_close( vo_encoder )
    fclose( vo_f )
    // Free the per-channel buffers before the container that holds them.
    i = 0 while i < wav_channels { remove( vo_audio_ch_bufs[ i ] ) i + 1 }
    remove( vo_audio_ch_bufs )
    remove( vo_audio_buf )
}
// FIX: removed a second, unconditional remove-loop over vo_audio_ch_bufs[i]
// that stood here. In export mode it freed the channel buffers twice; in
// playback mode (vo == 0) those containers were never allocated at all.

// end, no proper cleanup
// ####################################################################
// FUNCTIONS ##########################################################
fn gfx_init()
{
    // Intentionally empty: graphics setup currently happens at the top
    // level of the script; this hook is kept for future use.
}




fn audio_callback(
    $stream, 
    $userdata, 
    $channels, 
    $frames, 
    $output_time_in_system_ticks, 
    $in_channels, 
    $latency_in_frames )
{
    // Fills the per-channel output containers in $channels with $frames
    // frames taken from the preloaded interleaved WAV (global "wav"),
    // advancing the global read position wav_ptr.
    // Returns 0 (silence) when the file has ended and looping is off,
    // 1 otherwise.
    if wav_ptr >= wav_size
    {
	if !sample_loop
	{
	    ret( 0 )
	}
    }
    // De-interleave: channel $c reads every wav_channels-th sample
    // starting at wav_ptr + $c.
    $c = 0 while( $c < wav_channels )
    {
	copy( $channels[ $c ], wav, 0, wav_ptr + $c, $frames, 1, wav_channels )
	$c + 1
    }
    wav_ptr + $frames * wav_channels
    if sample_loop
    {
	if wav_ptr >= wav_size
	{
	    // The read ran past the end of the file: wrap to the start and
	    // refill the last $ff frames of this buffer from the beginning.
	    $ff = ( wav_ptr - wav_size ) / wav_channels
	    $p = $frames - $ff
	    wav_ptr = 0
	    $c = 0 while( $c < wav_channels )
	    {
		copy( $channels[ $c ], wav, $p, wav_ptr + $c, $ff, 1, wav_channels )
    		$c + 1
	    }
	    wav_ptr + $ff * wav_channels
	}
    }
    ret( 1 )
}

fn atodb($a){
	// Linear amplitude -> decibels: dB = 20 * log10(amplitude).
	ret( 20 * log10( $a ) )
}
fn dbtoa($db){
	// Decibels -> linear amplitude: a = 10 ^ (dB * 0.05).
	ret( pow( 10, $db * 0.05 ) )
}

fn hsvrgb($h,$s,$v){
// HSV to RGB
// $h: hue in degrees (wrapped to 0..360), $s: saturation 0..1, $v: value 0..1.
// Returns a packed color built with get_color().
// NOTE(review): $h % 360 may stay negative for negative hues — assumes
// callers pass $h >= 0; confirm if negative hues are possible.
$h = $h % 360
$M = 255*$v // channel maximum for this value
$m = $M*(1- $s) // channel minimum (grey floor at low saturation)
$z = 255*($v*$s)*(1-abs(mod(($h/60.0),2)-1)) // intermediate ramp within each 60-degree sector
if $h < 60 { // sector 0: red -> yellow
$r = $M
$g = $z + $m
$b = $m
}else{
if $h < 120 { // sector 1: yellow -> green
$r = $z + $m
$g = $M 
$b = $m
}else{
if $h < 180 { // sector 2: green -> cyan
$r = $m 
$g = $M 
$b = $z + $m
}else{
if $h < 240 { // sector 3: cyan -> blue
$r = $m 
$g = $z + $m
$b = $M 
}else{
if $h < 300 { // sector 4: blue -> magenta
$r = $z + $m
$g = $m 
$b = $M 
}else{ // sector 5: magenta -> red
$r = $M 
$g = $m 
$b = $z + $m
}}}}
}
ret(get_color($r ,$g ,$b ))
}

fn get_hist_red($img){
	// Build a normalized histogram of the red channel of $img into the
	// global container "histogramme" (bin weights sum to ~1).
	clean(histogramme)
	$count = get_size($img)
	$weight = 1/( $count )
	$p = 0
	while $p < $count {
		$bin = get_red($img[$p])
		histogramme[$bin] = histogramme[$bin] + $weight
		$p + 1
	}
	ret(1)
}

fn get_hist($img){
	// Build a normalized histogram of the raw element values of $img into
	// the global container "histogramme" (bin weights sum to ~1).
	clean(histogramme)
	$count = get_size($img)
	$weight = 1/( $count )
	$p = 0
	while $p < $count {
		$bin = $img[$p]
		histogramme[$bin] = histogramme[$bin] + $weight
		$p + 1
	}
	ret(1)
}


fn eq_red($img, $amount){
	// Histogram-equalize the red channel of $img in place.
	// $amount in 0..1 blends between the original value (0) and the fully
	// equalized value (1). Uses the globals histogramme/eqtable.
	get_hist_red($img)
	//clean(eqtable)
	copy(eqtable, histogramme)
	op_cn(OP_H_INTEGRAL, eqtable, 0) // cumulative distribution (CDF)
	op_cn(OP_SMUL, eqtable, 255)     // scale CDF to 0..255
	$am1m = 1 - $amount
	for( $i=0; $i < get_size($img); $i + 1){
		$r = get_red($img[$i])
		// FIX: blend the remapped value with the original value.
		// Previously the blend was applied to the table INDEX
		// ($r*$amount + $r*$am1m == $r), so $amount had no effect.
		$img[$i] = get_color( eqtable[$r]*$amount + $r*$am1m , 0, 0)
	}
	ret($img)
}

fn eq_onechannel($img, $amount){
	// Histogram-equalize a single-channel container in place.
	// $amount in 0..1 blends between the original value (0) and the fully
	// equalized value (1). Uses the globals histogramme/eqtable.
	get_hist($img)
	//clean(eqtable)
	copy(eqtable, histogramme)
	op_cn(OP_H_INTEGRAL, eqtable, 0) // cumulative distribution (CDF)
	op_cn(OP_SMUL, eqtable, 255)     // scale CDF to 0..255
	$am1m = 1 - $amount
	for( $i=0; $i < get_size($img); $i + 1){
		$v = $img[$i]
		// FIX: blend the remapped value with the original value.
		// Previously the blend was applied to the table INDEX
		// ($v*$amount + $v*$am1m == $v), so $amount had no effect.
		$img[$i] = eqtable[$v]*$amount + $v*$am1m
	}
	ret($img)
}

fn get_eqtable($img){
	// Fill the global "eqtable" with the 0..255-scaled cumulative histogram
	// (CDF) of $img's red channel, for later use by deeq_red().
	get_hist_red($img)
	copy(eqtable, histogramme)
	op_cn(OP_H_INTEGRAL, eqtable, 0)
	op_cn(OP_SMUL, eqtable, 255)
ret(1)
}

fn deeq_red($img, $amount){
	// Approximate inverse of eq_red(): remap the red channel of $img
	// through the inverse of the global eqtable (fill it first with
	// get_eqtable()). $amount in 0..1 blends between the original value
	// (0) and the fully de-equalized value (1).
	$am1m = 1 - $amount
	clean(deeqtable)
	// Invert the mapping: deeqtable[ eqtable[i] ] = i.
	for ($i = 0; $i < 256;$i +1){ 
	$ti = eqtable[$i]
	deeqtable[$ti] =  $i //+ ($i - $ti)
	}
	// The CDF is not surjective, so some entries stay 0; fill each hole
	// with the previous entry + 1, wrapped to 0..255.
	for ($i = 1; $i < 256;$i +1){ 
	if deeqtable[$i] == 0 {deeqtable[$i] = (deeqtable[$i-1] + 1) & 255}
	}

	for( $i=0; $i < get_size($img); $i + 1){
		$r = get_red($img[$i])
		// FIX: blend the remapped value with the original value.
		// Previously the blend was applied to the table INDEX
		// ($r*$amount + $r*$am1m == $r), so $amount had no effect.
		$img[$i] = get_color( deeqtable[$r]*$amount + $r*$am1m , 0, 0)
	}
	ret($img)
}



This code was applied 2 times, then the result was time-inverted, and the code was run another 2 times.
Then both versions were multiplied, and a brightness curve was applied to make it brighter (in KDEnlive).

The code is more interesting when there are really dark and light areas.
When it is applied over and over, it becomes an interesting morphing blur.

Maybe I have to do another example for this.

But its not really what I imagined.
Post Reply