diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/__init__.py b/legacy/aim_metrics/aim_metrics/colour_perception/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp10_wave.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp10_wave.py deleted file mode 100644 index 7291ada..0000000 --- a/legacy/aim_metrics/aim_metrics/colour_perception/cp10_wave.py +++ /dev/null @@ -1,125 +0,0 @@ -############################################### -# WAVE (Weighted Affective Valence Estimates) # -############################################### -# -# V1.0 -# 23/11/2018 -# -# Implemented by: -# Yustynn Panicker -# (yustynn.panicker@aalto.fi) -# -# Supervisor: -# Antti Oulasvirta -# -# This work was funded by Technology Industries of Finland in a three-year -# project grant on self-optimizing web services. The principal investigator -# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi) -# -########### -# Summary # -########### -# -# This is the mean of a simple mapping of pixel colors to the color preference values -# experimentally obtained by Palmer and Schloss. -# -# Under their hypothesis that people's color preferences reflect their dispositions -# towards objects of those colors, they had participants grade their feelings towards -# sets of objects of particular colors, and used those gradings to construct these -# color preference values. -# -# It should be noted that these preferences are likely significantly influenced by -# sociocultural factors, and thus this particular set of preference values may not -# accurately reflect all website visitors' impressions of the color scheme. -# -############# -# Technical # -############# -# -# Inputs: PNG image (base64) -# Returns: List of 1 item: Average WAVE Score Across Pixels (float) -# -############## -# References # -############## -# -# 1. Palmer, S.E. and Schloss, K.B. An Ecological Valence Theory of Human Color Preference. -# Proceedings of the National Academy of Sciences 107, 19 (2010), 8877-8882. 
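-#
-###########
-# Example #
-###########
-#
-# For a single RGB pixel, the mapping used below reduces to a nearest-neighbour lookup (a minimal
-# sketch with illustrative names; WAVE_COLOR_TO_SCORE is the dictionary defined below):
-#
-#   import numpy as np
-#   def wave_of_pixel(rgb):
-#       colours = np.array(list(WAVE_COLOR_TO_SCORE.keys()))
-#       nearest = colours[((colours - np.array(rgb)) ** 2).sum(axis=1).argmin()]
-#       return WAVE_COLOR_TO_SCORE[tuple(nearest)]
-#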
-# -############## -# Change Log # -############## -# -############### -# Bugs/Issues # -############### -# -import base64 -import numpy as np -from io import BytesIO -from PIL import Image - -WAVE_COLOR_TO_SCORE = { - (24, 155, 154): 0.6377440347071583, - (37, 152, 114): 0.7125813449023862, - (59, 125, 181): 0.7396963123644252, - (86, 197, 208): 0.8297180043383949, - (96, 163, 215): 1.0, - (101, 190, 131): 0.648590021691974, - (115, 56, 145): 0.8080260303687636, - (124, 159, 201): 0.8318872017353579, - (126, 152, 68): 0.3579175704989154, - (129, 199, 144): 0.5726681127982647, - (133, 204, 208): 0.5932754880694144, - (156, 78, 155): 0.6843817787418656, - (159, 90, 48): 0.18329718004338397, - (162, 32, 66): 0.8481561822125814, - (162, 115, 167): 0.7451193058568331, - (162, 149, 59): 0.0, - (164, 219, 228): 0.7028199566160521, - (170, 194, 228): 0.7537960954446855, - (177, 200, 101): 0.33731019522776573, - (179, 208, 68): 0.4652928416485901, - (184, 158, 199): 0.63882863340564, - (193, 224, 196): 0.46095444685466386, - (204, 119, 141): 0.4859002169197397, - (208, 154, 119): 0.39154013015184386, - (218, 198, 118): 0.49132321041214755, - (224, 231, 153): 0.2928416485900217, - (235, 45, 92): 0.5488069414316703, - (242, 149, 185): 0.4577006507592191, - (243, 145, 51): 0.7114967462039046, - (251, 200, 166): 0.3741865509761389, - (252, 232, 158): 0.5140997830802604, - (253, 228, 51): 0.7201735357917572 -} - -match_colors = list(WAVE_COLOR_TO_SCORE.keys()) - - -def execute(pngb64): - """ - Average WAVE score - average WAVE score across pixels (float) - - :param pngb64: PNG image (base64) - :return: List of 1 item: average WAVE score across pixels (float) - """ - - im = Image.open(BytesIO(base64.b64decode(pngb64))) - imarr = np.array(im) - imarr = np.delete(imarr, 3, axis=2) # remove alpha values - - ax1, ax2, _ = imarr.shape - num_match_colors = len(match_colors) - - # repeated values for every possible match color - repeated_imarr = np.tile(imarr, num_match_colors) \ - .reshape(ax1, ax2, num_match_colors, 3) - - l2_norms = ((repeated_imarr - np.array(match_colors)) ** 2).sum(axis=3) - - match_indices = l2_norms.argmin(axis=2).flatten() - wave_values = [WAVE_COLOR_TO_SCORE[match_colors[i]] for i in match_indices] - - - return [np.mean(wave_values)] diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp11_color_harmony.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp11_color_harmony.py deleted file mode 100644 index f8bf0cd..0000000 --- a/legacy/aim_metrics/aim_metrics/colour_perception/cp11_color_harmony.py +++ /dev/null @@ -1,359 +0,0 @@ -############################################ -# Color Harmony - Distance to Color Scheme # -############################################ -# -# V1.0 -# 29/05/2017 -# -# Implemented by: -# Thomas Langerak -# (hello@thomaslangerak.nl) -# -# Supervisor: -# Antti Oulasvirta -# -# This work was funded by Technology Industries of Finland in a three-year -# project grant on self-optimizing web services. The principal investigator -# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi) -# -########### -# Summary # -########### -# -# Harmonic colors are sets of colors that are aesthetically pleasing -# in terms of human visual perception. The notion of color harmony in this work is based on the schemes -# developed by Matsuda [Matsuda 1995; Tokumaru et al. 2002], which descend from Itten's notions of -# harmony [Itten 1960], widely accepted in applicable fields involving colors. 
(see the paper for all references)
-#
-# The paper uses the distance to colour schemes to "enhance" photographs; this has not been validated for user
-# interfaces. So, although harmonic schemes are common practice in design, there is limited scientific evidence
-# beyond the paper that formulated this formalism.
-#
-# Also note that colour scheme X will most likely be the closest in distance. This does not mean it is the best
-# fit; it is due to how X is defined. (It covers a large part of the hue circle and is spaced in such a way that
-# the maximum distance to a border is relatively small compared to the other colour schemes.)
-#
-# For an overview of what the schemes look like, please take a look at the paper.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: PNG image (base64)
-# Returns: List of 3 items: the minimum distance to each of the eight colour schemes
-#          (order: i, V, L, L_inverse, I, T, X, Y), the angle alpha at which each minimum
-#          occurs, and a 360-entry histogram of hue occurrences
-#
-##############
-# References #
-##############
-#
-# 1. Cohen-Or, D., Sorkine, O., Gal, R., Leyvand, T. and Xu, Y. Color harmonization.
-#    ACM Transactions on Graphics 25, 3 (2006), 624.
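-#
-###########
-# Example #
-###########
-#
-# The per-pixel term for a single-sector template reduces to the following (a minimal sketch with
-# illustrative names; angles in radians, sector centre alpha, sector width w; math.remainder
-# requires Python 3.7+):
-#
-#   import math
-#   def sector_distance(alpha, hue, sat, w):
-#       d = abs(math.remainder(hue - alpha, 2 * math.pi))  # smallest angle between hue and centre
-#       return 0.0 if d <= w / 2 else (d - w / 2) * sat    # zero inside the sector, else distance to border, weighted by saturation
-#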
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-from skimage import util, color
-import math
-import collections
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-import matplotlib.pyplot as plt
-
-
-def execute(b64):
-    # Input, colour reduction, get rid of unnecessary data, scale down, and get it into the correct structure
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = Image.open(b64)
-    img = util.img_as_ubyte(img)
-    img = img / 255.
-    img = color.rgb2hsv(img)
-    img = img.reshape(-1, 3)
-    img = np.delete(img, 2, 1)
-    img = [tuple(l) for l in img]
-
-    # Find all unique h, s combinations
-    hist = collections.Counter(img)
-    hist = list(hist.items())
-
-    # Get everything into its own array
-    h = []
-    s = []
-    frequency = []
-
-    for x in range(len(hist)):
-        if hist[x][1] > 5:  # Find some argument for this; most papers regarding this topic use 5
-            h.append(int(hist[x][0][0] * 360))
-            s.append(hist[x][0][1])
-            frequency.append(hist[x][1])
-
-    dist_i_all = []
-    dist_V_all = []
-    dist_L_inverse_all = []
-    dist_L_all = []
-    dist_I_all = []
-    dist_T_all = []
-    dist_X_all = []
-    dist_Y_all = []
-
-    # For every angle in the colour spectrum
-    for alpha in range(1, 361):
-        dist_i = 0.0
-        dist_V = 0.0
-        dist_L = 0.0
-        dist_L_inverse = 0.0
-        dist_I = 0.0
-        dist_T = 0.0
-        dist_Y = 0.0
-        dist_X = 0.0
-
-        # For every unique h, s combination
-        for pixel in range(len(h)):
-            # Accumulate the total distance of the image to each colour scheme
-            dist_i += borders_i(alpha, h[pixel], s[pixel]) * frequency[pixel]
-            dist_V += borders_V(alpha, h[pixel], s[pixel]) * frequency[pixel]
-            dist_L += borders_L(alpha, h[pixel], s[pixel]) * frequency[pixel]
-            dist_L_inverse += borders_L_inverse(alpha, h[pixel], s[pixel]) * frequency[pixel]
-            dist_I += borders_I(alpha, h[pixel], s[pixel]) * frequency[pixel]
-            dist_T += borders_T(alpha, h[pixel], s[pixel]) * frequency[pixel]
-            dist_X += borders_X(alpha, h[pixel], s[pixel]) * frequency[pixel]
-            dist_Y += borders_Y(alpha, h[pixel], s[pixel]) * frequency[pixel]
-
-        # Add the total distance to the list
-        dist_i_all.append(dist_i)
-        dist_V_all.append(dist_V)
-        dist_L_inverse_all.append(dist_L_inverse)
-        dist_L_all.append(dist_L)
-        dist_I_all.append(dist_I)
-        dist_T_all.append(dist_T)
-        dist_X_all.append(dist_X)
-        dist_Y_all.append(dist_Y)
-
-    # Find the shortest distance for each colour scheme
-    distances = [min(dist_i_all), min(dist_V_all), min(dist_L_all), min(dist_L_inverse_all), min(dist_I_all),
-                 min(dist_T_all), min(dist_X_all), min(dist_Y_all)]
-
-    # Find the angle at which each minimum occurs
-    alphas = [dist_i_all.index(distances[0]), dist_V_all.index(distances[1]), dist_L_all.index(distances[2]),
-              dist_L_inverse_all.index(distances[3]), dist_I_all.index(distances[4]),
-              dist_T_all.index(distances[5]), dist_X_all.index(distances[6]), dist_Y_all.index(distances[7])]
-
-    # Create a 360-entry list of hue occurrences. This is mainly useful for visualization
-    pixels = []
-    for i in range(0, 360):
-        if i in h:
-            pixels.append(frequency[h.index(i)])
-        else:
-            pixels.append(0)
-
-    results_all = [distances, alphas, pixels]
-
-    # plt.plot(dist_i_all, 'r', linewidth=1.0, label="i")
-    # plt.plot(dist_V_all, 'g', linewidth=1.0, label="V")
-    # plt.plot(dist_L_all, 'b', linewidth=1.0, label="L")
-    # plt.plot(dist_L_inverse_all, 'y', linewidth=1.0, label="L Inverse")
-    # plt.plot(dist_I_all, 'm', linewidth=1.0, label="I")
-    # plt.plot(dist_T_all, 'k', linewidth=1.0, label="T")
-    # plt.plot(dist_X_all, 'c', linewidth=1.0, label="X")
-    # plt.plot(dist_Y_all, '#ee7600', linewidth=1.0, label="Y")
-    # plt.ylabel('distance')
-    # plt.xlabel('alpha')
-    # plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
-    # plt.show()
-
-    # Get the minimum of all. Maybe add alpha to it?
-    # score = min(results_all)
-    # scheme = results_all.index(score)
-    # result = [score, scheme]
-    return results_all
-
-
-# Below we define the formulas that calculate the distance to each border of each scheme. This is based on:
-# https://igl.ethz.ch/projects/color-harmonization/harmonization.pdf
-PI = math.pi
-TAU = 2 * PI
-
-
-# https://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles
-def smallest_angle(x, y):
-    a = (x - y) % TAU
-    b = (y - x) % TAU
-    return -a if a < b else b
-
-
-def borders_i(alpha, pixel_angle, saturation):  # Input: alpha in degrees, hue in degrees
-    # Get the borders of all areas and translate to radians
-    border = [math.radians((alpha - 18 / 2) % 360), math.radians((alpha + 18 / 2) % 360)]
-    pixel_angle = math.radians(pixel_angle)
-
-    # Get the distance to each border
-    distance = []
-    for x in range(len(border)):
-        distance.append(abs(smallest_angle(border[x], pixel_angle)))
-    distance_alpha = abs(smallest_angle(pixel_angle, math.radians(alpha)))
-
-    if distance_alpha <= math.radians(18 / 2):
-        result = 0.0
-
-    # else it is the distance to the closest border
-    else:
-        result = min(distance) * saturation
-
-    # print math.radians(alpha), math.radians(18 / 2), border[0], border[1], pixel_angle, distance[0], distance[1], distance_alpha, result
-    return result
-
-
-def borders_V(alpha, pixel_angle, saturation):
-    # Get the borders of all areas and translate to radians
-    border = [math.radians((alpha - 93.6 / 2) % 360), math.radians((alpha + 93.6 / 2) % 360)]
-    pixel_angle = math.radians(pixel_angle)
-
-    # Get the distance to each border
-    distance = []
-    for x in range(len(border)):
-        distance.append(abs(smallest_angle(border[x], pixel_angle)))
-    distance_alpha = abs(smallest_angle(pixel_angle, math.radians(alpha)))
-    if distance_alpha < math.radians(93.6 / 2):
-        result = 0
-    # else it is the distance to the closest border
-    else:
-        result = min(distance) * saturation
-
-    return result
-
-
-def borders_L(alpha, pixel_angle, saturation):
-    # Get the borders of all areas and translate to radians
-    border = [math.radians((alpha - 93.6 / 2) % 360), math.radians((alpha + 93.6 / 2) % 360),
-              math.radians((alpha + 90 -
18 / 2) % 360), math.radians((alpha + 90 + 18 / 2) % 360)] - - # Get the distance to each border - pixel_angle = math.radians(pixel_angle) - distance = [] - for x in range(len(border)): - distance.append(abs(smallest_angle(border[x], pixel_angle))) - distance_alpha = abs(smallest_angle(pixel_angle, math.radians(alpha))) - distance_alpha_2 = abs(smallest_angle(pixel_angle, (math.radians((alpha + 90) % 360)))) - - if distance_alpha <= math.radians(93.6 / 2) or distance_alpha_2 <= math.radians(18 / 2): - result = 0 - # else it is the distance to the closest border - else: - result = min(distance) * saturation - - return result - - -def borders_L_inverse(alpha, pixel_angle, saturation): - # Get the borders of all areas and translate to radians - border = [math.radians((alpha - 93.6 / 2) % 360), math.radians((alpha + 93.6 / 2) % 360), - math.radians((alpha - 90 - 18 / 2) % 360), math.radians((alpha - 90 + 18 / 2) % 360)] - # Get the distance to each border - pixel_angle = math.radians(pixel_angle) - distance = [] - - for x in range(len(border)): - distance.append(abs(smallest_angle(border[x], pixel_angle))) - distance_alpha = abs(smallest_angle(pixel_angle, math.radians(alpha))) - distance_alpha_2 = abs(smallest_angle(pixel_angle, (math.radians((alpha - 90) % 360)))) - - if distance_alpha <= math.radians(93.6 / 2) or distance_alpha_2 <= math.radians(18 / 2): - result = 0 - # else it is the distance to the closest border - else: - result = min(distance) * saturation - - return result - - -def borders_I(alpha, pixel_angle, saturation): - # Get the borders of all areas and translate to radians - border = [math.radians((alpha - 18 / 2) % 360), math.radians((alpha + 18 / 2) % 360), - math.radians((alpha + 180 - 18 / 2) % 360), math.radians((alpha + 180 + 18 / 2) % 360)] - pixel_angle = math.radians(pixel_angle) - - # Get the distance to each border - distance = [] - for x in range(len(border)): - distance.append(abs(smallest_angle(border[x], pixel_angle))) - distance_alpha = abs(smallest_angle(pixel_angle, math.radians(alpha))) - distance_alpha_2 = abs(smallest_angle(pixel_angle, (math.radians(alpha + 180 % 360)))) - if distance_alpha <= math.radians(18 / 2) or distance_alpha_2 <= math.radians(18 / 2): - result = 0 - # else it is the distance to the closest border - else: - result = min(distance) * saturation - - return result - - -def borders_T(alpha, pixel_angle, saturation): - # Get the borders of all areas and translate to radians - border = [math.radians((alpha - 180 / 2) % 360), math.radians((alpha + 180 / 2) % 360)] - pixel_angle = math.radians(pixel_angle) - - # Get the distance to each border - distance = [] - for x in range(len(border)): - distance.append(abs(smallest_angle(border[x], pixel_angle))) - distance_alpha = abs(smallest_angle(pixel_angle, math.radians(alpha))) - - if distance_alpha <= math.radians(180 / 2): - result = 0 - # else it is the distance to the closest border - else: - result = min(distance) * saturation - - return result - - -def borders_Y(alpha, pixel_angle, saturation): - # Get the borders of all areas and translate to radians - border = [math.radians((alpha - 93.6 / 2) % 360), math.radians((alpha + 93.6 / 2) % 360), - math.radians((alpha + 180 - 18 / 2) % 360), math.radians((alpha + 180 + 18 / 2) % 360)] - pixel_angle = math.radians(pixel_angle) - - # Get the distance to each border - distance = [] - for x in range(len(border)): - distance.append(abs(smallest_angle(border[x], pixel_angle))) - distance_alpha = abs(smallest_angle(pixel_angle, math.radians(alpha))) - 
distance_alpha_2 = abs(smallest_angle(pixel_angle, (math.radians((alpha + 180) % 360))))
-    if distance_alpha <= math.radians(93.6 / 2) or distance_alpha_2 <= math.radians(18 / 2):
-        result = 0
-    # else it is the distance to the closest border
-    else:
-        result = min(distance) * saturation
-
-    return result
-
-
-def borders_X(alpha, pixel_angle, saturation):
-    # Get the borders of all areas and translate to radians
-    border = [math.radians((alpha - 93.6 / 2) % 360), math.radians((alpha + 93.6 / 2) % 360),
-              math.radians((alpha + 180 - 93.6 / 2) % 360), math.radians((alpha + 180 + 93.6 / 2) % 360)]
-    pixel_angle = math.radians(pixel_angle)
-
-    # Get the distance to each border
-    distance = []
-    for x in range(len(border)):
-        distance.append(abs(smallest_angle(border[x], pixel_angle)))
-    distance_alpha = abs(smallest_angle(pixel_angle, math.radians(alpha)))
-    distance_alpha_2 = abs(smallest_angle(pixel_angle, (math.radians((alpha + 180) % 360))))
-    if distance_alpha <= math.radians(93.6 / 2) or distance_alpha_2 <= math.radians(93.6 / 2):
-        result = 0
-    # else it is the distance to the closest border
-    else:
-        result = min(distance) * saturation
-
-    return result
diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp1_png_file_size.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp1_png_file_size.py
deleted file mode 100644
index 12743dd..0000000
--- a/legacy/aim_metrics/aim_metrics/colour_perception/cp1_png_file_size.py
+++ /dev/null
@@ -1,53 +0,0 @@
-###############################
-# Color Range - PNG File Size #
-###############################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Yuxi Zhu (matlab) & Thomas Langerak (converted to python)
-# (zhuyuxi1990@gmail.com) & (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# According to the paper by Miniukovich and De Angeli (1), the file size (in PNG) is a good indicator of the
-# colour range: the higher the file size, the more colourful the image is likely to be. Of course, this also
-# depends on the image dimensions (in pixels). This function returns the file size in bytes; it does not
-# account for the dimensions.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: PNG image (base64)
-# Returns: List of 1 item: PNG File Size (in bytes) (int)
-#
-##############
-# References #
-##############
-#
-# 1. Miniukovich, A. and De Angeli, A. Computation of Interface Aesthetics.
-#    Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems - CHI '15, (2015).
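-#
-###########
-# Example #
-###########
-#
-# The estimate below assumes 4 base64 characters per 3 bytes. An exact byte count can be recovered by
-# decoding (a minimal sketch; b64 is the base64-encoded PNG, as in execute() below):
-#
-#   import base64
-#   png_size = len(base64.b64decode(b64))  # exact decoded size in bytes
-#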
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-def execute(b64):
-    png_size = int(len(b64) * 0.75)
-
-    return [png_size]
diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp2_number_of_colours.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp2_number_of_colours.py
deleted file mode 100644
index 34ce929..0000000
--- a/legacy/aim_metrics/aim_metrics/colour_perception/cp2_number_of_colours.py
+++ /dev/null
@@ -1,78 +0,0 @@
-###################################
-# Color Range - Number of Colours #
-###################################
-#
-# V1.1
-# 15/10/2018
-#
-# Implemented by:
-# Kseniia Palin & Markku Laine
-# (kseniia.palin@aalto.fi) & (markku.laine@aalto.fi)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# The number of unique colours is an indication of colour variance according to (1). Every unique colour is
-# counted; it contributes to the final count only if it occurs often enough (at least 5 times for a website,
-# at least 2 times for a smartphone application). Note that while more colours might at first seem bad, this
-# metric is heavily influenced by the number of images on a page; hence a simple photography website might
-# score high on this scale.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: PNG image (base64), type (website=0/mobile=1) (int)
-# Returns: List of 1 item: Number of Colours (int)
-#
-##############
-# References #
-##############
-#
-# 1. Miniukovich, A. and De Angeli, A. Computation of Interface Aesthetics.
-#    Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems - CHI '15, (2015).
-#
-# 2. Miniukovich, A. and De Angeli, A. Quantification of Interface Visual Complexity. In the 2014 International
-#    Working Conference on Advanced Visual Interfaces (2014), ACM, 153-160.
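-#
-###########
-# Example #
-###########
-#
-# The core count can be sketched with NumPy alone (a minimal sketch, assuming img is an (N, 3) uint8
-# array of RGB pixels; names are illustrative):
-#
-#   import numpy as np
-#   colours, counts = np.unique(img, axis=0, return_counts=True)
-#   n_colours = int((counts >= 5).sum())  # website threshold; use 2 for mobile
-#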
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-from skimage import util
-import collections
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-import matplotlib.image as mpimg
-
-
-def execute(b64, type):
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = mpimg.imread(b64, format='PNG')
-    img = img[:, :, :3]
-    img = img.reshape(-1, 3)
-    img = [tuple(l) for l in img]
-
-    # Create histogram
-    hist = collections.Counter(img)
-
-    # Count the number of colours: only values that occur at least 5 (for web, type=0) or 2 (for mobile, type=1) times per image are counted
-    min_threshold = 5 if type == 0 else 2 if type == 1 else 0
-    count_rgb = [len({x: hist[x] for x in hist if hist[x] >= min_threshold})]
-
-    return count_rgb
diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp3_HSV_avg.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp3_HSV_avg.py
deleted file mode 100644
index dfcceab..0000000
--- a/legacy/aim_metrics/aim_metrics/colour_perception/cp3_HSV_avg.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#############################
-# Color Range - HSV Average #
-#############################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Yuxi Zhu (matlab) & Thomas Langerak (converted to python)
-# (zhuyuxi1990@gmail.com) & (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# Hasler & Susstrunk validated this metric in their paper. It looks at the average and standard deviation
-# of every channel in the HSV colour space.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: JPG image (base64)
-# Returns: List of 5 items: Average Hue (float), Average Saturation (float), Standard Deviation of Saturation (float), Average Value (float), Standard Deviation of Value (float)
-#
-##############
-# References #
-##############
-#
-# 1. Hasler, D. and Susstrunk, S. Measuring Colourfulness in Natural Images. (2003).
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-from skimage import color, util
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-
-
-def execute(b64):
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = Image.open(b64)
-    img = np.array(img)
-    img = util.img_as_ubyte(img)
-    img = img / 255.  # this division is needed to get proper value ranges for hue, saturation and value (0-360, 0-1, 0-1)
-    img = color.rgb2hsv(img)
-    img = img.reshape(-1, 3)
-    img = [tuple(l) for l in img]
-
-    h = []
-    s = []
-    v = []
-
-    # Give each channel its own list
-    for items in img:
-        [hue, sat, val] = [items[0], items[1], items[2]]
-        h.append(hue * 360)
-        s.append(sat)
-        v.append(val)
-
-    # Hue is an angle, so we cannot simply add and average it
-    sumsin = sum(sind(h[:]))
-    sumcos = sum(cosd(h[:]))
-
-    # Get the average value and standard deviation over H, S and V
-    avgHue = atan2d(sumsin, sumcos) % 360
-    avgSaturation = np.mean(s)
-    stdSaturation = np.std(s)
-    avgValue = np.mean(v)
-    stdValue = np.std(v)
-    result = [avgHue, avgSaturation, stdSaturation, avgValue, stdValue]
-
-    return result
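-
-
-# The hue average above is a circular mean; equivalently (a minimal sketch, assuming h is a list of
-# hue angles in degrees):
-#
-#   import numpy as np
-#   rad = np.deg2rad(h)
-#   avg_hue = np.rad2deg(np.arctan2(np.sin(rad).sum(), np.cos(rad).sum())) % 360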
-# Degree-based wrappers around numpy's radian trigonometry, based on:
-# https://stackoverflow.com/questions/43100286/python-trigonometric-calculations-in-degrees
-def sind(x):
-    # Sine of an angle given in degrees
-    return np.sin(np.deg2rad(x))
-
-
-def cosd(x):
-    # Cosine of an angle given in degrees
-    return np.cos(np.deg2rad(x))
-
-
-def atan2d(x, y):
-    # Two-argument arctangent, returning degrees
-    return np.rad2deg(np.arctan2(x, y))
diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp4_HSV_unique.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp4_HSV_unique.py
deleted file mode 100644
index 3e3ccf6..0000000
--- a/legacy/aim_metrics/aim_metrics/colour_perception/cp4_HSV_unique.py
+++ /dev/null
@@ -1,97 +0,0 @@
-############################
-# Color Range - HSV Unique #
-############################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Yuxi Zhu (matlab) & Thomas Langerak (converted to python)
-# (zhuyuxi1990@gmail.com) & (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# Hasler & Susstrunk validated this metric in their paper. It looks at the number of unique values in the HSV
-# colour space.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: JPG image (base64)
-# Returns: List of 4 items: Number of Unique HSV (int), Number of Unique Hue (int), Number of Unique Saturation (int), Number of Unique Value (int)
-#
-##############
-# References #
-##############
-#
-# 1. Hasler, D. and Susstrunk, S. Measuring Colourfulness in Natural Images. (2003).
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-from skimage import color, util
-import collections
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-
-
-def execute(b64):
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = Image.open(b64)
-    img = np.array(img)
-    img = util.img_as_ubyte(img)
-    img = color.rgb2hsv(img)
-    img = img.reshape(-1, 3)
-    img = [tuple(l) for l in img]
-
-    hist = collections.Counter(img)
-    hist = list(hist.items())
-
-    hsv_unique = []
-    count = []
-    h = []
-    s = []
-    v = []
-
-    for x in range(len(hist)):
-        add = [hist[x][0][0], hist[x][0][1], hist[x][0][2]]
-        hsv_unique.append(add)
-        count.append(hist[x][1])
-        h.append(hist[x][0][0])
-        s.append(hist[x][0][1])
-        v.append(hist[x][0][2])
-
-    # Get all unique values, still with all counts (so no minimal occurrence). This probably needs some
-    # changing in the future
-    h_unique = np.unique(h)
-    s_unique = np.unique(s)
-    v_unique = np.unique(v)
-
-    new_hsv = []
-
-    # Keep only sufficiently often occurring HSV values
-    for x in range(len(hsv_unique)):
-        if count[x] > 5:
-            new_hsv.append(hsv_unique[x])
-
-    result = [len(new_hsv), len(h_unique), len(s_unique), len(v_unique)]
-
-    return result
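-
-
-# Equivalently with NumPy alone (a minimal sketch, assuming hsv is the (N, 3) array of HSV rows):
-#
-#   import numpy as np
-#   triplets, counts = np.unique(hsv, axis=0, return_counts=True)
-#   n_hsv = int((counts > 5).sum())            # unique HSV triplets occurring more than 5 times
-#   n_h, n_s, n_v = (len(np.unique(hsv[:, c])) for c in range(3))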
diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp5_LAB_avg.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp5_LAB_avg.py
deleted file mode 100644
index afa7249..0000000
--- a/legacy/aim_metrics/aim_metrics/colour_perception/cp5_LAB_avg.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#############################
-# Color Range - LAB Average #
-#############################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Yuxi Zhu (matlab) & Thomas Langerak (converted to python)
-# (zhuyuxi1990@gmail.com) & (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# This implementation is very similar to CR4_HSV_avg; the main difference is the colour space that is used.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: JPG image (base64)
-# Returns: List of 6 items: Mean Lightness (float), Standard Deviation Lightness (float), Mean A (Green-Red Space) (float), Standard Deviation A (float), Mean B (Yellow-Blue Space) (float), Standard Deviation B (float)
-#
-##############
-# References #
-##############
-#
-# 1. Hasler, D. and Susstrunk, S. Measuring Colourfulness in Natural Images. (2003).
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-from skimage import color, util
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-
-
-def execute(b64):
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = Image.open(b64)
-    img = np.array(img)
-    img = util.img_as_ubyte(img)
-
-    # Convert to the LAB space
-    lab = color.rgb2lab(img)
-
-    L = lab[:, :, 0]
-    A = lab[:, :, 1]
-    B = lab[:, :, 2]
-
-    # Get the average and standard deviation for each channel separately
-    meanL = np.mean(L)
-    stdL = np.std(L)
-    meanA = np.mean(A)
-    stdA = np.std(A)
-    meanB = np.mean(B)
-    stdB = np.std(B)
-
-    result = [meanL, stdL, meanA, stdA, meanB, stdB]
-
-    return result
diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp6_hassler_susstrunk.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp6_hassler_susstrunk.py
deleted file mode 100644
index 298e87c..0000000
--- a/legacy/aim_metrics/aim_metrics/colour_perception/cp6_hassler_susstrunk.py
+++ /dev/null
@@ -1,86 +0,0 @@
-####################################
-# Color Range - Hasler & Susstrunk #
-####################################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Yuxi Zhu (matlab) & Thomas Langerak (converted to python)
-# (zhuyuxi1990@gmail.com) & (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# This metric was proposed by Hasler and Susstrunk as a more computationally efficient alternative to CR3-CR6.
-# It is proven to have a very high correspondence to users' perception (95%). It relies on the RGYB colour
-# spectrum and mainly looks at the mean and standard deviation of its channels: the higher the standard
-# deviation, the more colourful the image is perceived to be. The nested loop, however, makes it
-# computationally heavier than originally intended. Also note that it does not take the hue into account,
-# which has been proven to be a significant factor.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: JPG image (base64)
-# Returns: List of 7 items: Mean Distribution (Red-Green) (float), Standard Deviation Distribution (Red-Green) (float), Mean Distribution (Yellow-Blue) (float), Standard Deviation Distribution (Yellow-Blue) (float), Mean Distribution (RGYB) (float), Standard Deviation Distribution (RGYB) (float), Colorfulness (float)
-#
-##############
-# References #
-##############
-#
-# 1. Hasler, D. and Susstrunk, S. Measuring Colourfulness in Natural Images. (2003).
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-from skimage import util
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-
-
-def execute(b64):
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = Image.open(b64)
-    img = np.array(img)
-    img = util.img_as_ubyte(img)
-    img = img.reshape(-1, 3)
-    img = [tuple(l) for l in img]
-
-    rg = []
-    yb = []
-    for item in img:
-        [r, g, b] = [int(item[0]), int(item[1]), int(item[2])]
-        # These formulae are proposed in Hasler, D. and Susstrunk, S. Measuring Colourfulness in Natural Images. (2003)
-        rg.append(np.abs(r - g))
-        yb.append(np.abs((0.5 * (r + g)) - b))
-
-    meanRG = np.mean(rg)
-    stdRG = np.std(rg)
-    meanYB = np.mean(yb)
-    stdYB = np.std(yb)
-    meanRGYB = np.sqrt(meanRG ** 2 + meanYB ** 2)
-    stdRGYB = np.sqrt(stdRG ** 2 + stdYB ** 2)
-
-    # Proposed in the same paper
-    colourfulness = stdRGYB + 0.3 * meanRGYB
-
-    result = [meanRG, stdRG, meanYB, stdYB, meanRGYB, stdRGYB, colourfulness]
-
-    return result
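-
-
-# The metric vectorises cleanly (a minimal sketch, equivalent to the loop above; img is the (N, 3)
-# integer RGB array):
-#
-#   rgb = np.asarray(img, dtype=float)
-#   rg = np.abs(rgb[:, 0] - rgb[:, 1])
-#   yb = np.abs(0.5 * (rgb[:, 0] + rgb[:, 1]) - rgb[:, 2])
-#   colourfulness = np.sqrt(rg.std() ** 2 + yb.std() ** 2) + 0.3 * np.sqrt(rg.mean() ** 2 + yb.mean() ** 2)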
diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp7_static_clusters.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp7_static_clusters.py
deleted file mode 100644
index d38944a..0000000
--- a/legacy/aim_metrics/aim_metrics/colour_perception/cp7_static_clusters.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#################################
-# Color Range - Static Clusters #
-#################################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Yuxi Zhu (matlab) & Thomas Langerak (converted to python)
-# (zhuyuxi1990@gmail.com) & (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# The number of static colour clusters: the RGB cube is divided into 32x32x32 sub-cubes, so a sub-cube edge
-# covers 8 of the 256 possible values per RGB channel. Only clusters containing more than 5 values are
-# counted. It is a significant factor for dominant colours and clutter, but not for colour variance. CR3 is
-# proven to be more accurate, though it is also more computationally complex.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: JPG image (base64)
-# Returns: List of 1 item: Number of Clusters (int)
-#
-##############
-# References #
-##############
-#
-# 1. Miniukovich, A. and De Angeli, A. Computation of Interface Aesthetics.
-#    Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems - CHI '15, (2015).
-#
-# 2. Miniukovich, A. and De Angeli, A. Quantification of Interface Visual Complexity. In the 2014 International
-#    Working Conference on Advanced Visual Interfaces (2014), ACM, 153-160.
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-from skimage import util
-import collections
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-
-
-def execute(b64):
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = Image.open(b64)
-    img = np.array(img)
-    img = util.img_as_ubyte(img)
-    img = img.reshape(-1, 3)
-    img = [tuple(l) for l in img]
-
-    # Get unique colours and their frequencies
-    # Divide the RGB spectrum (0-255) into a 32x32x32 matrix
-    hist = collections.Counter(img)
-    hist = list(hist.items())
-    cluster = np.zeros((32, 32, 32))
-    for x in range(len(hist)):
-        # Integer division buckets each channel into 32 bins of width 8
-        rc = int(hist[x][0][0]) // 8
-        gc = int(hist[x][0][1]) // 8
-        bc = int(hist[x][0][2]) // 8
-        cluster[rc, gc, bc] += hist[x][1]
-
-    # The number of cells that have more than 5 entries
-    result = (cluster > 5).sum()
-
-    return [result]
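-
-
-# Equivalently with NumPy alone (a minimal sketch; img is the (N, 3) uint8 RGB array):
-#
-#   import numpy as np
-#   buckets = img // 8                                   # 32 buckets per channel
-#   _, counts = np.unique(buckets, axis=0, return_counts=True)
-#   n_clusters = int((counts > 5).sum())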
diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp8_dynamic_clusters.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp8_dynamic_clusters.py
deleted file mode 100644
index 02bc08f..0000000
--- a/legacy/aim_metrics/aim_metrics/colour_perception/cp8_dynamic_clusters.py
+++ /dev/null
@@ -1,158 +0,0 @@
-##################################
-# Color Range - Dynamic Clusters #
-##################################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Yuxi Zhu (matlab) & Thomas Langerak (converted to python)
-# (zhuyuxi1990@gmail.com) & (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# In their paper, Miniukovich and De Angeli suggest (among others) two factors as an indication of
-# colourfulness: the number of dynamic clusters and the number of colours per dynamic cluster.
-#
-# "The number of dynamic clusters of colors after color reduction (more than 5 pixels). If a difference between
-# two colors in a color cube is less than or equal to 3, two colors are united in the same cluster, which continues
-# recursively for all colors. Only clusters containing more than 5 values are counted."
-#
-# The number of clusters has not proven statistically relevant; the number of colours per cluster has. Both
-# are returned by this function.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: JPG image (base64)
-# Returns: List of 2 items: Number of Clusters (int), Average number of colours per Cluster (int)
-#
-##############
-# References #
-##############
-#
-# 1. Miniukovich, A. and De Angeli, A. Computation of Interface Aesthetics.
-#    Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems - CHI '15, (2015).
-#
-# 2. Miniukovich, A. and De Angeli, A. Quantification of Interface Visual Complexity. In the 2014 International
-#    Working Conference on Advanced Visual Interfaces (2014), ACM, 153-160.
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-from operator import itemgetter
-from skimage import util
-import collections
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-
-
-def execute(b64):
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = Image.open(b64)
-    img = np.array(img)
-    img = util.img_as_ubyte(img)
-    img = img.reshape(-1, 3)
-    img = [tuple(l) for l in img]
-    hist = collections.Counter(img)
-    hist = list(hist.items())
-    frequency = []
-
-    # Create a list from the histogram
-    for x in range(len(hist)):
-        add = [hist[x][0][0], hist[x][0][1], hist[x][0][2], hist[x][1]]
-        frequency.append(add)
-
-    # Sort the pixels on frequency. This way we can cut the while loop short
-    frequency = sorted(frequency, key=itemgetter(3))
-    k = len(frequency) - 1
-
-    # Create the first cluster
-    center_of_clusters = []
-    add = [frequency[0][0], frequency[0][1], frequency[0][2], frequency[0][3], 1]
-    center_of_clusters.append(add)
-
-    # Find a cluster for every colour point
-    while k >= 0:
-
-        # Only colour points with enough presence
-        if frequency[k][3] < 6:
-            break
-        else:
-            belong1cluster = False
-
-            # For every colour point, calculate the distance to all clusters
-            for center in range(len(center_of_clusters)):
-                point_freq = np.array([frequency[k][0], frequency[k][1], frequency[k][2]], dtype=int)
-                point_center = np.array([center_of_clusters[center][0], center_of_clusters[center][1],
-                                         center_of_clusters[center][2]], dtype=int)
-                distance = np.linalg.norm(point_freq - point_center)
-
-                # If a cluster is close enough, add this colour and recalculate the cluster.
-                # Currently the colour goes to the first cluster fulfilling this. Maybe it should be the closest?
-                if distance <= 3:
-                    new_count = center_of_clusters[center][3] + frequency[k][3]
-                    new_center = [
-                        int((point_freq[0] * frequency[k][3] + point_center[0] * center_of_clusters[center][3]) / new_count),
-                        int((point_freq[1] * frequency[k][3] + point_center[1] * center_of_clusters[center][3]) / new_count),
-                        int((point_freq[2] * frequency[k][3] + point_center[2] * center_of_clusters[center][3]) / new_count)]
-                    center_of_clusters[center][0] = new_center[0]
-                    center_of_clusters[center][1] = new_center[1]
-                    center_of_clusters[center][2] = new_center[2]
-                    center_of_clusters[center][3] = new_count
-                    center_of_clusters[center][4] += 1
-                    belong1cluster = True
-                    break
-
-            # Create a new cluster if the colour point is not close enough to the other clusters
-            if not belong1cluster:
-                add = [frequency[k][0], frequency[k][1], frequency[k][2], frequency[k][3], 1]
-                center_of_clusters.append(add)
-        k -= 1
-
-    # Only keep clusters with more than 5 colour entries
-    new_center_of_clusters = []
-    for x in range(len(center_of_clusters)):
-        if center_of_clusters[x][4] > 5:
-            new_center_of_clusters.append(center_of_clusters[x])
-
-    # Number of clusters, not statistically relevant
-    count_dynamic_cluster = len(new_center_of_clusters)
-
-    # Average number of colours per cluster
-    average_colour_dynamic_cluster = 0
-    for x in range(len(new_center_of_clusters)):
-        average_colour_dynamic_cluster += new_center_of_clusters[x][4]
-
-    try:
-        average_colour_dynamic_cluster = average_colour_dynamic_cluster / count_dynamic_cluster
-    except ZeroDivisionError:
-        average_colour_dynamic_cluster = 0
-
-    result = [int(count_dynamic_cluster), int(average_colour_dynamic_cluster)]
-
-    return result
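-
-
-# The merge rule at the heart of the clustering is a Euclidean distance test in the RGB cube
-# (a minimal sketch with illustrative names; c1 and c2 are RGB triplets):
-#
-#   import numpy as np
-#   def same_cluster(c1, c2):
-#       return np.linalg.norm(np.asarray(c1, float) - np.asarray(c2, float)) <= 3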
diff --git a/legacy/aim_metrics/aim_metrics/colour_perception/cp9_luminance_sd.py b/legacy/aim_metrics/aim_metrics/colour_perception/cp9_luminance_sd.py
deleted file mode 100644
index 9cfeed3..0000000
--- a/legacy/aim_metrics/aim_metrics/colour_perception/cp9_luminance_sd.py
+++ /dev/null
@@ -1,72 +0,0 @@
-##############################################
-# Color Range - Luminance Standard Deviation #
-##############################################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Thomas Langerak
-# (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# This is the standard deviation of luminance over all pixels. It has been proven not to be statistically
-# relevant for the perceived colour variance of a webpage.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: JPG image (base64)
-# Returns: List of 1 item: Standard Deviation in Luminance (float)
-#
-##############
-# References #
-##############
-#
-# 1. Miniukovich, A. and De Angeli, A. Quantification of Interface Visual Complexity. In the 2014 International
-#    Working Conference on Advanced Visual Interfaces (2014), ACM, 153-160.
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-from skimage import util
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-
-
-def execute(b64):
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = Image.open(b64)
-    img = np.array(img)
-    img = util.img_as_ubyte(img)
-    img = img.reshape(-1, 3)
-    img = [tuple(l) for l in img]
-
-    lum = []
-    for pixel in img:
-        # Based on: https://en.wikipedia.org/wiki/Luma_(video)
-        y = 0.2126 * pixel[0] + 0.7152 * pixel[1] + 0.0722 * pixel[2]
-        lum.append(y)
-
-    result = np.std(lum)
-
-    return [result]
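-
-
-# Vectorised equivalent (a minimal sketch; img is the (N, 3) uint8 RGB array):
-#
-#   luma = img.reshape(-1, 3).astype(float) @ np.array([0.2126, 0.7152, 0.0722])  # Rec. 709 luma
-#   sd = float(luma.std())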
diff --git a/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf1_edge_density.py b/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf1_edge_density.py
deleted file mode 100644
index 3e2b184..0000000
--- a/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf1_edge_density.py
+++ /dev/null
@@ -1,84 +0,0 @@
-##########################
-# Clutter - Edge Density #
-##########################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Thomas Langerak
-# (hello@thomaslangerak.nl)
-#
-# Based on code by:
-# Yuxi Zhu
-# (zhuyuxi1990@gmail.com)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# Mack and Oliva suggested edge density as a metric for clutter: the ratio of pixels regarded as an edge to
-# the total number of pixels in the image. Miniukovich and De Angeli found it a clear indicator of clutter,
-# and Rosenholtz et al. also found it relevant, though they noted that the lack of colour variance decreases
-# its accuracy compared to Feature Congestion.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: JPG image (base64)
-# Returns: List of 1 item: Edge Density (float)
-#
-##############
-# References #
-##############
-#
-# 1. Miniukovich, A. and De Angeli, A. Computation of Interface Aesthetics.
-#    Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems - CHI '15, (2015).
-#
-# 2. Rosenholtz, R., Li, Y. and Nakano, L. Measuring visual clutter. Journal of Vision 7, 2 (2007), 17.
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-# Sigma from the original paper is not taken into account in the Canny call.
-#
-import cv2
-from skimage import util, color
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-
-
-def execute(b64):
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = Image.open(b64)
-    img = np.array(img)
-    img_la = color.rgb2gray(img)
-    img_la = util.img_as_ubyte(img_la)
-
-    # 0.11 and 0.27, sigma = 1, from Measuring Visual Clutter
-    # See sigma here: https://dsp.stackexchange.com/questions/4716/differences-between-opencv-canny-and-matlab-canny
-    img_la = cv2.GaussianBlur(img_la, (7, 7), 1)
-    cd = cv2.Canny(img_la, 0.11, 0.27)
-    total = cd.shape[0] * cd.shape[1]  # Total number of pixels
-    number_edges = np.count_nonzero(cd)  # Number of edge pixels
-    contour_density = float(number_edges) / float(total)  # Ratio
-
-    result = [contour_density]
-
-    return result
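-
-
-# The essence of the metric in isolation (a minimal sketch; gray is a uint8 grayscale image and the
-# Canny thresholds are illustrative):
-#
-#   import cv2
-#   import numpy as np
-#   edges = cv2.Canny(gray, 100, 200)
-#   edge_density = np.count_nonzero(edges) / edges.size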
diff --git a/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf2_edge_congestion.py b/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf2_edge_congestion.py
deleted file mode 100644
index 13ffaa5..0000000
--- a/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf2_edge_congestion.py
+++ /dev/null
@@ -1,154 +0,0 @@
-################################
-# Complexity - Edge Congestion #
-################################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Thomas Langerak
-# (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# (Taken from 2)
-# Discriminating and tracing a line in a line congestion situation can be problematic. This issue often emerges in the
-# domain of large-graph visualization. Wong et al. [41] stated the problem: 'the density of edges is so great that
-# they obscure nodes, individual edges and even visual information beneath the graph'. They also proposed an
-# interactive solution to edge congestion - in graphs the edges were bent away from users' point of attention without
-# changing the number of nodes or edges. A more sophisticated discussion of edge congestion comes from the research on
-# crowding and is grounded on the notion of critical spacing - the distance between objects at which object perception
-# starts to degrade [16]. For example, the crowding model of visual clutter [36] uses eccentricity-based critical
-# spacing to account for information loss in peripheral visual field. However, the accompanying algorithm accounts
-# simultaneously for both visual clutter and edge congestion, and might need reconfiguration to account for edge
-# congestion only.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: PNG image (base64)
-# Returns: List of 1 item: Edge Congestion (float)
-#
-##############
-# References #
-##############
-#
-# 1. Levi, D. Crowding. An essential bottleneck for object recognition: A mini-review. Vision Research 48, 5 (2008),
-#    635-654.
-#
-# 2. Miniukovich, A. and De Angeli, A. Quantification of interface visual complexity. Proceedings of the 2014
-#    International Working Conference on Advanced Visual Interfaces - AVI '14, (2014).
-#
-# 3. van den Berg, R., Cornelissen, F. and Roerdink, J. A crowding model of visual clutter. Journal of Vision 9, 4
-#    (2009), 24-24.
-#
-# 4. Wong, N., Carpendale, S. and Greenberg, S. EdgeLens: An Interactive Method for Managing Edge Congestion
-#    in Graphs. IEEE Symposium on Information Visualization (October 19-21, 2003), 51-58.
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-from skimage import util
-import math
-import numpy as np
-import base64
-from PIL import Image
-from io import BytesIO
-
-
-def execute(b64):
-    b64 = base64.b64decode(b64)
-    b64 = BytesIO(b64)
-    img = Image.open(b64)
-    img = np.array(img)
-    img = util.img_as_ubyte(img)
-    height, width, depth = img.shape
-    borders = np.zeros((img.shape[0], img.shape[1]), dtype=int)
-
-    # Do edge detection. create_border returns 0 or 255 depending on the difference with neighbouring pixels
-    for x in range(1, width - 1):
-        for y in range(1, height - 1):
-            borders[y][x] = create_border(img, borders, y, x)
-
-    count_edge = 0
-    count_uncongested = 0
-    threshold = 4  # The number of pixels a person needs to differentiate between two elements; the paper says 20, which seems far too high here
-
-    # Ensure borders is a numpy array
-    borders = np.array(borders)
-
-    # Assume the screen border is always a border
-    for x in range(threshold, width - threshold):
-        for y in range(threshold, height - threshold):
-            if borders[y][x] == 255:
-                count_edge += 1
-
-                # Sum left, right, up and down over the pixels within the threshold
-                arr_right = borders[y, x + 1:x + threshold]
-                sum_right = sum(arr_right)
-                arr_left = borders[y, x - threshold:x - 1]
-                sum_left = sum(arr_left)
-                arr_up = borders[y + 1:y + threshold, x]
-                sum_up = sum(arr_up)
-                arr_down = borders[y - threshold:y - 1, x]
-                sum_down = sum(arr_down)
-
-                # A zero sum in a direction means there are no edge pixels nearby in that direction;
-                # a pixel counts as congested only if all four directions are non-zero
-                if sum_right == 0 or sum_left == 0 or sum_up == 0 or sum_down == 0:
-                    count_uncongested += 1
-
-    try:
-        count_congested = count_edge - count_uncongested
-        result = float(count_congested) / float(count_edge)
-    except ZeroDivisionError:
-        result = 0
-
-    return [result]
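-
-
-# The per-pixel congestion test used above, in isolation (a minimal sketch mirroring the slices in
-# execute(); borders is the 0/255 edge map and t the critical-spacing threshold):
-#
-#   def is_congested(borders, y, x, t=4):
-#       return (borders[y, x + 1:x + t].any() and borders[y, x - t:x - 1].any()
-#               and borders[y + 1:y + t, x].any() and borders[y - t:y - 1, x].any())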
-
-
-def create_border(img, borders, y, x):
-    r1 = int(img[y][x][0])
-    g1 = int(img[y][x][1])
-    b1 = int(img[y][x][2])
-
-    points_2 = [
-        [y, x + 1],
-        [y, x - 1],
-        [y + 1, x],
-        [y - 1, x]
-    ]
-
-    ret = 0
-    for n in range(4):
-        x2 = points_2[n][1]
-        y2 = points_2[n][0]
-
-        r2 = int(img[y2][x2][0])
-        g2 = int(img[y2][x2][1])
-        b2 = int(img[y2][x2][2])
-
-        dst_r = math.fabs(r2 - r1)
-        dst_g = math.fabs(g2 - g1)
-        dst_b = math.fabs(b2 - b1)
-
-        if (dst_r > 50 or dst_b > 50 or dst_g > 50) and borders[y2][x2] == 0:
-            ret = 255
-            break
-
-    return ret
diff --git a/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf3_jpeg_file_size.py b/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf3_jpeg_file_size.py
deleted file mode 100644
index e10931c..0000000
--- a/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf3_jpeg_file_size.py
+++ /dev/null
@@ -1,51 +0,0 @@
-###########################
-# Clutter - JPG File Size #
-###########################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Thomas Langerak
-# (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# According to the paper by Miniukovich and De Angeli (1), the file size (in JPEG) is a good indicator
-# of clutter.
-#
-#############
-# Technical #
-#############
-#
-# Inputs: JPG image (base64)
-# Returns: List of 1 item: JPEG File Size (in bytes) (int)
-#
-##############
-# References #
-##############
-#
-# 1. Miniukovich, A. and De Angeli, A. Computation of Interface Aesthetics.
-#    Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems - CHI '15, (2015).
-#
-##############
-# Change Log #
-##############
-#
-###############
-# Bugs/Issues #
-###############
-#
-def execute(b64):
-    jpg_size = int(len(b64) * 0.75)
-
-    return [jpg_size]
diff --git a/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf4_figure_ground_contrast.py b/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf4_figure_ground_contrast.py
deleted file mode 100644
index 5f219d3..0000000
--- a/legacy/aim_metrics/aim_metrics/perceptual_fluency/pf4_figure_ground_contrast.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#######################################
-# Complexity - Figure-Ground Contrast #
-#######################################
-#
-# V1.0
-# 29/05/2017
-#
-# Implemented by:
-# Yuxi Zhu (matlab) & Thomas Langerak (converted to python)
-# (zhuyuxi1990@gmail.com) & (hello@thomaslangerak.nl)
-#
-# Supervisor:
-# Antti Oulasvirta
-#
-# This work was funded by Technology Industries of Finland in a three-year
-# project grant on self-optimizing web services. The principal investigator
-# is Antti Oulasvirta of Aalto University (antti.oulasvirta@aalto.fi)
-#
-###########
-# Summary #
-###########
-#
-# (Taken from 2)
-# Psychologists often use luminance or color contrast to manipulate perceptual fluency. For example, Reber et al.
-# showed participants phrases in green or red on a white background (high contrast condition), and in yellow or -# light-blue color on a white background (low contrast condition). The high-contrast phrases were judged as true -# facts significantly above the chance level, whereas the low-contrast phrases were not. The authors attributed -# it to the difference in reading difficulty, and thus, processing fluency. Similarly, Reber et al. showed -# participants 70% black (high contrast) and 30% black (low contrast) words on a white background. In the high -# contrast scenario, the participants were significantly faster at detecting and recognizing words. Hall et al. -# explored text readability of web pages, and found white-black text-background combinations to be more readable -# than light- and dark-blue, or cyan-black combinations. However, the studies above did not measure contrast -# automatically. -# -############# -# Technical # -############# -# -# Inputs: PNG image (base64) -# Returns: List of 1 item: Figure-Ground Contrast (float) -# -############## -# References # -############## -# -# 1. Hall, R. and Hanna, P. The impact of web page text-background colour combinations on readability, retention, -# aesthetics and behavioural intention. Behaviour & Information Technology 23, 3 (2004), 183-195. -# -# 2. Miniukovich, A. and De Angeli, A. Quantification of interface visual complexity. Proceedings of the 2014 -# International Working Conference on Advanced Visual Interfaces - AVI '14, (2014). -# -# 3. Reber, R., Winkielman, P. and Schwarz, N. Effects of Perceptual Fluency on Affective Judgments. Psychological -# Science 9, 1 (1998), 45-48. -# -# 4. Reber, R., Wurtz, P. and Zimmermann, T. Exploring 'fringe' consciousness: The subjective experience of -# perceptual fluency and its objective bases. Consciousness and Cognition 13, 1 (2004), 47-60. -# -############## -# Change Log # -############## -# -############### -# Bugs/Issues # -############### -# -import cv2 -from skimage import util, color -import numpy as np -import base64 -from PIL import Image -from io import BytesIO - - -def execute(b64): - b64 = base64.b64decode(b64) - b64 = BytesIO(b64) - img = Image.open(b64) - img= np.array(img) - img_la = color.rgb2gray(img) - img_la = util.img_as_ubyte(img_la) - - # Get the number of edge pixels per level. See 1 - edge_per_level = [] - for x in range(1, 8): - # Blur is needed: https://dsp.stackexchange.com/questions/4716/differences-between-opencv-canny-and-matlab-canny - img_la = cv2.GaussianBlur(img_la, (7, 7), 2) - cd = cv2.Canny(img_la, x * 0.04, x * 0.1) # Higher level from 0.1-0.7, lower level is 40% of higher - number_edges = np.count_nonzero(cd) # Number of edge pixels - edge_per_level.append(number_edges) - - difference = [] - - # Calculate the difference between each level - for x in range(len(edge_per_level) - 1): - difference.append(edge_per_level[x] - edge_per_level[x + 1]) - - # Give weight per level. Lower levels have more impact so higher weight - weighted_sum = 0 - for x in range(len(difference)): - weighted_sum += difference[x] * (1.0 - ((x - 1.0) / 6.0)) - - # Normalize - try: - result = [weighted_sum / (edge_per_level[0] - edge_per_level[5])] - except ZeroDivisionError: - result = [0] - - return result
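-
-
-# Usage sketch for these metric modules (a minimal sketch; the file name is illustrative):
-#
-#   import base64
-#   with open('screenshot.png', 'rb') as f:
-#       b64 = base64.b64encode(f.read())
-#   print(execute(b64))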