🔍Detect-A-Quilt! Now in a Web Browser Near You!
Your Quilt has ? columns and ? rows
{
"splashscreen": {
"autoclose": true
},
"packages": [
"numpy",
"opencv-python",
"matplotlib"
]
}
# import the stuff
from pyodide import to_js, create_proxy
import asyncio
import base64
from js import window
from js import Math
from js import performance
from js import Uint8ClampedArray, ImageData
from js import document
from js import console
import cv2
import numpy as np
import matplotlib.pyplot as plt
#setup a function for the event listener to call
async def updateImageDisplay(event):
    """Handle the file input's "change" event.

    Loads the chosen quilt image into the page, estimates the quilt's grid
    of views via FFT autocorrelation + blob detection, updates the page's
    column/row counters, and draws a diagnostic overlay on a second canvas.

    Parameters
    ----------
    event : JsProxy
        The DOM change event fired by the <input type="file"> element.
    """
    console.log('testing the event listener')
    # Point the <img> element at the newly selected file so the browser
    # decodes it for us.
    imageElement = document.getElementById('imageElement')
    imageElement.src = window.URL.createObjectURL(event.target.files.item(0))
    console.log("updating image")
    # Give the browser a moment to decode the image before drawing it.
    await asyncio.sleep(0.25)

    # Draw the quilt onto the working canvas at a reduced, fixed size.
    DIM = 512
    canvas = document.getElementById('canvasElement')
    ctx = canvas.getContext('2d')
    ctx.drawImage(imageElement, 0, 0, DIM, DIM)

    # Second canvas, used to display the processed/diagnostic image.
    fourier_canvas = document.getElementById('fourier')
    fourier_ctx = fourier_canvas.getContext('2d')

    # Pull the canvas pixels back into Python as a grayscale OpenCV image.
    dataurl = canvas.toDataURL()
    image_b64 = dataurl.split(",")[1]
    binary = base64.b64decode(image_b64)
    image = np.asarray(bytearray(binary), dtype="uint8")
    image = cv2.imdecode(image, 0)  # flag 0 -> decode as grayscale

    acorr = _autocorrelation(image)
    peaks = _peak_image(acorr)
    keypoints = _detect_blobs(peaks)
    views_x, views_y = _estimate_views(keypoints, DIM)

    # Output the count and update the web page with it.
    print(f'views: {views_x} x {views_y}')
    document.getElementById("columns").innerHTML = views_x
    document.getElementById("rows").innerHTML = views_y

    # Push the diagnostic overlay generated by cv2 up into the second canvas.
    overlay = _overlay_image(image, keypoints, DIM)
    data = Uint8ClampedArray.new(to_js(overlay.tobytes()))
    # numpy shape order is (rows, cols, channels) == (height, width, channels);
    # ImageData.new() wants (data, width, height).
    height, width, _ = overlay.shape
    image_data = ImageData.new(data, width, height)
    fourier_ctx.putImageData(image_data, 0, 0)


def _autocorrelation(image):
    """Return the spatial autocorrelation of a grayscale image, with
    partial spectral whitening to sharpen the correlation peaks."""
    # Taken from https://stackoverflow.com/a/72322879 and modified.
    # Copyright 2022 Google LLC.
    # SPDX-License-Identifier: Apache-2.0
    spectrum = np.fft.rfft2(image)
    # Partially whiten the spectrum. This tends to make the autocorrelation
    # sharper, but it also amplifies noise. The -0.6 exponent is the strength
    # of the whitening normalization, where -1.0 would be full normalization
    # and 0.0 would be the usual unnormalized autocorrelation.
    spectrum *= (1e-12 + np.abs(spectrum))**-0.6
    # Exclude some very low frequencies, since these are irrelevant to the
    # texture.
    fx = np.arange(spectrum.shape[1])
    fy = np.fft.fftshift(np.arange(spectrum.shape[0]) - spectrum.shape[0] // 2)
    fx, fy = np.meshgrid(fx, fy)
    spectrum[np.sqrt(fx**2 + fy**2) < 10] = 0
    # Autocorrelation = inverse transform of the power spectrum.
    return np.real(np.fft.irfft2(np.abs(spectrum)**2))
    # end google llc snippet


def _peak_image(acorr):
    """Convert the autocorrelation into a clean binary image whose bright
    blobs are the correlation peaks, ready for blob detection."""
    # Colormap + normalize; clipping at the 99.8th percentile keeps a few
    # huge peaks from washing out the rest.
    cmap = plt.cm.Greys  # direct attribute; plt.cm.get_cmap is deprecated
    norm = plt.Normalize(vmin=0, vmax=np.percentile(acorr, 99.8))
    img = (cmap(norm(acorr)) * 255).astype(np.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_RGBA2GRAY)
    img = cv2.bitwise_not(img)  # invert so peaks become bright blobs
    # Blur, threshold, then morphological open (erode + dilate) to drop
    # speckle noise while keeping blob positions.
    BLUR = 4
    img = cv2.blur(img, (BLUR, BLUR))
    _, img = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY)
    kern = np.ones((3, 3), np.uint8)
    img = cv2.erode(img, kern)
    return cv2.dilate(img, kern)


def _detect_blobs(img):
    """Detect small bright blobs (autocorrelation peaks) in a binary image."""
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = 3
    params.maxArea = 30
    # Only area matters; switch off every other built-in filter.
    params.filterByColor = False
    params.filterByCircularity = False
    params.filterByInertia = False
    params.filterByConvexity = False
    return cv2.SimpleBlobDetector_create(params).detect(img)


def _estimate_views(keypoints, dim):
    """Estimate (columns, rows) of the quilt from blob keypoints.

    The peak closest to the origin (beyond a small edge margin) gives the
    view pitch in pixels; dim / pitch rounds to the view count.  Falls back
    to a pitch of `dim` (a 1x1 quilt) when no usable peak exists on an axis.
    """
    EDGE = 8  # ignore peaks hugging the image edge/origin
    best_x = next((x for x in sorted(kp.pt[0] for kp in keypoints) if x > EDGE), dim)
    best_y = next((y for y in sorted(kp.pt[1] for kp in keypoints) if y > EDGE), dim)
    return round(dim / best_x), round(dim / best_y)


def _overlay_image(image, keypoints, dim):
    """Build the RGBA diagnostic image: the brightened source with detected
    keypoints drawn as red circles, resized to dim x dim."""
    img = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    # Double the brightness so the overlay is easy to see.  (The original
    # code discarded the GRAY2BGR result by reading `image` here; using the
    # converted `img` keeps the visual result while fixing the dead store.)
    img = cv2.addWeighted(img, 1, img, 1, 0)
    img = cv2.drawKeypoints(
        img, keypoints, np.array([]), (0, 0, 255),
        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    img = cv2.resize(img, (dim, dim), interpolation=cv2.INTER_NEAREST)
    return cv2.cvtColor(img, cv2.COLOR_RGB2RGBA)
def setup():
    """Wire updateImageDisplay up as the change handler for the file input."""
    # The callback must be wrapped in a JsProxy so the DOM can hold a
    # reference to it and call back into Python.
    listener = create_proxy(updateImageDisplay)
    document.getElementById("input").addEventListener("change", listener)


setup()