import random
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO

import numpy as np
import requests
from mpi4py import MPI
from PIL import Image

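# mpi4py needs a working MPI implementation (e.g. Open MPI or MPICH) available
# at import time; everything else is standard library plus numpy, Pillow and
# requests.
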
def count_randomized_hits(iterations):
    """Count how many of `iterations` uniform random points in the unit
    square land inside the unit quarter circle."""
    counter = 0
    for _ in range(iterations):
        x = random.uniform(0, 1)
        y = random.uniform(0, 1)
        # x, y >= 0, so comparing the squared distance avoids the sqrt.
        if x ** 2 + y ** 2 <= 1:
            counter += 1
    return counter

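# The hit fraction estimates the quarter-circle area pi/4, so
# 4 * hits / iterations approximates pi. For example, with
# iterations = 10**6 the count is typically near 785,400 (pi/4 = 0.7853...).
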
def monte_carlo_method(n, mode=0):
    """Approximate pi from n random samples.

    mode 0: serial, mode 1: ThreadPoolExecutor, mode 2: MPI.
    Returns (pi_approx, abs_error) on MPI rank 0, None on other ranks.
    """
    func_comm = MPI.COMM_WORLD
    func_rank = func_comm.Get_rank()
    func_size = func_comm.Get_size()

    if mode == 1:  # Multithreading mode
        if func_rank == 0:
            num_threads = 16
            # Distribute the remainder so that exactly n samples are drawn;
            # a flat n // num_threads per thread would bias hit / n below.
            counts = [n // num_threads + (1 if t < n % num_threads else 0)
                      for t in range(num_threads)]

            with ThreadPoolExecutor(max_workers=num_threads) as executor:
                hits = list(executor.map(count_randomized_hits, counts))
            hit = sum(hits)

    elif mode == 2:  # MPI parallel mode
        n = func_comm.bcast(n, root=0)
        # Distribute the remainder so the ranks draw exactly n samples in total.
        local_n = n // func_size + (1 if func_rank < n % func_size else 0)
        local_hit = count_randomized_hits(local_n)
        hit = func_comm.reduce(local_hit, op=MPI.SUM, root=0)

    else:  # Default (serial) mode
        if func_rank == 0:
            hit = count_randomized_hits(n)

    if func_rank == 0:
        pi_approx = (hit / n) * 4
        pi_diff = abs(np.pi - pi_approx)
        return pi_approx, pi_diff

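# Note: mode=2 uses MPI collectives, so every rank must call
# monte_carlo_method together, and the script has to be started under an MPI
# launcher for more than one rank to exist, e.g.
#   mpiexec -n 4 python <this_script>.py
# (mpirun is a common alias; the launcher name depends on the MPI distribution).
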
def uniform_kernel(n):
    """Return an n x n box-blur kernel whose entries sum to 1."""
    if n % 2 == 0:
        raise ValueError("Kernel size needs to be odd")
    K = np.ones((n, n)) / (n * n)
    return K

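# Example: uniform_kernel(3) is a 3 x 3 matrix with every entry 1/9, so
# convolving with it replaces each pixel by the mean of its 3 x 3 neighborhood.
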
def gauss_kernel(s):
    """Return a normalized Gaussian kernel with standard deviation s,
    truncated at 3 standard deviations (size 6s + 1 per side)."""
    n = 3 * s
    pos = np.arange(-n, n + 1)
    xx, yy = np.meshgrid(pos, pos)
    K = 1.0 / (2.0 * np.pi * s * s) * np.exp(-(xx ** 2 + yy ** 2) / (2.0 * s * s))
    # Renormalize so the truncated kernel still sums exactly to 1.
    K = K / K.sum()
    return K

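# Example: gauss_kernel(1) covers offsets -3..3 and is therefore 7 x 7;
# gauss_kernel(2) covers -6..6 and is 13 x 13.
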
def process_image_part(data_part, kernel, padding):
    """Convolve one image strip with `kernel`.

    `padding` is (pad_y, pad_x); the returned strip is smaller by
    2 * pad_y rows and 2 * pad_x columns because only pixels with a
    full kernel neighborhood are computed.
    """
    y_part_size, x_part_size, _ = data_part.shape
    pad_y, pad_x = padding
    data_part_new = np.zeros((y_part_size - 2 * pad_y, x_part_size - 2 * pad_x, 3))

    # DO NOT CHANGE THIS LOOP
    for i in range(pad_y, y_part_size - pad_y):
        for j in range(pad_x, x_part_size - pad_x):
            for k in range(3):
                new_value = 0.0
                for ii in range(kernel.shape[1]):
                    for jj in range(kernel.shape[0]):
                        iii = ii - (kernel.shape[1] - 1) // 2
                        jjj = jj - (kernel.shape[0] - 1) // 2
                        new_value += kernel[ii, jj] * data_part[i + iii, j + jjj, k]
                data_part_new[i - pad_y, j - pad_x, k] = new_value
    return data_part_new

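# The naive convolution above costs O(H * W * 3 * k^2) multiply-adds for a
# k x k kernel, which is why the image is split into strips and processed in
# parallel below.
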
def overlapping_submatrices(arr, n, overlap):
    """Split `arr` along axis 0 into n chunks that share `overlap` rows with
    each neighbor, so the convolution can trim its borders afterwards."""
    sub_array_length = len(arr) // n
    # Admittedly, I only managed this loop with ChatGPT's help :O
    sub_arrays = [
        arr[i * sub_array_length - min(i * overlap, overlap):
            (i + 1) * sub_array_length + min((n - i - 1) * overlap, overlap)]
        for i in range(n)
    ]
    # Return a plain list: the chunks have unequal shapes, and both
    # executor.map and comm.scatter accept any sequence.
    return sub_arrays

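# Example: splitting 10 rows into n=2 chunks with overlap=2 yields rows 0..6
# and rows 3..9, i.e. two 7-row chunks that share rows 3..6.
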
def process_image(img, func=0, mode=0):
    """Blur `img` (a path or a PIL image) with a uniform kernel (func=1) or a
    Gaussian kernel, serially (mode=0), threaded (mode=1) or via MPI (mode=2)."""
    if isinstance(img, str):
        img = Image.open(img)

    if img.mode != "RGB":
        img = img.convert(mode="RGB")

    data = np.asarray(img, dtype=np.float64) / 255.0
    kernel_param = 9

    print(f"Before: {data.shape}")

    if func == 1:
        kernel = uniform_kernel(kernel_param)
    else:
        kernel = gauss_kernel(kernel_param)

    # Derive the border width from the kernel actually built: the Gaussian
    # kernel is 6 * s + 1 wide, not s, so the padding must not be hard-coded.
    padding = ((kernel.shape[0] - 1) // 2, (kernel.shape[1] - 1) // 2)

    if mode == 1:  # Multithreading mode
        num_threads = 5
        data_parts = overlapping_submatrices(data, num_threads, padding[0])

        with ThreadPoolExecutor(max_workers=num_threads) as executor:
            data_new_parts = list(
                executor.map(process_image_part, data_parts,
                             [kernel] * num_threads, [padding] * num_threads))
        data_new = np.concatenate(data_new_parts, axis=0)

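    # CPython's GIL means the threads above largely take turns on the
    # pure-Python convolution loop, so mode 1 demonstrates the structure of
    # thread parallelism more than it guarantees a speed-up.
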
    elif mode == 2:  # MPI parallel mode
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()

        if rank == 0:
            data_parts = overlapping_submatrices(data, size, padding[0])
        else:
            data_parts = None

        data_part = comm.scatter(data_parts, root=0)
        data_part_new = process_image_part(data_part, kernel, padding)

        # gather() is collective: every rank must call it, not just rank 0,
        # otherwise the program deadlocks.
        data_new_parts = comm.gather(data_part_new, root=0)
        if rank == 0:
            data_new = np.concatenate(data_new_parts, axis=0)
        else:
            data_new = None

        data_new = comm.bcast(data_new, root=0)

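    # comm.scatter expects exactly one chunk per rank, which is why rank 0
    # splits the image into `size` parts before scattering.
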
    else:  # Default (serial) mode
        data_new = process_image_part(data, kernel, padding)

    data_new = (data_new * 255.0).astype(np.uint8)
    print(f"After: {data_new.shape}")
    return Image.fromarray(data_new, mode="RGB")

if __name__ == '__main__':
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    FLAG_gauss = 0
    FLAG_uniform = 1
    FLAG_default = 0
    FLAG_threaded = 1
    FLAG_network = 2

    # Modes 0 and 1 run entirely on rank 0; mode 2 uses collectives, so every
    # rank has to enter the call and only rank 0 gets a result back.
    if rank == 0:
        print(monte_carlo_method(1000, mode=FLAG_default))
        print(monte_carlo_method(1000, mode=FLAG_threaded))
    result = monte_carlo_method(1000, mode=FLAG_network)
    if rank == 0:
        print(result)

    url = "https://i.wfcdn.de/teaser/660/27020.jpg"
    response = requests.get(url)

    if response.status_code == 200:
        image = Image.open(BytesIO(response.content))
        # Every rank must enter process_image for the scatter/gather inside,
        # but only rank 0 should open a viewer window.
        image = process_image(image, FLAG_uniform, FLAG_network)
        if rank == 0:
            image.show()
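
# With a single rank (e.g. a plain `python` invocation without a launcher),
# the MPI paths degrade gracefully: bcast, scatter and gather on a size-1
# communicator are trivial, so the same file runs with or without mpiexec.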