# UNI_Python/ha_09/loosen_janniclas_1540907_10.py
import random
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO

import numpy as np
import requests
from mpi4py import MPI
from PIL import Image


def count_randomized_hits(iterations):
    # Draw points uniformly from the unit square and count those that fall
    # inside the quarter circle of radius 1.
    counter = 0
    for i in range(iterations):
        x = random.uniform(0, 1)
        y = random.uniform(0, 1)
        if ((x ** 2) + (y ** 2)) ** (1 / 2) <= 1:
            counter += 1
    return counter
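
# Sanity check (hypothetical usage, not called anywhere in this file): the hit
# ratio approaches pi/4 as the sample count grows, e.g.
#   >>> 4 * count_randomized_hits(1_000_000) / 1_000_000
#   3.1418...  (approximately; the exact value is random)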


def monte_carlo_methode(n, mode=0):
    func_comm = MPI.COMM_WORLD
    func_rank = func_comm.Get_rank()
    func_size = func_comm.Get_size()
    if mode == 1:  # Multithreading mode
        if func_rank == 0:
            num_threads = 16
            iterations_per_thread = n // num_threads
            with ThreadPoolExecutor(max_workers=num_threads) as executor:
                hits = list(executor.map(count_randomized_hits, [iterations_per_thread] * num_threads))
            hit = sum(hits)
            n = iterations_per_thread * num_threads  # actual number of samples drawn
    elif mode == 2:  # MPI parallel mode: collective, every rank must enter this call
        n = func_comm.bcast(n, root=0)
        local_hit = count_randomized_hits(n // func_size)
        hit = func_comm.reduce(local_hit, op=MPI.SUM, root=0)
        hit = int(func_comm.bcast(hit, root=0))
        n = (n // func_size) * func_size  # actual number of samples drawn
    else:  # Default mode
        if func_rank == 0:
            hit = count_randomized_hits(n)
    if func_rank == 0:
        pi_approx = (hit / n) * 4
        pi_diff = abs(np.pi - pi_approx)
        return pi_approx, pi_diff
    # Non-root ranks return None.
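
# Assumed launch commands (not part of the assignment): modes 0 and 1 work in
# a plain single process, while mode 2 needs an MPI launcher, e.g.
#   python loosen_janniclas_1540907_10.py
#   mpiexec -n 4 python loosen_janniclas_1540907_10.py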


def uniform_kernel(n):
    if n % 2 == 0:
        print("Size needs to be odd")
        exit(1)
    K = 1 / n / n * np.ones([n, n])
    return K
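
# Example (hypothetical): uniform_kernel(3) is a 3x3 box filter whose nine
# entries are all 1/9, so the weights sum to 1 and the filter acts as a plain
# moving average.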


def gauss_kernel(s):
    # Gaussian blur kernel with standard deviation s, truncated at 3 * s.
    n = 3 * s
    pos = np.arange(-n, n + 1)
    x = np.meshgrid(pos, pos)
    K = 1.0 / (2.0 * np.pi * s * s) * np.exp(-(x[0] ** 2 + x[1] ** 2) / (2.0 * s * s))
    K = K / K.sum()  # normalize so the weights sum to exactly 1
    return K
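
# Example (hypothetical): the grid runs from -3s to 3s, so gauss_kernel(1)
# returns a 7x7 array whose entries sum to 1.0 up to float rounding:
#   >>> K = gauss_kernel(1)
#   >>> K.shape, round(float(K.sum()), 6)
#   ((7, 7), 1.0)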


def process_image_part(data_part, kernel, padding):
    y_part_size, x_part_size, _ = data_part.shape
    pad_y, pad_x = padding
    data_part_new = np.zeros((y_part_size - 2 * pad_y, x_part_size - 2 * pad_x, 3))
    # DO NOT CHANGE THIS LOOP
    for i in range(pad_y, y_part_size - pad_y):
        for j in range(pad_x, x_part_size - pad_x):
            for k in range(3):
                new_value = 0.0
                for ii in range(kernel.shape[1]):
                    for jj in range(kernel.shape[0]):
                        iii = ii - (kernel.shape[1] - 1) // 2
                        jjj = jj - (kernel.shape[0] - 1) // 2
                        new_value += kernel[ii, jj] * data_part[i + iii, j + jjj, k]
                data_part_new[i - pad_y, j - pad_x, k] = new_value
    return data_part_new
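
# The quadruple loop above is a direct, unvectorized 2D correlation: (iii, jjj)
# are the kernel offsets around the current pixel. It only stays in bounds as
# long as padding matches the kernel's half-width, which process_image below
# derives from the kernel shape.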


def overlapping_submatrices(arr, n, overlap):
    sub_array_length = len(arr) // n
    # Admittedly, I only got this loop working with the help of ChatGPT :O
    sub_arrays = [
        arr[i * sub_array_length - min(i * overlap, overlap):
            (i + 1) * sub_array_length + min((n - i - 1) * overlap, overlap)]
        for i in range(n)
    ]
    return np.array(sub_arrays, dtype=object)
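
# Worked example (hypothetical values): a 100-row array split with n=4 and
# overlap=4 gives the row slices [0:29], [21:54], [46:79], [71:100], so
# adjacent parts share 2 * overlap = 8 rows. process_image_part later trims
# overlap rows from each border, so the processed parts concatenate without
# duplicated rows.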


def process_image(img, func=0, mode=0):
    if isinstance(img, str):
        img = Image.open(img)
    if img.mode == "P":
        img = img.convert(mode="RGB")
    data = np.asarray(img, dtype=np.float64) / 255.0
    padding = 9
    print(f"Before: {data.shape}")
    if func == 1:
        kernel = uniform_kernel(padding)
    else:
        kernel = gauss_kernel(padding)
    # Derive the border size from the actual kernel: gauss_kernel(s) is
    # (6 * s + 1) wide, not s wide, so the padding must follow the kernel shape.
    padding = (kernel.shape[0] // 2, kernel.shape[1] // 2)
    if mode == 1:  # Multithreading mode
        num_threads = 5
        data_parts = overlapping_submatrices(data, num_threads, padding[0])
        with ThreadPoolExecutor(max_workers=num_threads) as executor:
            data_new_parts = list(
                executor.map(process_image_part, data_parts, [kernel] * num_threads, [padding] * num_threads))
        data_new = np.concatenate(data_new_parts, axis=0)
    elif mode == 2:  # MPI parallel mode: collective, every rank must enter this call
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()
        if rank == 0:
            data_parts = overlapping_submatrices(data, size, padding[0])
        else:
            data_parts = None
        data_part = comm.scatter(data_parts, root=0)
        data_part_new = process_image_part(data_part, kernel, padding)
        # gather is collective, so all ranks call it, not just the root
        data_new_parts = comm.gather(data_part_new, root=0)
        if rank == 0:
            data_new = np.concatenate(data_new_parts, axis=0)
        else:
            data_new = None
        data_new = comm.bcast(data_new, root=0)
    else:  # Default mode
        data_new = process_image_part(data, kernel, padding)
    data_new = data_new * 255.0
    data_new = np.uint8(data_new)
    print(f"After: {data_new.shape}")
    return Image.fromarray(data_new, mode="RGB")
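
# Shape note (follows from process_image_part): the convolution trims pad_y
# rows and pad_x columns from every border, so in all three modes the output
# image is (H - 2 * pad_y) x (W - 2 * pad_x) pixels.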


if __name__ == '__main__':
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    FLAG_gauss = 0
    FLAG_uniform = 1
    FLAG_default = 0
    FLAG_threaded = 1
    FLAG_network = 2

    if rank == 0:
        print(monte_carlo_methode(1000, mode=FLAG_default))
        print(monte_carlo_methode(1000, mode=FLAG_threaded))
    # Mode 2 is collective: every rank has to make the call or MPI deadlocks.
    result = monte_carlo_methode(1000, mode=FLAG_network)
    if rank == 0:
        print(result)

    url = "https://i.wfcdn.de/teaser/660/27020.jpg"
    content = None
    if rank == 0:
        response = requests.get(url)
        if response.status_code == 200:
            content = response.content
    # Broadcast the raw bytes so every rank can take part in the collective
    # image processing; only rank 0 talks to the network.
    content = comm.bcast(content, root=0)
    if content is not None:
        image = Image.open(BytesIO(content))
        image = process_image(image, FLAG_uniform, FLAG_network)
        if rank == 0:
            image.show()