From 16177ffa797d59bf465700cade0511b4f84707c7 Mon Sep 17 00:00:00 2001
From: Loosen-IT
Date: Wed, 17 Jan 2024 17:12:33 +0100
Subject: [PATCH] 17/01/2023

---
 .idea/workspace.xml                  |   6 +-
 ha_09/loosen_janniclas_1540907_10.py | 130 ++++++++++++++++-----------
 hosts.txt                            |   2 +
 3 files changed, 84 insertions(+), 54 deletions(-)
 create mode 100644 hosts.txt

diff --git a/.idea/workspace.xml b/.idea/workspace.xml
index cacf0ff..0090b45 100644
--- a/.idea/workspace.xml
+++ b/.idea/workspace.xml
@@ -5,8 +5,7 @@
-
-
+
@@ -160,6 +159,7 @@
+
@@ -179,7 +179,7 @@
-
+
diff --git a/ha_09/loosen_janniclas_1540907_10.py b/ha_09/loosen_janniclas_1540907_10.py
index d5746ad..35365b0 100644
--- a/ha_09/loosen_janniclas_1540907_10.py
+++ b/ha_09/loosen_janniclas_1540907_10.py
@@ -1,4 +1,5 @@
 import random
+from concurrent.futures import ThreadPoolExecutor
 from io import BytesIO
 
 import numpy as np
@@ -19,33 +20,40 @@ def count_randomized_hits(iterations):
     return counter
 
 
-FLAG_default = 0
-FLAG_threaded = 1
-FLAG_network = 2
-
-
 def monte_carlo_methode(n, mode=0):
+    func_comm = MPI.COMM_WORLD
+    func_rank = func_comm.Get_rank()
+    func_size = func_comm.Get_size()
+
     if mode == 1:  # Multithreading mode
-        num_threads = 16
-        iterations_per_thread = n // num_threads
-        with multiprocessing.Pool(num_threads) as pool:
-            hits = pool.map(count_randomized_hits, [iterations_per_thread] * num_threads)
+        if func_rank == 0:
+            num_threads = 16
+            iterations_per_thread = n // num_threads
+
+            with ThreadPoolExecutor(max_workers=num_threads) as executor:
+                hits = list(executor.map(count_randomized_hits, [iterations_per_thread] * num_threads))
         hit = sum(hits)
     elif mode == 2:  # MPI parallel mode
-        comm = MPI.COMM_WORLD
-        rank = comm.Get_rank()
-        size = comm.Get_size()
-        local_hit = count_randomized_hits(n // size)
-        hit = comm.reduce(local_hit, op=MPI.SUM, root=0)
+        func_comm = MPI.COMM_WORLD
+        func_rank = func_comm.Get_rank()
+        func_size = func_comm.Get_size()
+
+        n = func_comm.bcast(n, root=0)
+
+        local_hit = count_randomized_hits(n // func_size)
+        hit = func_comm.reduce(local_hit, op=MPI.SUM, root=0)
+        hit = int(func_comm.bcast(hit, root=0))
     else:  # Default mode
-        hit = count_randomized_hits(n)
+        if func_rank == 0:
+            hit = count_randomized_hits(n)
 
-    pi_approx = (hit / n) * 4
-    pi_diff = abs(np.pi - pi_approx)
+    if func_rank == 0:
+        pi_approx = (hit / n) * 4
+        pi_diff = abs(np.pi - pi_approx)
+        return pi_approx, pi_diff
 
-    return pi_approx, pi_diff
 
 
 def uniform_kernel(n):
@@ -65,33 +73,33 @@ def gauss_kernel(s):
     return K
 
 
-FLAG_gauss = 0
-FLAG_uniform = 1
-
-
 def process_image_part(data_part, kernel, padding):
     y_part_size, x_part_size, _ = data_part.shape
-
-    data_part_new = np.zeros((data_part.shape[0] - padding[0], data_part.shape[1] - padding[1], 3))
-
     pad_y, pad_x = padding
+    data_part_new = np.zeros((y_part_size - 2 * pad_y, x_part_size - 2 * pad_x, 3))
 
+    # DO NOT CHANGE THIS LOOP
     for i in range(pad_y, y_part_size - pad_y):
         for j in range(pad_x, x_part_size - pad_x):
             for k in range(3):
                 new_value = 0.0
-                for ii in range(kernel.shape[0]):
-                    for jj in range(kernel.shape[1]):
-                        iii = ii - pad_y
-                        jjj = jj - pad_x
+                for ii in range(kernel.shape[1]):
+                    for jj in range(kernel.shape[0]):
+                        iii = ii - (kernel.shape[1] - 1) // 2
+                        jjj = jj - (kernel.shape[0] - 1) // 2
                         new_value += kernel[ii, jj] * data_part[i + iii, j + jjj, k]
                 data_part_new[i - pad_y, j - pad_x, k] = new_value
     return data_part_new
 
 
-def split_array(arr, n, overlap):
-    sub_array_length = (len(arr) + (n - 1) * overlap) // n
-    sub_arrays = [arr[i * (sub_array_length - overlap): i *
-                      (sub_array_length - overlap) + sub_array_length] for i in range(n)]
-    return np.array(sub_arrays)
+def overlapping_submatrices(arr, n, overlap):
+    sub_array_length = len(arr) // n
+    # Admittedly, I only got this loop working with help from ChatGPT :O
+    sub_arrays = [
+        arr[i * sub_array_length - min(i * overlap, overlap): (i + 1) * sub_array_length + min((n - i - 1) * overlap,
+                                                                                                overlap)]
+        for i in range(n)
+    ]
+    return np.array(sub_arrays, dtype=object)
 
 
 def process_image(img, func=0, mode=0):
@@ -102,34 +110,44 @@ def process_image(img, func=0, mode=0):
     img = img.convert(mode="RGB")
     data = np.asarray(img, dtype=np.float64) / 255.0
+    padding = 9
+
+    print(f"Before: {data.shape}")
     if func == 1:
-        kernel = uniform_kernel(7)
+        kernel = uniform_kernel(padding)
     else:
-        kernel = gauss_kernel(3)
+        kernel = gauss_kernel(padding)
 
-    padding = [(kernel.shape[0] // 2), kernel.shape[1] // 2]
+    padding = (padding // 2, padding // 2)
 
     if mode == 1:  # Multithreading mode
-        num_threads = 16
-
-        data_parts = split_array(data, num_threads, padding[0])
-
-        with multiprocessing.Pool(num_threads) as pool:
-            data_new_parts = pool.starmap(process_image_part, zip(data_parts, [kernel]*num_threads, [padding]*num_threads))
+        num_threads = 5
+        data_parts = overlapping_submatrices(data, num_threads, padding[0])
+        with ThreadPoolExecutor(max_workers=num_threads) as executor:
+            data_new_parts = list(
+                executor.map(process_image_part, data_parts, [kernel] * num_threads, [padding] * num_threads))
         data_new = np.concatenate(data_new_parts, axis=0)
     elif mode == 2:  # MPI parallel mode
         comm = MPI.COMM_WORLD
         rank = comm.Get_rank()
         size = comm.Get_size()
-        data_part = np.array_split(data, size, axis=0)[rank]
-        data_new_part = process_image_part(data_part, kernel, padding)
-        data_new_parts = comm.gather(data_new_part, root=0)
+        if rank == 0:
+            data_parts = overlapping_submatrices(data, size, padding[0])
+        else:
+            data_parts = None
+
+        data_part = comm.scatter(data_parts, root=0)
+        data_part_new = process_image_part(data_part, kernel, padding)
+
+        if rank == 0:
+            data_new_parts = comm.gather(data_part_new, root=0)
             data_new = np.concatenate(data_new_parts, axis=0)
         else:
            data_new = None
+        data_new = comm.bcast(data_new, root=0)
     else:
         # Default mode
@@ -137,20 +155,30 @@ def process_image(img, func=0, mode=0):
     data_new = data_new * 255.0
     data_new = np.uint8(data_new)
-
+    print(f"After: {data_new.shape}")
     return Image.fromarray(data_new, mode="RGB")
 
 
-
 if __name__ == '__main__':
-    print(monte_carlo_methode(1000, FLAG_default))
-    print(monte_carlo_methode(1000, FLAG_threaded))
-    print(monte_carlo_methode(1000, FLAG_network))
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+    size = comm.Get_size()
+
+    FLAG_gauss = 0
+    FLAG_uniform = 1
+    FLAG_default = 0
+    FLAG_threaded = 1
+    FLAG_network = 2
+
+    if rank == 0:
+        print(monte_carlo_methode(1000, mode=FLAG_default))
+        print(monte_carlo_methode(1000, mode=FLAG_threaded))
+        print(monte_carlo_methode(1000, mode=FLAG_network))
 
     url = "https://i.wfcdn.de/teaser/660/27020.jpg"
     response = requests.get(url)
     if response.status_code == 200:
         image = Image.open(BytesIO(response.content))
-        image = process_image(image, FLAG_uniform, FLAG_threaded)
+        image = process_image(image, FLAG_uniform, FLAG_network)
         image.show()
diff --git a/hosts.txt b/hosts.txt
new file mode 100644
index 0000000..a99417a
--- /dev/null
+++ b/hosts.txt
@@ -0,0 +1,2 @@
+192.168.178.77:2
+192.168.178.73:2
\ No newline at end of file
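
Usage note (not part of the patch): hosts.txt uses the "host:slots" layout of MPICH-style hostfiles, and the new FLAG_network paths rely on mpi4py collectives (bcast, scatter, gather, reduce), so the script presumably has to be started through an MPI launcher on every machine listed. Assuming an MPICH/Hydra mpiexec and that the repository and mpi4py are available at the same path on both hosts, a run might look roughly like this; the process count of 4 is only an illustrative choice matching the four slots declared in hosts.txt:

    mpiexec -f hosts.txt -n 4 python ha_09/loosen_janniclas_1540907_10.py

With Open MPI the hostfile syntax differs (one "hostname slots=N" entry per line) and the hostfile is passed with --hostfile instead of -f.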