main
Jan-Niclas Loosen 8 months ago
parent 3a520b8f97
commit 16177ffa79

.idea/workspace.xml
@@ -5,8 +5,7 @@
</component>
<component name="ChangeListManager">
<list default="true" id="42ed9a51-9564-48c5-b5fa-035301b1578d" name="Changes" comment="">
<change beforePath="$PROJECT_DIR$/.idea/UNI_Python.iml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/UNI_Python.iml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/misc.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/misc.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/hosts.txt" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/ha_09/loosen_janniclas_1540907_10.py" beforeDir="false" afterPath="$PROJECT_DIR$/ha_09/loosen_janniclas_1540907_10.py" afterDir="false" />
</list>
@@ -160,6 +159,7 @@
<workItem from="1705414770423" duration="3134000" />
<workItem from="1705419658987" duration="941000" />
<workItem from="1705431087533" duration="5323000" />
<workItem from="1705489565514" duration="13418000" />
</task>
<servers />
</component>
@@ -179,7 +179,7 @@
</component>
<component name="com.intellij.coverage.CoverageDataManagerImpl">
<SUITE FILE_PATH="coverage/UNI_Python$loosen_janniclas_1540907_09__1_.coverage" NAME="loosen_janniclas_1540907_09 (1) Coverage Results" MODIFIED="1704717881190" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/ha_08" />
<SUITE FILE_PATH="coverage/UNI_Python$loosen_janniclas_1540907_10.coverage" NAME="loosen_janniclas_1540907_10 Coverage Results" MODIFIED="1705438650599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/ha_09" />
<SUITE FILE_PATH="coverage/UNI_Python$loosen_janniclas_1540907_10.coverage" NAME="loosen_janniclas_1540907_10 Coverage Results" MODIFIED="1705503074164" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/ha_09" />
<SUITE FILE_PATH="coverage/UNI_Python$control.coverage" NAME="control Coverage Results" MODIFIED="1705244854276" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/ext_01" />
<SUITE FILE_PATH="coverage/UNI_Python$loosen_janniclas_1540907_09.coverage" NAME="loosen_janniclas_1540907_09 Coverage Results" MODIFIED="1704711219437" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/ha_08" />
</component>

ha_09/loosen_janniclas_1540907_10.py
@@ -1,4 +1,5 @@
import random
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
import numpy as np
@@ -19,33 +20,40 @@ def count_randomized_hits(iterations):
return counter
FLAG_default = 0
FLAG_threaded = 1
FLAG_network = 2
def monte_carlo_methode(n, mode=0):
func_comm = MPI.COMM_WORLD
func_rank = func_comm.Get_rank()
func_size = func_comm.Get_size()
if mode == 1: # Multithreading mode
num_threads = 16
iterations_per_thread = n // num_threads
with multiprocessing.Pool(num_threads) as pool:
hits = pool.map(count_randomized_hits, [iterations_per_thread] * num_threads)
if func_rank == 0:
num_threads = 16
iterations_per_thread = n // num_threads
with ThreadPoolExecutor(max_workers=num_threads) as executor:
hits = list(executor.map(count_randomized_hits, [iterations_per_thread] * num_threads))
hit = sum(hits)
elif mode == 2: # MPI parallel mode
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
local_hit = count_randomized_hits(n // size)
hit = comm.reduce(local_hit, op=MPI.SUM, root=0)
func_comm = MPI.COMM_WORLD
func_rank = func_comm.Get_rank()
func_size = func_comm.Get_size()
n = func_comm.bcast(n, root=0)
local_hit = count_randomized_hits(n // func_size)
hit = func_comm.reduce(local_hit, op=MPI.SUM, root=0)
hit = int(func_comm.bcast(hit, root=0))
else: # Default mode
hit = count_randomized_hits(n)
if func_rank == 0:
hit = count_randomized_hits(n)
pi_approx = (hit / n) * 4
pi_diff = abs(np.pi - pi_approx)
if func_rank == 0:
pi_approx = (hit / n) * 4
pi_diff = abs(np.pi - pi_approx)
return pi_approx, pi_diff
return pi_approx, pi_diff
def uniform_kernel(n):
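The hunk above replaces the multiprocessing.Pool path with a ThreadPoolExecutor on rank 0 and adds bcast/reduce collectives for the network mode. As a reference point, here is a minimal standalone sketch of that bcast/reduce pattern, assuming mpi4py is installed (sample_hits and pi_sketch.py are illustrative names, not from the commit); run it with e.g. mpiexec -n 4 python pi_sketch.py:

import random
from mpi4py import MPI

def sample_hits(iterations):
    # count random points in the unit square that land inside the quarter circle
    hits = 0
    for _ in range(iterations):
        x, y = random.random(), random.random()
        if x * x + y * y <= 1.0:
            hits += 1
    return hits

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

n = 1_000_000 if rank == 0 else None
n = comm.bcast(n, root=0)          # every rank learns the sample count
local = sample_hits(n // size)     # each rank draws its share
total = comm.reduce(local, op=MPI.SUM, root=0)
if rank == 0:
    samples = (n // size) * size   # actual number of points drawn
    print(4 * total / samples)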
@@ -65,33 +73,33 @@ def gauss_kernel(s):
return K
FLAG_gauss = 0
FLAG_uniform = 1
def process_image_part(data_part, kernel, padding):
y_part_size, x_part_size, _ = data_part.shape
data_part_new = np.zeros((data_part.shape[0] - padding[0], data_part.shape[1] - padding[1], 3))
pad_y, pad_x = padding
data_part_new = np.zeros((y_part_size - 2 * pad_y, x_part_size - 2 * pad_x, 3))
# DO NOT CHANGE THIS LOOP
for i in range(pad_y, y_part_size - pad_y):
for j in range(pad_x, x_part_size - pad_x):
for k in range(3):
new_value = 0.0
for ii in range(kernel.shape[0]):
for jj in range(kernel.shape[1]):
iii = ii - pad_y
jjj = jj - pad_x
for ii in range(kernel.shape[1]):
for jj in range(kernel.shape[0]):
iii = ii - (kernel.shape[1] - 1) // 2
jjj = jj - (kernel.shape[0] - 1) // 2
new_value += kernel[ii, jj] * data_part[i + iii, j + jjj, k]
data_part_new[i - pad_y, j - pad_x, k] = new_value
return data_part_new
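The rewritten inner loops center the kernel using its own shape instead of the padding tuple; for an odd-sized square kernel, as used here, (k - 1) // 2 equals k // 2, so the offsets ii - pad_y and jj - pad_x sweep the window symmetrically around (i, j). A single-channel sketch of the same indexing, assuming NumPy (convolve_valid is an illustrative name, not from the commit):

import numpy as np

def convolve_valid(data, kernel):
    kh, kw = kernel.shape
    pad_y, pad_x = (kh - 1) // 2, (kw - 1) // 2
    out = np.zeros((data.shape[0] - 2 * pad_y, data.shape[1] - 2 * pad_x))
    for i in range(pad_y, data.shape[0] - pad_y):
        for j in range(pad_x, data.shape[1] - pad_x):
            acc = 0.0
            for ii in range(kh):
                for jj in range(kw):
                    # offsets run from -pad to +pad around (i, j)
                    acc += kernel[ii, jj] * data[i + ii - pad_y, j + jj - pad_x]
            out[i - pad_y, j - pad_x] = acc
    return out

box = np.full((3, 3), 1.0 / 9.0)   # 3x3 box-blur kernel
print(convolve_valid(np.arange(25, dtype=float).reshape(5, 5), box))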
def split_array(arr, n, overlap):
sub_array_length = (len(arr) + (n - 1) * overlap) // n
sub_arrays = [arr[i * (sub_array_length - overlap): i * (sub_array_length - overlap) + sub_array_length] for i in range(n)]
return np.array(sub_arrays)
def overlapping_submatrices(arr, n, overlap):
sub_array_length = len(arr) // n
# Admittedly, I only got this loop working with ChatGPT's help :O
sub_arrays = [
arr[i * sub_array_length - min(i * overlap, overlap): (i + 1) * sub_array_length + min((n - i - 1) * overlap, overlap)]
for i in range(n)
]
return np.array(sub_arrays, dtype=object)
def process_image(img, func=0, mode=0):
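overlapping_submatrices extends each of the n chunks into its neighbours so every chunk carries the halo rows the convolution consumes at its edges. A standalone sketch of the same idea, with a simpler slice expression than the one above and a small worked example:

import numpy as np

def overlapping_chunks(arr, n, overlap):
    # each chunk grabs up to `overlap` extra rows from each neighbour
    base = len(arr) // n
    chunks = []
    for i in range(n):
        start = max(i * base - overlap, 0)
        stop = min((i + 1) * base + overlap, len(arr))
        chunks.append(arr[start:stop])
    return chunks

for c in overlapping_chunks(np.arange(12), 3, 1):
    print(c)
# -> [0 1 2 3 4], [3 4 5 6 7 8], [7 8 9 10 11]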
@@ -102,34 +110,44 @@ def process_image(img, func=0, mode=0):
img = img.convert(mode="RGB")
data = np.asarray(img, dtype=np.float64) / 255.0
padding = 9
print(f"Before: {data.shape}")
if func == 1:
kernel = uniform_kernel(7)
kernel = uniform_kernel(padding)
else:
kernel = gauss_kernel(3)
kernel = gauss_kernel(padding)
padding = [(kernel.shape[0] // 2), kernel.shape[1] // 2]
padding = (padding // 2, padding // 2)
if mode == 1: # Multithreading mode
num_threads = 16
data_parts = split_array(data, num_threads, padding[0])
with multiprocessing.Pool(num_threads) as pool:
data_new_parts = pool.starmap(process_image_part, zip(data_parts, [kernel]*num_threads, [padding]*num_threads))
num_threads = 5
data_parts = overlapping_submatrices(data, num_threads, padding[0])
with ThreadPoolExecutor(max_workers=num_threads) as executor:
data_new_parts = list(
executor.map(process_image_part, data_parts, [kernel] * num_threads, [padding] * num_threads))
data_new = np.concatenate(data_new_parts, axis=0)
elif mode == 2: # MPI parallel mode
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
data_part = np.array_split(data, size, axis=0)[rank]
data_new_part = process_image_part(data_part, kernel, padding)
data_new_parts = comm.gather(data_new_part, root=0)
if rank == 0:
data_parts = overlapping_submatrices(data, size, padding[0])
else:
data_parts = None
data_part = comm.scatter(data_parts, root=0)
data_part_new = process_image_part(data_part, kernel, padding)
data_new_parts = comm.gather(data_part_new, root=0)  # collective: every rank must call it
if rank == 0:
data_new = np.concatenate(data_new_parts, axis=0)
else:
data_new = None
data_new = comm.bcast(data_new, root=0)
else: # Default mode
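In the MPI branch above, rank 0 builds the overlapping chunks, scatter hands one chunk to each rank, and the filtered pieces are gathered and broadcast back out. Scatter and gather are collectives, so every rank has to execute both calls. A minimal standalone round-trip sketch, assuming mpi4py and NumPy (blur_rows is a hypothetical stand-in for process_image_part):

import numpy as np
from mpi4py import MPI

def blur_rows(block):
    # stand-in for the real per-chunk filter
    return block * 0.5

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

if rank == 0:
    data = np.arange(size * 4, dtype=np.float64).reshape(size * 4, 1)
    parts = np.array_split(data, size, axis=0)
else:
    parts = None

part = comm.scatter(parts, root=0)         # every rank receives one chunk
part_new = blur_rows(part)
parts_new = comm.gather(part_new, root=0)  # every rank sends its chunk back
if rank == 0:
    result = np.concatenate(parts_new, axis=0)
else:
    result = None
result = comm.bcast(result, root=0)        # all ranks end with the full array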
@@ -137,20 +155,30 @@ def process_image(img, func=0, mode=0):
data_new = data_new * 255.0
data_new = np.uint8(data_new)
print(f"After: {data_new.shape}")
return Image.fromarray(data_new, mode="RGB")
if __name__ == '__main__':
print(monte_carlo_methode(1000, FLAG_default))
print(monte_carlo_methode(1000, FLAG_threaded))
print(monte_carlo_methode(1000, FLAG_network))
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
FLAG_gauss = 0
FLAG_uniform = 1
FLAG_default = 0
FLAG_threaded = 1
FLAG_network = 2
if rank == 0:
print(monte_carlo_methode(1000, mode=FLAG_default))
print(monte_carlo_methode(1000, mode=FLAG_threaded))
print(monte_carlo_methode(1000, mode=FLAG_network))
url = "https://i.wfcdn.de/teaser/660/27020.jpg"
response = requests.get(url)
if response.status_code == 200:
image = Image.open(BytesIO(response.content))
image = process_image(image, FLAG_uniform, FLAG_threaded)
image = process_image(image, FLAG_uniform, FLAG_network)
image.show()
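Because the network modes contain bcast/reduce collectives, every rank has to enter them: a driver that calls collective code only inside an if rank == 0: block will deadlock as soon as more than one rank runs. A standalone sketch of the safer driver shape, assuming mpi4py (collective_work is a hypothetical stand-in for monte_carlo_methode or process_image in network mode):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

def collective_work(n):
    # contains collectives, so all ranks must call it together
    n = comm.bcast(n, root=0)
    local = n // comm.Get_size()
    return comm.reduce(local, op=MPI.SUM, root=0)

result = collective_work(1000 if rank == 0 else None)  # all ranks enter
if rank == 0:
    print(result)  # only rank 0 does I/O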

hosts.txt
@@ -0,0 +1,2 @@
192.168.178.77:2
192.168.178.73:2
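The new hosts.txt lists one machine per line in host:slots form, giving each host two MPI slots. Assuming an MPICH-style launcher, the script could then be started across both machines with something like mpiexec -f hosts.txt -n 4 python ha_09/loosen_janniclas_1540907_10.py; the hostfile flag varies by MPI distribution (Open MPI uses --hostfile and expects a 'slots=2' syntax instead of the colon form).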