Commit 996a5735 authored by Cecilia Nievas

Merge branch 'feature/ind04' into 'master'

Added subsequent functions to create cells around SERA industrial points

See merge request !14
parents 2574d4b2 3987a530
@@ -746,3 +746,486 @@ def test_swell_cells_with_buffer():
for i in range(len(expected_geometries)):
assert expected_geometries[i] == function_gdf["geometry"].values[i]
def test_determine_cardinal_point():
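    # determine_cardinal_point is expected to map angles (measured counter-clockwise from
    # East) onto cardinal points: theta_threshold is the half-width of the band assigned
    # to each of E/N/W/S (e.g. E covers [-25, 25] deg here), the intermediate points
    # (NE, NW, SE, SW) fill the ranges in between, and angles outside [-180, 180] deg are
    # expected to yield "UU" (unknown).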
# Create input data for a realistic test:
theta_threshold = 25.0
angles_deg = np.array(
[
0.0,
24.0,
26.0,
64.0,
66.0,
90.0,
114.0,
116.0,
154.0,
156.0,
180.0,
185.0,
-0.1,
-24.0,
-26.0,
-64.0,
-66.0,
-90.0,
-114.0,
-116.0,
-154.0,
-156.0,
-180.0,
-185.0,
]
)
expected_output = np.array(
[
"E",
"E",
"NE",
"NE",
"N",
"N",
"N",
"NW",
"NW",
"W",
"W",
"UU",
"E",
"E",
"SE",
"SE",
"S",
"S",
"S",
"SW",
"SW",
"W",
"W",
"UU",
]
)
# Call function to test:
function_cardinal_pts = gdet_cr_ind.determine_cardinal_point(
angles_deg, theta_threshold, inputangle="degrees"
)
    # Check the elements of the output array one by one:
for i in range(len(expected_output)):
assert function_cardinal_pts[i] == expected_output[i]
# Same test as above, but giving the angles in radians instead of degrees:
angles_rad = np.deg2rad(angles_deg)
theta_threshold_rad = np.deg2rad(theta_threshold)
# Call function to test:
function_cardinal_pts = gdet_cr_ind.determine_cardinal_point(
angles_rad, theta_threshold_rad, inputangle="radians"
)
    # Check the elements of the output array one by one:
for i in range(len(expected_output)):
assert function_cardinal_pts[i] == expected_output[i]
# Test that all results are unknown ("UU") if the threshold angle is negative:
theta_threshold = -25.0
# Call function to test:
function_cardinal_pts = gdet_cr_ind.determine_cardinal_point(
angles_deg, theta_threshold, inputangle="degrees"
)
    # Check the elements of the output array one by one:
for i in range(len(expected_output)):
assert function_cardinal_pts[i] == "UU"
def test_adjust_coord_of_polygons():
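    # adjust_coord_of_polygons is expected to move the shared boundary between two
    # rectangular cells to the midpoint of the overlap or gap between them, so that the
    # two adjusted polygons touch exactly; the cardinal point indicates where polygon2
    # lies with respect to polygon1.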
# Create datasets to test:
spacing = 1.0
    # polygon1, around which polygon2 will be "moved" to change their relative position:
lon_w = 12.0
lat_s = 40.0
lon_e = lon_w + spacing # 13.0
lat_n = lat_s + spacing # 41.0
polygon1 = Polygon([(lon_w, lat_s), (lon_e, lat_s), (lon_e, lat_n), (lon_w, lat_n)])
# An array of polygons at different positions with respect to polygon1:
polygons2 = [
Polygon([(lon_w, 41.2), (lon_e, 41.2), (lon_e, 42.2), (lon_w, 42.2)]), # gap
Polygon([(lon_w, 39.2), (lon_e, 39.2), (lon_e, 40.2), (lon_w, 40.2)]), # overlap
Polygon([(13.2, lat_s), (14.2, lat_s), (14.2, lat_n), (13.2, lat_n)]), # gap
Polygon([(11.2, lat_s), (12.2, lat_s), (12.2, lat_n), (11.2, lat_n)]), # overlap
Polygon([(12.8, 40.8), (13.8, 40.8), (13.8, 41.8), (12.8, 41.8)]), # overlap
Polygon([(10.8, 41.2), (11.8, 41.2), (11.8, 42.2), (10.8, 42.2)]), # gap
Polygon([(13.2, 38.8), (14.2, 38.8), (14.2, 39.8), (13.2, 39.8)]), # overlap
        Polygon([(11.2, 39.2), (12.2, 39.2), (12.2, 40.2), (11.2, 40.2)]),  # overlap
    ]
    # Polygons in polygons2 lie towards the following cardinal points with respect to polygon1:
cardinal_pts = ["N", "S", "E", "W", "NE", "NW", "SE", "SW"]
# Results should be:
should_be_1 = [
Polygon([(lon_w, lat_s), (lon_e, lat_s), (lon_e, 41.1), (lon_w, 41.1)]),
Polygon([(lon_w, 40.1), (lon_e, 40.1), (lon_e, lat_n), (lon_w, lat_n)]),
Polygon([(lon_w, lat_s), (13.1, lat_s), (13.1, lat_n), (lon_w, lat_n)]),
Polygon([(12.1, lat_s), (lon_e, lat_s), (lon_e, lat_n), (12.1, lat_n)]),
Polygon([(lon_w, lat_s), (12.9, lat_s), (12.9, 40.9), (lon_w, 40.9)]),
Polygon([(11.9, lat_s), (lon_e, lat_s), (lon_e, 41.1), (11.9, 41.1)]),
Polygon([(lon_w, 39.9), (13.1, 39.9), (13.1, lat_n), (lon_w, lat_n)]),
Polygon([(12.1, 40.1), (lon_e, 40.1), (lon_e, lat_n), (12.1, lat_n)]),
]
should_be_2 = [
Polygon([(lon_w, 41.1), (lon_e, 41.1), (lon_e, 42.2), (lon_w, 42.2)]),
Polygon([(lon_w, 39.2), (lon_e, 39.2), (lon_e, 40.1), (lon_w, 40.1)]),
Polygon([(13.1, lat_s), (14.2, lat_s), (14.2, lat_n), (13.1, lat_n)]),
Polygon([(11.2, lat_s), (12.1, lat_s), (12.1, lat_n), (11.2, lat_n)]),
Polygon([(12.9, 40.9), (13.8, 40.9), (13.8, 41.8), (12.9, 41.8)]),
Polygon([(10.8, 41.1), (11.9, 41.1), (11.9, 42.2), (10.8, 42.2)]),
Polygon([(13.1, 38.8), (14.2, 38.8), (14.2, 39.9), (13.1, 39.9)]),
Polygon([(11.2, 39.2), (12.1, 39.2), (12.1, 40.1), (11.2, 40.1)]),
]
    # Go through the elements of polygons2 one by one, call the function and assert:
for i in range(len(polygons2)):
function_poly1, function_poly2 = gdet_cr_ind.adjust_coord_of_polygons(
polygon1, polygons2[i], cardinal_pts[i]
)
assert function_poly1.bounds == should_be_1[i].bounds
assert function_poly1.area == should_be_1[i].area
assert function_poly2.bounds == should_be_2[i].bounds
assert function_poly2.area == should_be_2[i].area
def test_auto_adjust_overlaps_gaps():
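    # Three scenarios are covered: an invalid "case" argument, the adjustment of
    # overlapping cells (case="overlap"), and the adjustment of cells separated by gaps
    # (case="gap"), the latter re-using the output of the overlaps test as its input.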
    # Test that the function returns an empty output if the input variable "case" is not valid:
# ==========================================================================
assert {} == gdet_cr_ind.auto_adjust_overlaps_gaps([], [], "", "", 0.0, 0.0, "some_case")
# General variables for all tests that follow:
col_lon = "LONGITUDE"
col_lat = "LATITUDE"
width_EW = 30.0 / (60.0 * 60.0) # 30 arcsec
width_NS = 30.0 / (60.0 * 60.0) # 30 arcsec
col_1 = "id_1"
col_2 = "id_2"
# Test a case of overlaps:
# ========================
# Define a GeoDataFrame with the cells' geometry:
smaller_spacing = 0.0082
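    # 0.0082 deg is slightly smaller than the 30-arcsec cell width (~0.00833 deg), so
    # cells placed at this spacing (or closer) overlap one another.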
points_lon = np.array(
[
12.0,
12.0,
12.0 + smaller_spacing,
12.0 + (smaller_spacing - 0.0001),
12.0 + 2.0 * (smaller_spacing - 0.0001),
]
)
points_lat = np.array(
[
40.0,
40.0 + smaller_spacing,
40.0,
40.0 + smaller_spacing,
40.0 + 2.0 * (smaller_spacing - 0.0001),
]
)
longitudes_w = points_lon - width_EW / 2.0
longitudes_e = points_lon + width_EW / 2.0
latitudes_s = points_lat - width_NS / 2.0
latitudes_n = points_lat + width_NS / 2.0
geoms = []
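    # Build one rectangular cell polygon per point: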
for i in range(0, len(points_lon)):
geoms.append(
Polygon(
[
(longitudes_w[i], latitudes_s[i]),
(longitudes_e[i], latitudes_s[i]),
(longitudes_e[i], latitudes_n[i]),
(longitudes_w[i], latitudes_n[i]),
]
)
)
d = {
"id": np.array(["ID_%s" % (i) for i in range(1, len(geoms) + 1)]),
"LONGITUDE": points_lon,
"LATITUDE": points_lat,
"lon_w": longitudes_w,
"lon_e": longitudes_e,
"lat_s": latitudes_s,
"lat_n": latitudes_n,
"geometry": geoms,
}
cells_gdf = gpd.GeoDataFrame(d, geometry="geometry", crs="EPSG:4326")
# Define a GeoDataFrame with the cells' intersections:
ids = {}
ids[1] = ["ID_1", "ID_1", "ID_1", "ID_2", "ID_2", "ID_3", "ID_4"]
ids[2] = ["ID_2", "ID_3", "ID_4", "ID_3", "ID_4", "ID_4", "ID_5"]
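    # Each position of ids[1] and ids[2] identifies one pair of cells whose geometries
    # intersect; the intersection polygons below are given in the same order.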
geoms = [
Polygon(
[
(12.0 - width_EW / 2.0, 40.0 + smaller_spacing - width_NS / 2.0), # IDs 1 & 2
(12.0 + width_EW / 2.0, 40.0 + smaller_spacing - width_NS / 2.0),
(12.0 + width_EW / 2.0, 40.0 + width_NS / 2.0),
(12.0 - width_EW / 2.0, 40.0 + width_NS / 2.0),
]
),
Polygon(
[
(12.0 + smaller_spacing - width_EW / 2.0, 40.0 - width_NS / 2.0), # IDs 1 & 3
(12.0 + width_EW / 2.0, 40.0 - width_NS / 2.0),
(12.0 + width_EW / 2.0, 40.0 + width_NS / 2.0),
(12.0 + smaller_spacing - width_EW / 2.0, 40.0 + width_NS / 2.0),
]
),
Polygon(
[
(
12.0 + (smaller_spacing - 0.0001) - width_EW / 2.0,
40.0 + smaller_spacing - width_NS / 2.0,
), # IDs 1 & 4
(12.0 + width_EW / 2.0, 40.0 + smaller_spacing - width_NS / 2.0),
(12.0 + width_EW / 2.0, 40.0 + width_NS / 2.0),
(12.0 + (smaller_spacing - 0.0001) - width_EW / 2.0, 40.0 + width_NS / 2.0),
]
),
Polygon(
[
(
12.0 + smaller_spacing - width_EW / 2.0,
40.0 + smaller_spacing - width_NS / 2.0,
), # IDs 2 & 3
(12.0 + width_EW / 2.0, 40.0 + smaller_spacing - width_NS / 2.0),
(12.0 + width_EW / 2.0, 40.0 + width_NS / 2.0),
(12.0 + smaller_spacing - width_EW / 2.0, 40.0 + width_NS / 2.0),
]
),
Polygon(
[
(
12.0 + (smaller_spacing - 0.0001) - width_EW / 2.0,
40.0 + smaller_spacing - width_NS / 2.0,
), # IDs 2 & 4
(12.0 + width_EW / 2.0, 40.0 + smaller_spacing - width_NS / 2.0),
(12.0 + width_EW / 2.0, 40.0 + smaller_spacing + width_NS / 2.0),
(
12.0 + (smaller_spacing - 0.0001) - width_EW / 2.0,
40.0 + smaller_spacing + width_NS / 2.0,
),
]
),
Polygon(
[
(
12.0 + smaller_spacing - width_EW / 2.0,
40.0 + smaller_spacing - width_NS / 2.0,
), # IDs 3 & 4
(
12.0 + (smaller_spacing - 0.0001) + width_EW / 2.0,
40.0 + smaller_spacing - width_NS / 2.0,
),
(12.0 + (smaller_spacing - 0.0001) + width_EW / 2.0, 40.0 + width_NS / 2.0),
(12.0 + smaller_spacing - width_EW / 2.0, 40.0 + width_NS / 2.0),
]
),
Polygon(
[
(
12.0 + 2.0 * (smaller_spacing - 0.0001) - width_EW / 2.0,
40.0 + 2.0 * (smaller_spacing - 0.0001) - width_NS / 2.0,
), # IDs 4 & 5
(
12.0 + (smaller_spacing - 0.0001) + width_EW / 2.0,
40.0 + 2.0 * (smaller_spacing - 0.0001) - width_NS / 2.0,
),
(
12.0 + (smaller_spacing - 0.0001) + width_EW / 2.0,
40.0 + smaller_spacing + width_NS / 2.0,
),
(
12.0 + 2.0 * (smaller_spacing - 0.0001) - width_EW / 2.0,
40.0 + smaller_spacing + width_NS / 2.0,
),
]
),
]
cols = ["LONGITUDE", "LATITUDE", "lon_w", "lon_e", "lat_s", "lat_n"]
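    # Copy the attributes of the two cells of each pair into "<column>_1" / "<column>_2"
    # columns for the intersections GeoDataFrame: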
aux_dict = {}
for col_name in cols:
for val in range(1, 3):
aux_list = []
for i in range(0, len(ids[val])):
aux_list.append(cells_gdf[cells_gdf.id == ids[val][i]][col_name].values[0])
aux_dict["%s_%s" % (col_name, str(val))] = np.array(aux_list)
d = {"id_1": ids[1], "id_2": ids[2]}
for key in aux_dict.keys():
d[key] = aux_dict[key]
d["geometry"] = geoms
intersections_gdf = gpd.GeoDataFrame(d, geometry="geometry", crs="EPSG:4326")
# Expected output:
expected_out_gdf = deepcopy(cells_gdf)
    # Manual adjustment done row by row of the intersections GeoDataFrame (i.e. the order matters):
# IDs 1 & 2
expected_out_gdf["lat_n"].values[0] = (
expected_out_gdf["lat_n"].values[0] + expected_out_gdf["lat_s"].values[1]
) / 2.0
expected_out_gdf["lat_s"].values[1] = expected_out_gdf["lat_n"].values[0]
# IDs 1 & 3:
expected_out_gdf["lon_e"].values[0] = (
expected_out_gdf["lon_e"].values[0] + expected_out_gdf["lon_w"].values[2]
) / 2.0
expected_out_gdf["lon_w"].values[2] = expected_out_gdf["lon_e"].values[0]
# IDs 1 & 4:
expected_out_gdf["lat_n"].values[0] = (
expected_out_gdf["lat_n"].values[0] + expected_out_gdf["lat_s"].values[3]
) / 2.0
expected_out_gdf["lat_s"].values[3] = expected_out_gdf["lat_n"].values[0]
expected_out_gdf["lon_e"].values[0] = (
expected_out_gdf["lon_e"].values[0] + expected_out_gdf["lon_w"].values[3]
) / 2.0
expected_out_gdf["lon_w"].values[3] = expected_out_gdf["lon_e"].values[0]
# IDs 2 & 3:
expected_out_gdf["lon_e"].values[1] = (
expected_out_gdf["lon_e"].values[1] + expected_out_gdf["lon_w"].values[2]
) / 2.0
expected_out_gdf["lon_w"].values[2] = expected_out_gdf["lon_e"].values[1]
expected_out_gdf["lat_s"].values[1] = (
expected_out_gdf["lat_s"].values[1] + expected_out_gdf["lat_n"].values[2]
) / 2.0
expected_out_gdf["lat_n"].values[2] = expected_out_gdf["lat_s"].values[1]
# IDs 2 & 4:
expected_out_gdf["lon_e"].values[1] = (
expected_out_gdf["lon_e"].values[1] + expected_out_gdf["lon_w"].values[3]
) / 2.0
expected_out_gdf["lon_w"].values[3] = expected_out_gdf["lon_e"].values[1]
# IDs 3 & 4:
expected_out_gdf["lat_n"].values[2] = (
expected_out_gdf["lat_n"].values[2] + expected_out_gdf["lat_s"].values[3]
) / 2.0
expected_out_gdf["lat_s"].values[3] = expected_out_gdf["lat_n"].values[2]
# IDs 4 & 5:
expected_out_gdf["lat_n"].values[3] = (
expected_out_gdf["lat_n"].values[3] + expected_out_gdf["lat_s"].values[4]
) / 2.0
expected_out_gdf["lat_s"].values[4] = expected_out_gdf["lat_n"].values[3]
expected_out_gdf["lon_e"].values[3] = (
expected_out_gdf["lon_e"].values[3] + expected_out_gdf["lon_w"].values[4]
) / 2.0
expected_out_gdf["lon_w"].values[4] = expected_out_gdf["lon_e"].values[3]
new_geoms = []
for i in range(0, expected_out_gdf.shape[0]):
lon_w = expected_out_gdf["lon_w"].values[i]
lon_e = expected_out_gdf["lon_e"].values[i]
lat_s = expected_out_gdf["lat_s"].values[i]
lat_n = expected_out_gdf["lat_n"].values[i]
new_geoms.append(
Polygon([(lon_w, lat_s), (lon_e, lat_s), (lon_e, lat_n), (lon_w, lat_n)])
)
expected_out_gdf["geometry"] = new_geoms
# Output from function to be tested:
function_out_gdf, flag = gdet_cr_ind.auto_adjust_overlaps_gaps(
cells_gdf,
intersections_gdf,
col_lon,
col_lat,
width_EW,
width_NS,
"overlap",
col_1=col_1,
col_2=col_2,
)
    assert flag is False
for rownum in range(0, expected_out_gdf.shape[0]):
bounds_expected = expected_out_gdf["geometry"].values[rownum].bounds
bounds_function = function_out_gdf["geometry"].values[rownum].bounds
for position in range(0, 4):
assert round(bounds_expected[position], 12) == round(bounds_function[position], 12)
np.testing.assert_allclose(
expected_out_gdf["lon_w"].values, function_out_gdf["lon_w"].values, rtol=0.0, atol=1e-08
)
np.testing.assert_allclose(
expected_out_gdf["lon_e"].values, function_out_gdf["lon_e"].values, rtol=0.0, atol=1e-08
)
np.testing.assert_allclose(
expected_out_gdf["lat_s"].values, function_out_gdf["lat_s"].values, rtol=0.0, atol=1e-08
)
np.testing.assert_allclose(
expected_out_gdf["lat_n"].values, function_out_gdf["lat_n"].values, rtol=0.0, atol=1e-08
)
# Test a case of gaps:
# ====================
# The output GeoDataFrame from the test of overlaps (expected_out_gdf) contains gaps and
# is used herein to test case="gap" (this would be the normal flow of the main code too):
# The new input is:
cells_gdf = deepcopy(expected_out_gdf)
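    # The cells are temporarily swollen by a quarter of the cell width so that cells
    # separated by small gaps intersect again; these intersections are the input for
    # the case="gap" adjustment below.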
cells_offset_gdf = gdet_cr_ind.swell_cells_with_buffer(
cells_gdf, 0.25 * width_EW, 0.25 * width_NS
)
intersections_gdf = gdet_cr_ind.overlap_by_full_geom_intersection(
cells_offset_gdf, "id_1", "id_2"
)
# The new expected output is:
# IDs 1 & 2
expected_out_gdf["lat_n"].values[0] = (
expected_out_gdf["lat_n"].values[0] + expected_out_gdf["lat_s"].values[1]
) / 2.0
expected_out_gdf["lat_s"].values[1] = expected_out_gdf["lat_n"].values[0]
# IDs 1 & 3:
expected_out_gdf["lon_e"].values[0] = (
expected_out_gdf["lon_e"].values[0] + expected_out_gdf["lon_w"].values[2]
) / 2.0
expected_out_gdf["lon_w"].values[2] = expected_out_gdf["lon_e"].values[0]
new_geoms = []
for i in range(0, expected_out_gdf.shape[0]):
lon_w = expected_out_gdf["lon_w"].values[i]
lon_e = expected_out_gdf["lon_e"].values[i]
lat_s = expected_out_gdf["lat_s"].values[i]
lat_n = expected_out_gdf["lat_n"].values[i]
new_geoms.append(
Polygon([(lon_w, lat_s), (lon_e, lat_s), (lon_e, lat_n), (lon_w, lat_n)])
)
expected_out_gdf["geometry"] = new_geoms
# Output from function to be tested:
function_out_gdf, flag = gdet_cr_ind.auto_adjust_overlaps_gaps(
cells_gdf,
intersections_gdf,
col_lon,
col_lat,
width_EW,
width_NS,
"gap",
col_1=col_1,
col_2=col_2,
)
    assert flag is True
for rownum in range(0, expected_out_gdf.shape[0]):
bounds_expected = expected_out_gdf["geometry"].values[rownum].bounds
bounds_function = function_out_gdf["geometry"].values[rownum].bounds
for position in range(0, 4):
assert round(bounds_expected[position], 12) == round(bounds_function[position], 12)
np.testing.assert_allclose(
expected_out_gdf["lon_w"].values, function_out_gdf["lon_w"].values, rtol=0.0, atol=1e-08
)
np.testing.assert_allclose(
expected_out_gdf["lon_e"].values, function_out_gdf["lon_e"].values, rtol=0.0, atol=1e-08
)
np.testing.assert_allclose(
expected_out_gdf["lat_s"].values, function_out_gdf["lat_s"].values, rtol=0.0, atol=1e-08
)
np.testing.assert_allclose(
expected_out_gdf["lat_n"].values, function_out_gdf["lat_n"].values, rtol=0.0, atol=1e-08
)