From dea5b7f1840aa6ea4f1de11a14eaa5a99ee7a6cd Mon Sep 17 00:00:00 2001 From: Viktor Roytman Date: Mon, 25 Apr 2016 11:33:23 -0400 Subject: [PATCH 1/9] Convert self.coords_values to np.array A different part of the code tried to access what used to be the distance matrix by indexing with a float. Fixes #57 --- sequencer/NetworkPlan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sequencer/NetworkPlan.py b/sequencer/NetworkPlan.py index 275a424..36f8d9e 100644 --- a/sequencer/NetworkPlan.py +++ b/sequencer/NetworkPlan.py @@ -45,7 +45,7 @@ def __init__(self, shp, csv, **kwargs): loc_tol = self.TOL ) - self.coord_values = self.coords.values() + self.coord_values = np.array(self.coords.values()) # Set the edge weight to the distance between those nodes self._weight_edges() From 07dbbb18f6ac49b3775f14cd7243cd5c2e717c9a Mon Sep 17 00:00:00 2001 From: Viktor Roytman Date: Mon, 25 Apr 2016 12:39:53 -0400 Subject: [PATCH 2/9] Raise exception if a node index is not an integer --- sequencer/Sequencer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sequencer/Sequencer.py b/sequencer/Sequencer.py index b79bb62..10d5367 100644 --- a/sequencer/Sequencer.py +++ b/sequencer/Sequencer.py @@ -236,6 +236,8 @@ def _build_edge_wkt(self): # Iterate through the nodes and their parent for rank, fnode, tnode in zip(r.index, r['Sequence..Upstream.id'], r['Sequence..Vertex.id']): if not np.isnan(fnode): + if np.any(np.mod([fnode, tnode], 1) != 0): + raise Exception('Non-integral node index in results.') # Set the edge attributes with those found in sequencing self.networkplan.network.edge[fnode][tnode]['rank'] = int(rank) self.networkplan.network.edge[fnode][tnode]['distance'] = float(self.networkplan._distance(fnode, tnode)) From bae460f80dcf843ae2a703ea2fabb670f01b1428 Mon Sep 17 00:00:00 2001 From: chrisnatali Date: Tue, 26 Apr 2016 18:09:44 -0400 Subject: [PATCH 3/9] test with fake nodes, some refactoring --- demo_sequencer.py | 8 ++-- sequencer/NetworkPlan.py | 71 ++++++++++++++++++++++++----------- sequencer/Sequencer.py | 5 ++- sequencer/Tests/Test_Suite.py | 60 ++++++++++++++++++++++++++++- sequencer/Utils.py | 10 +++-- 5 files changed, 124 insertions(+), 30 deletions(-) diff --git a/demo_sequencer.py b/demo_sequencer.py index 72f483d..b53b72e 100644 --- a/demo_sequencer.py +++ b/demo_sequencer.py @@ -1,11 +1,11 @@ from sequencer import NetworkPlan from sequencer.Models import EnergyMaximizeReturn -csv = '/Users/blogle/Downloads/3305/metrics-local.csv' -shp = '/Users/blogle/Downloads/3305/networks-proposed.shp' +csv = 'data/sumaila/input/metrics-local.csv' +shp = 'data/sumaila/input/networks-proposed.shp' -nwp = NetworkPlan(shp, csv, prioritize='Population') +nwp = NetworkPlan.from_files(shp, csv, prioritize='Population') model = EnergyMaximizeReturn(nwp) results = model.sequence() -model.output('/Users/blogle/Desktop/output/') +model.output('output') diff --git a/sequencer/NetworkPlan.py b/sequencer/NetworkPlan.py index 275a424..97ef8a4 100644 --- a/sequencer/NetworkPlan.py +++ b/sequencer/NetworkPlan.py @@ -19,31 +19,27 @@ class NetworkPlan(object): NetworkPlan containing NetworkPlanner proposed network and accompanying nodal metrics - Parameters - ---------- - shp : file or string (File, directory, or filename to read). - csv : string or file handle / StringIO. 
- - Example - ---------- - NetworkPlan('/Users/blogle/Downloads/1643/networks-proposed.shp', - '/Users/blogle/Downloads/1643/metrics-local.csv') """ TOL = .5 # meters at the equator, tolerance is stricter towards the poles - def __init__(self, shp, csv, **kwargs): - self.shp_p, self.csv_p = shp, csv + def __init__(self, network, metrics, **kwargs): self.priority_metric = kwargs['prioritize'] if 'prioritize' in kwargs else 'population' + self.proj = kwargs['proj'] if 'proj' in kwargs else 'utm' - logger.info('Asserting Input Projections Match') - self._assert_proj_match(shp, csv) + # do the real init + self._init_helper(network, metrics) + + + def _init_helper(self, network, metrics): + """ + re-usable init bypassing read shp,csv + """ # Load in and align input data logger.info('Aligning Network Nodes With Input Metrics') - self._network, self._metrics = prep_data( nx.read_shp(shp), - pd.read_csv(csv, header=1), - loc_tol = self.TOL - ) + self._network, self._metrics = prep_data(network, + metrics, + loc_tol = self.TOL) self.coord_values = self.coords.values() @@ -63,6 +59,40 @@ def __init__(self, shp, csv, **kwargs): #Fillna values with Zero self._metrics = self.metrics.fillna(0) + + @classmethod + def from_files(cls, shp, csv, **kwargs): + """ + Parameters + ---------- + shp : file or string (File, directory, or filename to read). + csv : string or file handle / StringIO. + + Example + ---------- + NetworkPlan.from_files('networks-proposed.shp', + 'metrics-local.csv') + """ + + logger.info('Asserting Input Projections Match') + + cls._assert_proj_match(shp, csv) + # Use fiona to open the shapefile as this includes the projection type + + shapefile = fiona.open(shp) + # Pass along the projection + kwargs['proj'] = shapefile.crs['proj'] + + nwp = cls(nx.read_shp(shp), pd.read_csv(csv, header=1), **kwargs) + + # FIXME: + # This is a really bad way to do this...the sequencer has a dependency + # on the csv_p attribute (i.e. 
the original csv) so leave it for now + nwp.csv_p = csv + return nwp + + + @classmethod def _assert_proj_match(self, shp, csv): """Ensure that the projections match before continuing""" # Use fiona to open the shapefile as this includes the projection type @@ -83,9 +113,8 @@ def _assert_proj_match(self, shp, csv): logger.error("csv and shp Projections Don't Match") raise AssertionError("csv and shapefile Projections Don't Match") - # Save the state of the projection - self.proj = shapefile.crs['proj'] - + + def assert_is_tree(self): in_degree = self.network.in_degree() @@ -322,4 +351,4 @@ def write_file(name): csv = os.path.join(directory_name, 'metrics-local.csv') shp = os.path.join(directory_name, 'network-proposed.shp') - return NetworkPlan(shp, csv) + return NetworkPlan.from_files(shp, csv) diff --git a/sequencer/Sequencer.py b/sequencer/Sequencer.py index b79bb62..df53d57 100644 --- a/sequencer/Sequencer.py +++ b/sequencer/Sequencer.py @@ -289,7 +289,10 @@ def _clean_results(self): """This joins the sequenced results on the metrics dataframe and reappends the dropped rows""" logger.info('Joining Sequencer Results on Input Metrics') - + if not hasattr(self.networkplan, 'csv_p'): + logger.info('no original csv file found, skipping') + return + orig = pd.read_csv(self.networkplan.csv_p, header=1) orig.columns = parse_cols(orig) self.networkplan.metrics.index.name = 'Sequence..Vertex.id' diff --git a/sequencer/Tests/Test_Suite.py b/sequencer/Tests/Test_Suite.py index c60eb84..6fa3836 100644 --- a/sequencer/Tests/Test_Suite.py +++ b/sequencer/Tests/Test_Suite.py @@ -46,6 +46,64 @@ def gen_data(): return metrics, network.to_directed() + +def gen_data_with_fakes(): + """ + generate network and metrics where some of the network + nodes do not have corresponding metrics records + + This should be sufficient for tests requiring fake nodes + + network looks like + + o * + | / \ + * * * + / \ + * * + + where o is a fake node, * is not + """ + + # create disjoint graph with 2 trees, one rooted by a fake node + network = nx.graph.Graph() + edges = ((0, 1), (0, 2), (3, 4), (3, 5)) + network.add_edges_from(edges) + + # now add fake root to tree at 3 + network.add_edge(6, 3) + + # set coordinates + base_coord = np.array([10, 10]) + coord_dict = {i: base_coord + [i*-1, i*-1] for i in range(6)} + nx.set_node_attributes(network, 'coords', coord_dict) + # and set fake node coordinates + nx.set_node_attributes(network, 'coords', {6: np.array([10, 11])}) + + # now set the metrics dataframe without the fake node + metrics_data = {'Demand...Projected.nodal.demand.per.year': + [100, 50, 25, 12, 6, 3], + 'Population': [100, 50, 25, 12, 6, 3]} + + metrics = DataFrame(metrics_data) + metrics['X'] = [ coord_dict[i][0] for i in range(6) ] + metrics['Y'] = [ coord_dict[i][1] for i in range(6) ] + + return metrics, network + +def test_sequencer_with_fakes(): + """ + Make sure we work with fake nodes + """ + + # for now, just make sure it runs without exceptions + metrics, network = gen_data_with_fakes() + nwp = NetworkPlan(network, metrics, prioritize='Population', proj='wgs4') + model = EnergyMaximizeReturn(nwp) + model.sequence() + #todo: check the result + + class TestNetworkPlan(NetworkPlan): def __init__(self): @@ -144,7 +202,7 @@ def test_sequencer_compare(): input_dir = "data/sumaila/input" csv_file = os.path.join(input_dir, "metrics-local.csv") shp_file = os.path.join(input_dir, "networks-proposed.shp") - nwp = NetworkPlan(shp_file, csv_file, prioritize='Population') + nwp = NetworkPlan.from_files(shp_file, 
csv_file, prioritize='Population') model = EnergyMaximizeReturn(nwp) model.sequence() diff --git a/sequencer/Utils.py b/sequencer/Utils.py index e2ae2a9..7e772f8 100644 --- a/sequencer/Utils.py +++ b/sequencer/Utils.py @@ -7,6 +7,7 @@ import numpy as np from numpy import sin, cos, pi, arcsin, sqrt import string +import collections def prep_data(network, metrics, loc_tol=.5): """ @@ -15,7 +16,12 @@ def prep_data(network, metrics, loc_tol=.5): """ # convert the node names from coords to integers, cache the coords as attrs - network = nx.convert_node_labels_to_integers(network, label_attribute='coords') + # but ONLY if the nodes are themselves collections (which is the default for + # networkx shapefile import) + # otherwise, assume the coords attribute exists + if(len(network.nodes()) > 0 and + isinstance(network.nodes()[0], collections.Iterable)): + network = nx.convert_node_labels_to_integers(network, label_attribute='coords') # convert special characters to dot notation metrics.columns = parse_cols(metrics) @@ -28,7 +34,6 @@ def prep_data(network, metrics, loc_tol=.5): # cast coords to tuples (hashable) node_df['coords'] = node_df['coords'].apply(tuple) - metrics['m_coords'] = metrics['m_coords'].apply(tuple) # build a vector of all the coordinates in the metrics dataframe coords_vec = np.vstack(metrics['m_coords'].values) @@ -46,7 +51,6 @@ def fuzzy_match(coord): # cast the coordinates back to tuples (hashable) node_df['m_coords'] = node_df['m_coords'].apply(tuple) - metrics['m_coords'] = metrics['m_coords'].apply(tuple) # now that we have identical metric coords in both node_df and metrics join on that column metrics = pd.merge(metrics, node_df, on='m_coords', left_index=True).sort() From 9209ba7db208ca16ed23c4cba4c8fcafc8b5c495 Mon Sep 17 00:00:00 2001 From: chrisnatali Date: Wed, 27 Apr 2016 21:17:21 -0400 Subject: [PATCH 4/9] better fix for #57, fake node test, refactored NetworkPlan init --- sequencer/NetworkPlan.py | 28 ++++++++++++++-------------- sequencer/Sequencer.py | 12 ++++-------- 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/sequencer/NetworkPlan.py b/sequencer/NetworkPlan.py index 0dd1f70..4ef9e32 100644 --- a/sequencer/NetworkPlan.py +++ b/sequencer/NetworkPlan.py @@ -4,8 +4,6 @@ import fiona import numpy as np import networkx as nx -from scipy.sparse import csr_matrix -import scipy.sparse.csgraph as graph import pandas as pd import logging import copy @@ -26,13 +24,18 @@ def __init__(self, network, metrics, **kwargs): self.priority_metric = kwargs['prioritize'] if 'prioritize' in kwargs else 'population' self.proj = kwargs['proj'] if 'proj' in kwargs else 'utm' - # do the real init + # FIXME: + # Remove the dependency that sequencer has on the + # original metrics file (this is terrible coupling) + # see sequencer:_clean_results() + self._original_metrics = metrics + self._init_helper(network, metrics) def _init_helper(self, network, metrics): """ - re-usable init bypassing read shp,csv + All initialization (cleaning up metrics, network, etc) """ # Load in and align input data @@ -41,7 +44,7 @@ def _init_helper(self, network, metrics): metrics, loc_tol = self.TOL) - self.coord_values = np.array(self.coords.values()) + self.coord_values = self.coords.values() # Set the edge weight to the distance between those nodes self._weight_edges() @@ -83,14 +86,7 @@ def from_files(cls, shp, csv, **kwargs): # Pass along the projection kwargs['proj'] = shapefile.crs['proj'] - nwp = cls(nx.read_shp(shp), pd.read_csv(csv, header=1), **kwargs) - - # FIXME: - # This is 
a really bad way to do this...the sequencer has a dependency - # on the csv_p attribute (i.e. the original csv) so leave it for now - nwp.csv_p = csv - return nwp - + return cls(nx.read_shp(shp), pd.read_csv(csv, header=1), **kwargs) @classmethod def _assert_proj_match(self, shp, csv): @@ -283,12 +279,16 @@ def network(self): """returns the DiGraph Object representation of the graph""" return self._network + @property + def original_metrics(self): + """returns the original (unprocessed) metrics data_frame""" + return self._original_metrics + @property def metrics(self): """returns the nodal metrics Pandas DataFrame""" return self._metrics - def download_scenario(scenario_number, directory_name=None, username=None, password=None, np_url='http://networkplanner.modilabs.org/'): diff --git a/sequencer/Sequencer.py b/sequencer/Sequencer.py index 29c57ec..9fc0ad7 100644 --- a/sequencer/Sequencer.py +++ b/sequencer/Sequencer.py @@ -132,8 +132,7 @@ def upstream_distance(self, node): return 0.0 def sequence(self): - self.results = pd.DataFrame(self._sequence()).set_index('Sequence..Far.sighted.sequence') - + self.results = pd.DataFrame(self._sequence(), dtype=object).set_index('Sequence..Far.sighted.sequence') # Post process for output self._build_node_wkt() self._build_edge_wkt() @@ -235,7 +234,7 @@ def _build_edge_wkt(self): r = self.results # Iterate through the nodes and their parent for rank, fnode, tnode in zip(r.index, r['Sequence..Upstream.id'], r['Sequence..Vertex.id']): - if not np.isnan(fnode): + if fnode: if np.any(np.mod([fnode, tnode], 1) != 0): raise Exception('Non-integral node index in results.') # Set the edge attributes with those found in sequencing @@ -291,11 +290,8 @@ def _clean_results(self): """This joins the sequenced results on the metrics dataframe and reappends the dropped rows""" logger.info('Joining Sequencer Results on Input Metrics') - if not hasattr(self.networkplan, 'csv_p'): - logger.info('no original csv file found, skipping') - return - - orig = pd.read_csv(self.networkplan.csv_p, header=1) + # FIXME: Remove this dependency on original_metrics + orig = self.networkplan.original_metrics orig.columns = parse_cols(orig) self.networkplan.metrics.index.name = 'Sequence..Vertex.id' sequenced_metrics = pd.merge(self.networkplan.metrics.reset_index(), self.results.reset_index(), on='Sequence..Vertex.id') From 0261f3b800644c4b741af444b51a535831847592 Mon Sep 17 00:00:00 2001 From: chrisnatali Date: Wed, 27 Apr 2016 21:38:42 -0400 Subject: [PATCH 5/9] check fnode for None explicitly (woops) --- sequencer/Sequencer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sequencer/Sequencer.py b/sequencer/Sequencer.py index 9fc0ad7..dec257b 100644 --- a/sequencer/Sequencer.py +++ b/sequencer/Sequencer.py @@ -234,7 +234,7 @@ def _build_edge_wkt(self): r = self.results # Iterate through the nodes and their parent for rank, fnode, tnode in zip(r.index, r['Sequence..Upstream.id'], r['Sequence..Vertex.id']): - if fnode: + if not fnode == None: if np.any(np.mod([fnode, tnode], 1) != 0): raise Exception('Non-integral node index in results.') # Set the edge attributes with those found in sequencing From 6f3a5ba06bb94e656b595f3a5c0a22463a59eeea Mon Sep 17 00:00:00 2001 From: Viktor Roytman Date: Thu, 28 Apr 2016 10:39:50 -0400 Subject: [PATCH 6/9] Fix None checks https://www.python.org/dev/peps/pep-0008/#programming-recommendations --- sequencer/Sequencer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sequencer/Sequencer.py 
b/sequencer/Sequencer.py index dec257b..acda0b1 100644 --- a/sequencer/Sequencer.py +++ b/sequencer/Sequencer.py @@ -127,7 +127,7 @@ def _sequence(self): def upstream_distance(self, node): """Computes the edge distance from a node to it's parent""" parent = self.parent(node) - if parent != None: + if parent is not None: return self.networkplan._distance(parent, node) return 0.0 @@ -234,7 +234,7 @@ def _build_edge_wkt(self): r = self.results # Iterate through the nodes and their parent for rank, fnode, tnode in zip(r.index, r['Sequence..Upstream.id'], r['Sequence..Vertex.id']): - if not fnode == None: + if fnode is not None: if np.any(np.mod([fnode, tnode], 1) != 0): raise Exception('Non-integral node index in results.') # Set the edge attributes with those found in sequencing From ab6e0426a4ba4f674a83ec87effa74dadbd3b9a7 Mon Sep 17 00:00:00 2001 From: Viktor Roytman Date: Thu, 28 Apr 2016 14:12:20 -0400 Subject: [PATCH 7/9] Use dict.get(key, default) https://github.com/SEL-Columbia/Sequencer/pull/62#discussion-diff-61441162 --- sequencer/NetworkPlan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sequencer/NetworkPlan.py b/sequencer/NetworkPlan.py index 4ef9e32..e4bdb43 100644 --- a/sequencer/NetworkPlan.py +++ b/sequencer/NetworkPlan.py @@ -22,7 +22,7 @@ class NetworkPlan(object): def __init__(self, network, metrics, **kwargs): self.priority_metric = kwargs['prioritize'] if 'prioritize' in kwargs else 'population' - self.proj = kwargs['proj'] if 'proj' in kwargs else 'utm' + self.proj = kwargs.get('proj', 'utm') # FIXME: # Remove the dependency that sequencer has on the From 0a8c26a8494d2965915c7b8a37c4200139fc6a65 Mon Sep 17 00:00:00 2001 From: Viktor Roytman Date: Thu, 28 Apr 2016 14:15:10 -0400 Subject: [PATCH 8/9] Remove integer index check superceded by 9209ba7 https://github.com/SEL-Columbia/Sequencer/pull/62#discussion_r61443860 --- sequencer/Sequencer.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/sequencer/Sequencer.py b/sequencer/Sequencer.py index acda0b1..6209c78 100644 --- a/sequencer/Sequencer.py +++ b/sequencer/Sequencer.py @@ -235,8 +235,6 @@ def _build_edge_wkt(self): # Iterate through the nodes and their parent for rank, fnode, tnode in zip(r.index, r['Sequence..Upstream.id'], r['Sequence..Vertex.id']): if fnode is not None: - if np.any(np.mod([fnode, tnode], 1) != 0): - raise Exception('Non-integral node index in results.') # Set the edge attributes with those found in sequencing self.networkplan.network.edge[fnode][tnode]['rank'] = int(rank) self.networkplan.network.edge[fnode][tnode]['distance'] = float(self.networkplan._distance(fnode, tnode)) From a26a42d290c64880e213f6e80e1cc33636a777b2 Mon Sep 17 00:00:00 2001 From: chrisnatali Date: Fri, 29 Apr 2016 15:49:13 -0400 Subject: [PATCH 9/9] more comprehensive test plus other cleanup --- sequencer/Models.py | 2 ++ sequencer/NetworkPlan.py | 4 +-- sequencer/Sequencer.py | 8 +++-- sequencer/Tests/Test_Suite.py | 60 +++++++++++++++++++++++++---------- 4 files changed, 53 insertions(+), 21 deletions(-) diff --git a/sequencer/Models.py b/sequencer/Models.py index a604dce..1c4a991 100644 --- a/sequencer/Models.py +++ b/sequencer/Models.py @@ -12,3 +12,5 @@ def _strip_cols(self): def sequence(self): super(EnergyMaximizeReturn, self).sequence() self._strip_cols() + # return to be consistent with parent + return self.output_frame diff --git a/sequencer/NetworkPlan.py b/sequencer/NetworkPlan.py index 4ef9e32..ed00259 100644 --- a/sequencer/NetworkPlan.py +++ b/sequencer/NetworkPlan.py @@ -21,8 +21,8 @@ 
class NetworkPlan(object): TOL = .5 # meters at the equator, tolerance is stricter towards the poles def __init__(self, network, metrics, **kwargs): - self.priority_metric = kwargs['prioritize'] if 'prioritize' in kwargs else 'population' - self.proj = kwargs['proj'] if 'proj' in kwargs else 'utm' + self.priority_metric = kwargs.get('prioritize', 'population') + self.proj = kwargs.get('proj', 'utm') # FIXME: # Remove the dependency that sequencer has on the diff --git a/sequencer/Sequencer.py b/sequencer/Sequencer.py index acda0b1..24ece15 100644 --- a/sequencer/Sequencer.py +++ b/sequencer/Sequencer.py @@ -132,6 +132,12 @@ def upstream_distance(self, node): return 0.0 def sequence(self): + """ + Compute the sequence (aka rank) of nodes and edges + + This modifies the NetworkPlan member (so make a deep copy if you + need the original) + """ self.results = pd.DataFrame(self._sequence(), dtype=object).set_index('Sequence..Far.sighted.sequence') # Post process for output self._build_node_wkt() @@ -235,8 +241,6 @@ def _build_edge_wkt(self): # Iterate through the nodes and their parent for rank, fnode, tnode in zip(r.index, r['Sequence..Upstream.id'], r['Sequence..Vertex.id']): if fnode is not None: - if np.any(np.mod([fnode, tnode], 1) != 0): - raise Exception('Non-integral node index in results.') # Set the edge attributes with those found in sequencing self.networkplan.network.edge[fnode][tnode]['rank'] = int(rank) self.networkplan.network.edge[fnode][tnode]['distance'] = float(self.networkplan._distance(fnode, tnode)) diff --git a/sequencer/Tests/Test_Suite.py b/sequencer/Tests/Test_Suite.py index 6fa3836..f27d3ad 100644 --- a/sequencer/Tests/Test_Suite.py +++ b/sequencer/Tests/Test_Suite.py @@ -54,15 +54,17 @@ def gen_data_with_fakes(): This should be sufficient for tests requiring fake nodes - network looks like + network looks like (fake node starred, demand in parens) - o * - | / \ - * * * - / \ - * * + 6* + | + | + 0(100) 3(12) + / \ / \ + / \ / \ + 1(50) 2(25) 4(6) 5(3) - where o is a fake node, * is not + Also returns edge_rank: dict of edge -> rank """ # create disjoint graph with 2 trees, one rooted by a fake node @@ -73,35 +75,59 @@ def gen_data_with_fakes(): # now add fake root to tree at 3 network.add_edge(6, 3) - # set coordinates + # set coordinates (roughly match diagram above) base_coord = np.array([10, 10]) - coord_dict = {i: base_coord + [i*-1, i*-1] for i in range(6)} - nx.set_node_attributes(network, 'coords', coord_dict) - # and set fake node coordinates - nx.set_node_attributes(network, 'coords', {6: np.array([10, 11])}) + fake_coord = np.array([20, 9]) + coord_dict = {0: base_coord, + 1: base_coord + [-1, 1], + 2: base_coord + [1, 1], + 3: fake_coord + [0, 1], + 4: fake_coord + [-1, 2], + 5: fake_coord + [1, 2], + 6: fake_coord} + nx.set_node_attributes(network, 'coords', coord_dict) # now set the metrics dataframe without the fake node metrics_data = {'Demand...Projected.nodal.demand.per.year': [100, 50, 25, 12, 6, 3], 'Population': [100, 50, 25, 12, 6, 3]} metrics = DataFrame(metrics_data) + # Note, we skip fake node here metrics['X'] = [ coord_dict[i][0] for i in range(6) ] metrics['Y'] = [ coord_dict[i][1] for i in range(6) ] - - return metrics, network + + # assign expected ranks to nodes, edges (the sequence) + # note: + # - ranks are 1-based and originally assigned to nodes + # - edges are assigned rank based on the "to" node + # - fake nodes are skipped when assigning rank + # (See Sequencer.sequencer._sequence for details) + node_rank = {0: 1, 1: 2, 2: 3, 3: 
4, 4: 5, 5: 6} + edge_rank = {(0, 1): 2, (0, 2): 3, (6, 3): 4, (3, 4): 5, (3, 5): 6} + return metrics, network, node_rank, edge_rank + def test_sequencer_with_fakes(): """ Make sure we work with fake nodes """ # for now, just make sure it runs without exceptions - metrics, network = gen_data_with_fakes() + metrics, network, node_rank, edge_rank = gen_data_with_fakes() nwp = NetworkPlan(network, metrics, prioritize='Population', proj='wgs4') model = EnergyMaximizeReturn(nwp) - model.sequence() - #todo: check the result + results = model.sequence() + + node_ids = results['Sequence..Vertex.id'] + sequence_ids = results['Sequence..Far.sighted.sequence'] + actual_node_rank = dict(zip(node_ids, sequence_ids)) + actual_edge_rank = {k: v['rank'] for k, v in + model.networkplan.network.edge.iteritems()} + assert node_rank == actual_node_rank,\ + "Node sequencing is not what was expected" + assert edge_rank == actual_edge_rank,\ + "Edge sequencing is not what was expected" class TestNetworkPlan(NetworkPlan):
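
Patch 2 guards _build_edge_wkt against float node indices leaking out of the results frame (the symptom behind #57); the guard is dropped again in patch 8 once the dtype=object fix in patch 4 makes it redundant. The check hinges on np.mod against 1 flagging fractional parts. A small standalone illustration, with a hypothetical helper name:

    import numpy as np

    def has_non_integral(*ids):
        # True when any id carries a fractional part (e.g. 3.5);
        # integral floats such as 3.0 pass the check.
        return bool(np.any(np.mod(ids, 1) != 0))

    print(has_non_integral(3, 4))    # False
    print(has_non_integral(3.0, 4))  # False
    print(has_non_integral(3.5, 4))  # True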
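
Patch 3's change to Utils.prep_data only relabels nodes to integers when the labels are themselves coordinate tuples, which is what nx.read_shp produces; graphs built in tests with integer labels and an explicit 'coords' attribute pass through untouched. A minimal sketch of that guard (the wrapper name is illustrative), written against collections.abc.Iterable because the bare collections.Iterable spelling used in the patch (Python 2) is gone on current Python 3:

    import collections.abc
    import networkx as nx

    def normalize_node_labels(network):
        # Relabel coordinate-tuple nodes to integers, caching the original
        # coordinate under a 'coords' attribute; leave integer-labelled
        # graphs (e.g. the fake-node test fixtures) untouched.
        nodes = list(network.nodes())
        if nodes and isinstance(nodes[0], collections.abc.Iterable):
            return nx.convert_node_labels_to_integers(network, label_attribute='coords')
        return network

    g = nx.Graph()
    g.add_edge((0.0, 0.0), (1.0, 1.0))
    print(dict(normalize_node_labels(g).nodes(data=True)))
    # integer labels, original tuples kept under 'coords'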
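
The same Utils hunk stacks the metrics coordinates into coords_vec and fuzzy-matches each network node against them within the TOL tolerance. The body of fuzzy_match is not shown in this hunk, so the sketch below is only a guess at the idea (nearest candidate within a tolerance) and uses plain Euclidean distance, whereas the module's imports of sin, cos, arcsin and sqrt suggest the real code measures great-circle distance:

    import numpy as np

    def fuzzy_match(coord, coords_vec, loc_tol=0.5):
        # Return the metrics coordinate closest to `coord`, or None when even
        # the closest candidate lies outside the tolerance.
        dists = np.sqrt(((coords_vec - np.asarray(coord)) ** 2).sum(axis=1))
        best = int(dists.argmin())
        if dists[best] > loc_tol:
            return None
        return tuple(map(float, coords_vec[best]))

    coords_vec = np.array([[10.0, 10.0], [11.0, 11.0]])
    print(fuzzy_match((10.0000001, 10.0), coords_vec))  # (10.0, 10.0)
    print(fuzzy_match((50.0, 50.0), coords_vec))        # None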
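
Patch 4's switch to pd.DataFrame(..., dtype=object) is what makes the later `fnode is not None` checks workable: in a numeric column pandas promotes None to NaN and the ids to floats, while an object column keeps both the None upstream id of a root node and the integer ids intact. The column names below are the real ones from the results frame; the two rows themselves are illustrative:

    import pandas as pd

    rows = [{'Sequence..Upstream.id': None, 'Sequence..Vertex.id': 0},
            {'Sequence..Upstream.id': 6,    'Sequence..Vertex.id': 3}]

    numeric = pd.DataFrame(rows)                # upstream id becomes float64, None becomes NaN
    boxed   = pd.DataFrame(rows, dtype=object)  # None and integer ids survive

    print(numeric['Sequence..Upstream.id'].iloc[0])        # nan
    print(boxed['Sequence..Upstream.id'].iloc[0] is None)  # True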
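
Patches 4 through 6 then iterate on the guard around the upstream id in _build_edge_wkt: `if fnode:` (patch 4) is too broad, since an upstream id of 0 is falsy and would be skipped along with None (presumably the 'woops' behind patch 5); patch 5 switches to an explicit comparison against None, and patch 6 settles on the PEP 8 form. The pitfall in three lines, with an illustrative value:

    fnode = 0                  # a perfectly valid node id for an upstream node
    print(bool(fnode))         # False -- `if fnode:` would wrongly skip this edge
    print(fnode is not None)   # True  -- the form the series settles on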