Skip to content
Snippets Groups Projects
Commit c4a6242b authored by Malte Bauch's avatar Malte Bauch
Browse files

Merge branch 'v.0.1.0-codename-threadbare' into 28-simple-ncurse-alike-cli-to-manage-gosdn

parents 349fac49 a73fa7dc
No related branches found
No related tags found
3 merge requests!90Develop,!59Resolve "Simple ncurse-alike cli to manage gosdn",!53V.0.1.0 Codename Threadbare
Pipeline #53010 passed
...@@ -3,7 +3,8 @@ variables: ...@@ -3,7 +3,8 @@ variables:
stages: stages:
- test - test
- documentation - build
- deploy
before_script: before_script:
- git config --global url."https://$GO_MODULES_USER:$GO_MODULES_ACCESS_TOKEN@code.fbi.h-da.de".insteadOf "https://code.fbi.h-da.de" - git config --global url."https://$GO_MODULES_USER:$GO_MODULES_ACCESS_TOKEN@code.fbi.h-da.de".insteadOf "https://code.fbi.h-da.de"
...@@ -12,3 +13,4 @@ include: ...@@ -12,3 +13,4 @@ include:
- local: '/build/ci/.code-quality-ci.yml' - local: '/build/ci/.code-quality-ci.yml'
- local: '/build/ci/.documentation-ci.yml' - local: '/build/ci/.documentation-ci.yml'
- local: '/build/ci/.security-and-compliance-ci.yml' - local: '/build/ci/.security-and-compliance-ci.yml'
- local: '/build/ci/.build-container.yml'
\ No newline at end of file
# --- Build stage: compile the goSDN binary with the Go toolchain. ---
FROM golang:1.15-alpine AS builder

# GitLab credentials used to fetch private Go modules from code.fbi.h-da.de.
# NOTE(review): build args remain visible in the builder stage's layer
# history; they do not reach the final image thanks to the multi-stage build.
ARG GITLAB_USER
ARG GITLAB_TOKEN

WORKDIR /src/gosdn
COPY . .

# --no-cache keeps the apk package index out of the image layer.
RUN apk add --no-cache git
RUN git config --global url."https://$GITLAB_USER:$GITLAB_TOKEN@code.fbi.h-da.de".insteadOf "https://code.fbi.h-da.de"
RUN go mod download
# CGO disabled -> fully static binary that runs on plain alpine.
RUN CGO_ENABLED=0 GOOS=linux go build ./cmd/gosdn

# --- Runtime stage: minimal image with only the binary and its configs. ---
FROM alpine:latest
EXPOSE 8443
EXPOSE 55055
COPY --from=builder /src/gosdn/gosdn .
COPY --from=builder /src/gosdn/configs ./configs
ENTRYPOINT [ "./gosdn" ]
# No default arguments. The previous `CMD [""]` handed a single empty-string
# argument to the entrypoint, which the flag parser would treat as a stray
# positional argument.
CMD []
variables:
  # Image reference uniquely identifying the image built for this commit.
  DOCKER_IMAGE_SHA: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA

# Build the goSDN container image for merge requests targeting the default
# branch and for commits on the default branch itself.
build:docker:
  stage: build
  tags:
    - baremetal
  rules:
    - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
  script:
    - >
      docker build \
        --build-arg GITLAB_USER=$GO_MODULES_USER \
        --build-arg GITLAB_TOKEN=$GO_MODULES_ACCESS_TOKEN \
        -t $DOCKER_IMAGE_SHA .

# Shared deploy template: retag the per-commit image as $TAG, push it, and
# optionally trigger a Portainer redeploy via $HOOK.
.deploy: &deploy
  stage: deploy
  needs: ["build:docker"]
  tags:
    - baremetal
  script:
    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
    - docker tag $DOCKER_IMAGE_SHA $TAG
    - docker push $TAG
    # Only call the webhook when one is configured; an unconditional
    # `curl $HOOK` fails the job for consumers that set no HOOK
    # (e.g. deploy:latest).
    - if [ -n "$HOOK" ]; then curl --insecure -X POST $HOOK; fi

deploy:develop:
  variables:
    TAG: $CI_REGISTRY_IMAGE:develop
    HOOK: $PORTAINER_HOOK_DEVELOP
  rules:
    - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH
  <<: *deploy

# Release deploy for git tags. Runs in the deploy stage on the baremetal
# runner like the other deploy jobs.
deploy:tagged:
  stage: deploy
  tags:
    - baremetal
  rules:
    - if: $CI_COMMIT_TAG
  script:
    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
    # Push the release under the registry image path. The previous version
    # tagged and pushed the bare `$CI_COMMIT_TAG`, which is not a valid
    # reference inside this project's registry.
    - docker tag $CI_REGISTRY_IMAGE:latest $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
    - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG

deploy:latest:
  variables:
    TAG: $CI_REGISTRY_IMAGE:latest
  rules:
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
  <<: *deploy
...@@ -5,7 +5,7 @@ documentation:pdf: ...@@ -5,7 +5,7 @@ documentation:pdf:
name: pandoc/latex name: pandoc/latex
entrypoint: entrypoint:
- '' - ''
stage: documentation stage: deploy
rules: rules:
- changes: - changes:
- documentation/design/*.md - documentation/design/*.md
...@@ -22,7 +22,7 @@ documentation:pdf: ...@@ -22,7 +22,7 @@ documentation:pdf:
- cargo install mdbook - cargo install mdbook
image: image:
name: rust:latest name: rust:latest
stage: documentation stage: deploy
script: script:
- mdbook build documentation --dest-dir public - mdbook build documentation --dest-dir public
cache: cache:
......
...@@ -4,13 +4,12 @@ import ( ...@@ -4,13 +4,12 @@ import (
"code.fbi.h-da.de/cocsn/gosdn/log" "code.fbi.h-da.de/cocsn/gosdn/log"
"code.fbi.h-da.de/cocsn/gosdn/nucleus" "code.fbi.h-da.de/cocsn/gosdn/nucleus"
"flag" "flag"
"log/syslog"
) )
func main() { func main() {
// register our supported flags // register our supported flags
cliListenAddr := flag.String("cli-listen-addr", "localhost", "The IP address of the grpcCLI.") cliListenAddr := flag.String("cli-listen-addr", "", "The IP address of the grpcCLI.")
cliListenPort := flag.String("cli-server-port", "55055", "The port number of the grpcCLI") cliListenPort := flag.String("cli-server-port", "55055", "The port number of the grpcCLI")
configFileName := flag.String("config-file", "", "Path to the config file") configFileName := flag.String("config-file", "", "Path to the config file")
...@@ -18,16 +17,6 @@ func main() { ...@@ -18,16 +17,6 @@ func main() {
cliSocket := *cliListenAddr + ":" + *cliListenPort cliSocket := *cliListenAddr + ":" + *cliListenPort
log.Loglevel(log.DEBUG) log.Loglevel(log.DEBUG)
syslogWriter, err := syslog.New(syslog.LOG_ALERT, "gosdn")
defer func() {
if err := syslogWriter.Close(); err != nil {
log.Fatal(err)
}
}()
if err != nil {
log.Fatal(err)
}
log.LoglevelOutput(log.INFO, syslogWriter)
// Setup a channel to communicate if goSDN should shutdown. // Setup a channel to communicate if goSDN should shutdown.
IsRunningChannel := make(chan bool) IsRunningChannel := make(chan bool)
......
# Introduction # Introduction
Lorem ipsum at nusquam appellantur his, labitur bonorum pri no [@dueck:trio]. His no decore nemore graecis. In eos meis nominavi, liber soluta vim cu. Sea commune suavitate interpretaris eu, vix eu libris efficiantur. Data networks consists out of a variety of different network elements, link types, end hosts, services and requirements of such services. Further data networks consists not only of a single plane, but have different (logical) networking planes that have different tasks within any data network, i.e., the control plane, data plane and the network management plane. Keeping track of the different elements, links, hosts, services, their interactions, their runtime behavior on the 3 networking planes is a non-trivial tasks that is usually subsumed under the very broad term of network operations.
There are different approaches for network operations that are not only divided by their logical distinction but also how an implementer, typically network equipment vendor, is implementing the network elements and the particular operations.
We outline two basic approaches to network operation:
1. fully integrated network operations of all networking planes, i.e., usually called the traditional approach.
2. separation of control- and data planes, i.e., usually called the Software Defined Networking (SDN) approach, though there have been implementations of this concept earlier than SDN under other names, e.g., Forwarding and Control Element Separation (ForCES) and others.
## Motivation ## Motivation
## Overarching Project Goals ## Overarching Project Goals
* Keep It Simple, Stupid (KISS) * Keep It Simple, Stupid (KISS)
* Reuse existing technologies bits wherever possible * Reuse existing technologies bits wherever possible if those are stable, i.e., documented, maintained etc, on a long time scale
* Integrate state-of-the-art technologies and methodologies * Integrate state-of-the-art technologies and methodologies
* Document, Document, Document
* Automate almost everything right from the beginning * Automate almost everything right from the beginning
* Document, Document, Document
* be an excellent citizen: test, test, test * be an excellent citizen: test, test, test
* no hacks!
Some unsorted thoughts the be ordered yet: Some unsorted thoughts the be ordered yet:
...@@ -22,4 +31,30 @@ Some unsorted thoughts the be ordered yet: ...@@ -22,4 +31,30 @@ Some unsorted thoughts the be ordered yet:
* modules should be loaded (or unloaded) during runtime of the controller core * modules should be loaded (or unloaded) during runtime of the controller core
## Use Cases to be considered
The development of a general purpose SDN controller is not the primary goal at this early stage of the project.
Instead there are two use cases to be considered in the implementation work that is currently ongoing:
* Primary: optical domain SDN-controller for the CoCSN project
* Secondary: SDN-controller for our local labs to manage an Ethernet-based lab environment
### Primary: optical domain SDN-controller for the CoCSN project
For this use case we initially do not consider the direct control of optical network elements, e.g., Optical Add-Drop Multiplexers (OADM); instead we focus on optical network domains managed by other (SDN) controllers. The goSDN controller communicates with such a domain controller and can request information about the optical network elements, the links between them and the optical and logical configuration of the network domain.
In a second step, the goSDN controller has to communicate with multiple domain controllers and has to find potential interchange points between these multiple domains. This is the preparation for a later step in this use case, when the goSDN controller has to find a network path between two end-points across multiple optical domains, including backup paths.
The intention here is to use an existing SDN southbound interface, very likely based on RESTCONF.
### Secondary: SDN-controller for our local labs to manage an Ethernet-based lab environment
For this use case we consider one of our local labs, e.g., either the telecommunications or networking lab, and how this lab with all its networking parts can be managed by the goSDN controller. In this case, the controller has to learn about all (network) elements, the links and the topology by obtaining all the required information and its own topology computation. This will require an interface between goSDN and the network components that is potentially beyond the typical SDN southbound interfaces.
## Structure of this Memo ## Structure of this Memo
This memo starts with this introduction that sets the stage for the theoretical underpinnings of the SDN-controller
and the actual implementation (and the various choices for this). Chapter 2 discusses the related work and chapter 3
outlines the theoretical foundations related to the control of networks and their relation to SDN. Chapter 4 uses
the output of Chapter 3 to define the conceptual design of the goSDN controller and some discussions about the pro
and cons of conceptual design. Chapter 5 describes the actual design of the current goSDN implementation and is
meant to be a compendium for the source code.
...@@ -61,3 +61,18 @@ Some conceptual building blocks for a network supervisor: ...@@ -61,3 +61,18 @@ Some conceptual building blocks for a network supervisor:
* **Northbound Interface (SBI)** * **Northbound Interface (SBI)**
* **East-West-bound Interface (SBI)** * **East-West-bound Interface (SBI)**
## Applying Changes to What Plane?
Some basic thoughts to dissect how different approaches are applying changes to the various planes.
### Changes to the Control Plane
### Changes to the Data Plane
This is the use case for the SDN approach: A so-called SDN-controller applies policy rules to the data plane. These policy rules are defining the handling of the flows in the networks on a larger scale or to be more precise the handling of more less specified packets.
A change to the data plane will not directly trigger a change to other planes. Though the flow of packets on the data plane can be observed by the control plane and the control plane can take action depending on the data packets.
### Changes to the Management Plane
...@@ -2,6 +2,220 @@ ...@@ -2,6 +2,220 @@
## Why we do this in go ## Why we do this in go
Because it rocks, but let's see afterwards what can be written here.
## Storing Information
Section XXX (Conceptual Design of a SDN Controller as Network Supervisor)
discusses the need to store information about element inventories and
topology inventories.
### Element Inventories
Storing information about network elements and their properties is a relatively
static process, at least when one considers potential changes over time.
Typically such network elements are added to a network and they will remain in
the network for a longer time, i.e., multiple minutes or even longer.
### Topology Inventory
Every network has one given physical topology (G<sub>physical</sub> ) and on
top of this at least one logical topology (G<sub>logical1</sub>). There may be
multiple logical topologies (G<sub>n+1</sub>) on top of logical topologies
(G<sub>n</sub>), i.e., a recursion. Such logical topologies (G<sub>n+1</sub>)
can again have other logical topologies as recursion or other logical topologies
in parallel.
A topology consists out of interfaces, which are attached to their respective
network elements, and links between these interfaces.
Mathematically, such a topology can be described as a directed graph, whereas
the interfaces of the network elements are the nodes and the links are
the edges.
G<sub>physical</sub> is a superset of G<sub>logical1</sub>.
The topology inventory has to store the particular graph for any topology and
also the connections between the different levels of topologies. For instance,
the G<sub>logical1</sub> is linked to G<sub>physical</sub>. (needs to be clear
if changes in n-1 graph has impact on n graph).
For further study at this point: Which type of database and implementation of
databases should be used to store the different topology graphs and their
potential dependencies? How should the interface between gosdn and this
database look like?
Here is an attempt to describe the above text in a graphical representation (kind of... not perfect yet):
```mermaid
graph TB
SubGraph1 --> SubGraph1Flow
subgraph "G_logical1"
SubGraph1Flow(Logical Net)
Node1_l1[Node1_l1] <--> Node2_l1[Node2_l1] <--> Node3_l1[Node3_l1] <--> Node4_l1[Node4_l1] <--> Node5_l1[Node5_l1] <--> Node1_l1[Node1_l1]
end
subgraph "G_physical"
Node1[Node 1] <--> Node2[Node 2] <--> Node3[Node 3]
Node4[Node 4] <--> Node2[Node 2] <--> Node5[Node 5]
Net_physical[Net_physical] --> SubGraph1[Reference to G_logical1]
end
```
### Potential other Inventories
There may be the potential need to store information beyond pure topologies,
actually about network flows, i.e., information about a group of packets
belonging together.
## Database
A database will be used for the management and persistence of network
topologies and their associated elements within goSDN.
Since network topologies are often depicted as graphs, it was obvious to stick
to this concept and, also due to their increasing popularity, to use a graph
database. After a more intensive examination of graph databases it was found
that they (with their labels, nodes, relations and properties) are well suited
for a representation of network topologies.
The first basic idea was to create different single graphs representing the
different network topologies and label each node and edge to ensure a clear
assignment to a topology.
This would mean that nodes and edges of a graph have 1...n labels.
Therefore if you want to display a simple network topology in a graph, you can
display the different network elements as individual nodes and the edges between
network elements as their respective connections, such as Ethernet.
This works with both physical and logical topologies, which are described in
more detail [here](#topology-inventory).
So a simple topology in a graph database could look like shown below.
```mermaid
graph TD
A[Node 1 - Label: 'Host,physical'] -->|Ethernet - Label: 'physical'| B[Node 2 - Label: 'Hub,physical']
C[Node 3 - Label: 'Host,physical'] -->|Ethernet - Label: 'physical'| B
B -->|Ethernet - Label: 'physical'| D[Node 4 - Label: 'Host,physical']
B -->|Ethernet - Label: 'physical'| E[Node 5 - Label: 'Host,physical']
```
For this purpose some experiments with the [Redis](https://redis.io/)-Database
module [`RedisGraph`](https://oss.redislabs.com/redisgraph/) were carried out
first. The basic implementation was possible, but the function of assigning
several labels to one node/edge is missing (originally we considered this to be
indispensable especially to map different topologies).
For this reason we looked around for an alternative and with
[neo4j](https://neo4j.com/) we found a graph database, which gives us the
possibility to label nodes and edges with a multitude of labels and offers a
wide range of additional plugins such as [apoc](https://neo4j.com/labs/apoc/).
### neo4j
TODO: add a little description for neo4j in general
#### Implementation With neo4j
The current implementation offers the possibility to persist different network
elements (e.g. devices, interfaces...) and their physical topology and mainly
serves to represent the prototypical dataflow of goSDN to the database.
The following figure shows our first idea of a persistence of network
topologies with neo4j (to save space, only the labels were included).
```mermaid
graph TD
PND[PND 1]
A --> |belongs to| PND
B --> |belongs to| PND
C --> |belongs to| PND
D --> |belongs to| PND
E --> |belongs to| PND
A[Label: 'Host,physical,logical1'] --> |Label: 'physical'| B[Label: 'Hub,physical,logical1']
D[Label: 'Host,physical,logical1'] --> |Label: 'physical'| B
B --> |Label: 'physical'| C[Label: 'Host,physical,logical1']
B --> |Label: 'physical'| E[Label: 'Host,physical,logical1']
A --> |Label: 'logical1'| B
B --> |Label: 'logical1'| C
C --> |Label: 'logical1'| D
D --> |Label: 'logical1'| E
E --> |Label: 'logical1'| A
```
The basic idea is to assign the different network elements to a specific
Principal Network Domain (PND). The different topologies are represented by a
neo4j relationship between the network elements that are stored as neo4j nodes.
However, with this current variant it is not possible, as required in
[Topology Inventory](#topology-inventory), to represent topologies that are hierarchically
interdependent, since neo4j does not allow relations to be stored as properties
(as described [here](https://neo4j.com/docs/cypher-manual/current/syntax/values/#structural-types)).
Furthermore, multiple links between the same nodes which belong to the same
topology are difficult to represent, since this model only provides a single
link between nodes of a certain topology.
For the reason mentioned above, a more complex idea for persistence is available
for the further development, which hopefully allows us to persist and map
network elements, PNDs and topologies with all their hierarchical dependencies.
The following figure tries to visualize this idea.
```mermaid
graph TD
subgraph "dependencies of topologies"
logical1 -->|related_to| physical
logical5 -->|related_to| physical
logical3 -->|related_to| logical1
end
subgraph "every node belongs to a specific PND"
Node1 -->|belongs_to| PND
Node2 -->|belongs_to| PND
Node3 -->|belongs_to| PND
Node4 -->|belongs_to| PND
Node5 -->|belongs_to| PND
end
subgraph "relationship between nodes (nodes can be linked by 0...n links)"
lp2[link_physical]
lp3[link_physical]
lp4[link_physical]
lp5[link_logical1]
lp2 --> |connects| Node4
lp2 --> |connects| Node2
lp3 --> |connects| Node2
lp3 --> |connects| Node3
lp4 --> |connects| Node2
lp4 --> |connects| Node5
lp5 --> |connects| Node1
lp5 --> |connects| Node2
end
subgraph "links are part of a topology"
lp1[link_physical]
lp1 --> |connects| Node1
lp1 --> |connects| Node2
lp1 --> |part_of| physical
end
subgraph "links can contain 1...n layers"
lp2 --> |contains| ODUH
lp2 --> |contains| OTUCN
lp2 --> |contains| ODUCN
end
```
The basic structure explained in the upper part remains the same.
However, the relations, which previously served as links between the respective
nodes, now become **separate nodes**. These nodes now act as links between the
respective network elements and are part of a network topology (which itself
is represented as a separate node in the graph). By this change, network
topologies can now be interdependent. Furthermore, as can be seen in the figure
above, you can add additional nodes to the link nodes by using this scheme.
So a physical link between two nodes could e.g. **contain** several cables.
All other information can be stored in the properties of the respective nodes/edges.
The above idea is not yet approved and there are still open questions.
- Is there a better solution for the assumption that there are several different physical connections between the same nodes than separate link nodes between them?
- Can topologies run over different PNDs -> membership to different PNDs?
- Where can we benefit from using different layers? (e.g. possible saving of unnecessary relations between nodes)
- Do the sdn controllers provide us with the necessary information to map the topologies in this way?
- ...
## YANG to code ## YANG to code
...@@ -11,7 +225,7 @@ The base of the development of goSDN are YANG modules. The RESTful API used for ...@@ -11,7 +225,7 @@ The base of the development of goSDN are YANG modules. The RESTful API used for
### YANG ### YANG
YANG defines an abstract netwoprk interface. It is the foundation of the RESTCONF protocol. Several code generators exist to generate code stubs from a given definition. YANG defines an abstract network interface. It is the foundation of the RESTCONF protocol. Several code generators exist to generate code stubs from a given definition.
### OpenAPI ### OpenAPI
...@@ -29,18 +243,18 @@ For now we can only use the OpenAPI 2.0 standard. This is because `go-swagger` d ...@@ -29,18 +243,18 @@ For now we can only use the OpenAPI 2.0 standard. This is because `go-swagger` d
## Storing Information ## Storing Information
This section keeps by now some loose thoughts about what information has to be stored how and where. This section keeps by now some loose thoughts about what information has to be stored how and where.
There seem to be two classes of information to be stored in the controller: There seem to be two classes of information to be stored in the controller:
* short-living information, such as, current configured network flows or obtained network configuration out of use case #1 (CoCSN) * short-living information, such as, current configured network flows or obtained network configuration out of use case #1 (CoCSN)
* long-time information, such as, information about principle network domains, elements in such a domain if directly learned from SBI, etc * long-time information, such as, information about principle network domains, elements in such a domain if directly learned from SBI, etc
Long-time information should be persistenly stored in the database and survive reboots of goSDN etc. Short-Living information doesn't have to survive reboots of goSDN Long-time information should be persistenly stored in the database and survive reboots of goSDN etc. Short-Living information doesn't have to survive reboots of goSDN
### Some more details for implementation for the database(s) ### Some more details for implementation for the database(s)
We define the principle network domain (PND) and each piece of information of any PND has to be stored in relation the particular PND. We define the principle network domain (PND) and each piece of information of any PND has to be stored in relation the particular PND.
Specification of a PND: Specification of a PND:
* Human readable name of PND * Human readable name of PND
...@@ -48,4 +262,4 @@ Specification of a PND: ...@@ -48,4 +262,4 @@ Specification of a PND:
* Set of supported Southbound-Interfaces, e.g., RESTCONF, TAPI, OpenFlow etc * Set of supported Southbound-Interfaces, e.g., RESTCONF, TAPI, OpenFlow etc
* Physical Inventory Network Elements, hosts and links, pontentially only the SBI SDN controller * Physical Inventory Network Elements, hosts and links, pontentially only the SBI SDN controller
A PND entry must be explicitly generated, though some information can be automatically be generated, e.g., the physical inventory for use-case #1 (CoCSN) would mean that the information about the SBI domain specific SDN controller is entered. A PND entry must be explicitly generated, though some information can be automatically be generated, e.g., the physical inventory for use-case #1 (CoCSN) would mean that the information about the SBI domain specific SDN controller is entered.
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment