I have a Python script that is supposed to build a linear topology of length 2 with two controllers: a custom POX controller and a standard one. Pinging between the hosts does not work, and the only hint I have is: "You could rely on the L3_learning component documentation: ... "
from mininet.log import setLogLevel, info
from mininet.net import Mininet
from mininet.topolib import TreeTopo
from mininet.cli import CLI
from mininet.node import Controller, OVSSwitch
from os import environ
# DO NOT MODIFY THIS PART
POXDIR = environ[ 'HOME' ] + '/pox'
class CustomPOX( Controller ):
    "Custom POX() subclass that defines the POX component to load"
    def __init__( self, name, cdir=POXDIR,
                  command='python pox.py',
                  cargs=( 'openflow.of_01 --port=%s '
                          'forwarding.l2_learning' ),
                  **kwargs ):
        Controller.__init__( self, name, cdir=cdir,
                             command=command,
                             cargs=cargs, **kwargs )

# controllers={ 'pox': POX }

class CustomSwitch( OVSSwitch ):
    "Custom Switch() subclass that connects to different controllers"
    def start( self, controllers ):
        return OVSSwitch.start( self, [ cmap[ self.name ] ] )
# THIS PART CAN BE CHANGED AND ADAPTED IF REQUIRED
setLogLevel( 'info' )
def multiControllerNet():
    "Create a network from semi-scratch with multiple controllers."
    c1 = CustomPOX( 'pox', ip='127.0.0.1', port=6633 )
    c2 = Controller( 'c2', port=6633 )
    cmap = { 's1': c1, 's2': c2 }
    net = Mininet( switch=CustomSwitch, build=False, waitConnected=True )
    info( "*** Creating (reference) controllers\n" )
    for c in [ c1, c2 ]:
        net.addController( c )
    info( "*** Creating switches\n" )
    s1 = net.addSwitch( 's1' )
    s2 = net.addSwitch( 's2' )
    info( "*** Creating hosts\n" )
    h1 = net.addHost( 'host1', mac='00:00:00:00:10:01', ip='10.0.10.1/24' )
    h2 = net.addHost( 'host2', mac='00:00:00:00:20:01', ip='10.0.30.1/24' )
    info( "*** Creating links\n" )
    net.addLink( s1, h1 )
    net.addLink( s2, h2 )
    net.addLink( s1, s2 )
    info( "*** Starting network\n" )
    net.build()
    c1.start()
    c2.start()
    s1.start( [ c1 ] )
    s2.start( [ c2 ] )
    info( "*** Testing network\n" )
    net.pingAll()
    info( "*** Running CLI\n" )
    CLI( net )
    info( "*** Stopping network\n" )
    net.stop()

if __name__ == '__main__':
    setLogLevel( 'info' )  # for CLI output
    multiControllerNet()
The comments in the script are not mine.
First of all, I'm not sure why the hosts can't communicate. Is it because two switches under two different controllers are not linked by default?
I tried swapping l2_learning for l3_learning, but it didn't change anything, which seemed normal to me: reading the documentation, I don't see what the advantage would be in this situation.
I also tried assigning/changing the IP addresses of the controllers, but it had no effect.
Should I add a rule so that the controllers connect the switches? If so, is this topic (How to add flow rules in POX controller) the right solution? It doesn't seem to fit the logic of my script.
Or is it more a matter of changing/adding a parameter when I define my controllers?
OK, it was just a matter of switching l2_learning to l3_learning and adding IP routes in the Mininet CLI (host1 ip route add 10.0.30.0/24 dev host1-eth0 and host2 ip route add 10.0.10.0/24 dev host2-eth0).
l3_learning floods traffic for hosts it doesn't know yet, so the packets reach the other switch.
After that, start the POX controller and the ping works!
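In code, the two changes amount to something like this (a minimal sketch; running the routes from the script via cmd() is my own variation on the CLI commands above):
# 1) load the L3 learning component instead of L2 in the custom controller
cargs=( 'openflow.of_01 --port=%s '
        'forwarding.l3_learning' )
# 2) give each host a route to the other subnet, e.g. after net.build()
h1.cmd( 'ip route add 10.0.30.0/24 dev host1-eth0' )
h2.cmd( 'ip route add 10.0.10.0/24 dev host2-eth0' )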
What I expect:
A ROS node is run; it has a publisher and a subscriber coded into it, plus another, dynamic subscriber.
The publisher publishes data about the node to a master node, which then publishes back to the subscriber in the node:
a) a topic
b) the type of message on that topic
Taking this information, the node must assign the topic and message type to the dynamic subscriber.
The same code will be run multiple times, but with different data being published to the master node, so each instance gets its topic from the master node individually, and the topics may or may not be the same.
To be honest, I am pretty new to ROS, I haven't tried much, and I am unsure where to start.
I think it's not good practice and I would recommend not doing it, but here's how you can:
Let's take the "slave" node.
You need to publish info about this node; let's assume we publish the node name, node1. So you create a publisher that publishes a String message.
Create a subscriber that receives a String message. Your node needs to wait for this subscriber to receive a message containing the name of the new topic and its message type (separated by a comma); you can then split it into topic and type.
You will run into the problem that you need to import this "new message type", which changes from node to node. That is actually doable; for example, see this answer.
Now your code should look something like this (not a running version):
#!/usr/bin/env python
import rospy
from std_msgs.msg import String

class Nodes(object):
    def __init__(self, name):
        rospy.init_node('node_name', anonymous=True)
        self.Name = name
        self.pub = rospy.Publisher('nodes_info', String, queue_size=10)
        self.pub.publish(self.Name)
        self.sub_once = rospy.Subscriber('master_topic', String, self.sub_callback, queue_size=10)
        self.rate = 10
        self.sub_msg = None
        self.topic = None
        self.type = None

    def sub_callback(self, msg):
        self.sub_msg = msg.data
        chunks = self.sub_msg.split(',')
        self.topic = chunks[0]
        self.type = chunks[1]
        ##
        # DO dynamic import here (turn self.type into an actual message class)
        ##
        self.sub_once.unregister()

    def callback(self, data):
        # DO SOMETHING (your main callback)
        d = data

    def spin(self):
        r = rospy.Rate(self.rate)
        # wait until the master node has told us which topic/type to use
        while self.sub_msg is None:
            r.sleep()
        # create the dynamic subscriber once, with the received topic and type
        rospy.Subscriber(self.topic, self.type, self.callback)
        while not rospy.is_shutdown():
            print(self.sub_msg)
            r.sleep()

if __name__ == "__main__":
    SC = Nodes("node1")
    SC.spin()
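For the "DO dynamic import here" part, one option (my assumption, not something stated in the question) is roslib.message.get_message_class, which resolves a type string such as 'std_msgs/String' into the corresponding message class:
from roslib.message import get_message_class  # part of the ROS 1 roslib package

# e.g. 'std_msgs/String' -> the std_msgs.msg.String class
msg_class = get_message_class('std_msgs/String')
In sub_callback, self.type could be set to the class returned for chunks[1] and then passed directly to rospy.Subscriber in spin().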
I'm exploring Kubeflow as an option to deploy and connect the various components of a typical ML pipeline. I'm using Docker containers as Kubeflow components, and so far I've been unable to successfully use the ContainerOp file_outputs argument to pass results between components.
Based on my understanding of the feature, creating and saving to a file that's declared as one of the file_outputs of a component should cause it to persist and be accessible for reading by the following component.
This is how I attempted to declare this in my pipeline python code:
import kfp.dsl as dsl
import kfp.gcp as gcp

@dsl.pipeline(name='kubeflow demo')
def pipeline(project_id='kubeflow-demo-254012'):
    data_collector = dsl.ContainerOp(
        name='data collector',
        image='eu.gcr.io/kubeflow-demo-254012/data-collector',
        arguments=[ "--project_id", project_id ],
        file_outputs={ "output": '/output.txt' }
    )
    data_preprocessor = dsl.ContainerOp(
        name='data preprocessor',
        image='eu.gcr.io/kubeflow-demo-254012/data-preprocessor',
        arguments=[ "--project_id", project_id ]
    )
    data_preprocessor.after(data_collector)
    #TODO: add other components

if __name__ == '__main__':
    import kfp.compiler as compiler
    compiler.Compiler().compile(pipeline, __file__ + '.tar.gz')
In the Python code for the data-collector.py component I fetch the dataset and then write it to /output.txt. I'm able to read from the file within the same component, but not inside data-preprocessor.py, where I get a FileNotFoundError.
Is the use of file_outputs invalid for container-based Kubeflow components or am I incorrectly using it in my code? If it's not an option in my case, is it possible to programmatically create Kubernetes volumes inside the pipeline declaration python code and use them instead of file_outputs?
Files created in one Kubeflow pipeline component are local to that container. To reference the output in subsequent steps, you need to pass it along like this:
data_preprocessor = dsl.ContainerOp(
    name='data preprocessor',
    image='eu.gcr.io/kubeflow-demo-254012/data-preprocessor',
    arguments=[ "--fetched_dataset", data_collector.outputs['output'],
                "--project_id", project_id,
    ]
)
Note: data_collector.outputs['output'] will contain the actual string contents of the file /output.txt (not a path to the file). If you want it to contain the path of the file, you'll need to write the dataset to shared storage (such as S3 or a mounted PVC volume) and write the path/link to that shared storage to /output.txt. data_preprocessor can then read the dataset from that path.
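For instance, the data-collector side could look roughly like this (a sketch only; the bucket path is invented for illustration):
# data-collector.py (sketch)
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--project_id")
args = parser.parse_args()

# ... fetch the dataset and upload it to shared storage ...
dataset_uri = "gs://some-shared-bucket/datasets/raw.csv"  # hypothetical location

# whatever is written here becomes data_collector.outputs['output']
with open("/output.txt", "w") as f:
    f.write(dataset_uri)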
There are three main steps:
Save an /output.txt file containing the data/parameter/anything that you want to pass to the next component.
Note: it has to be at the root level, i.e. /output.txt.
Pass file_outputs={'output': '/output.txt'} to the ContainerOp, as shown in the example below.
Inside the container_op that you write in dsl.pipeline, pass the argument (to whichever argument of the component needs the output of the earlier component) as comp1.output (here comp1 is the first component, which produces the output and stores it in /output.txt).
import kfp
from kfp import dsl

def SendMsg(
    send_msg: str = 'akash'
):
    return dsl.ContainerOp(
        name = 'Print msg',
        image = 'docker.io/akashdesarda/comp1:latest',
        command = ['python', 'msg.py'],
        arguments=[
            '--msg', send_msg
        ],
        file_outputs={
            'output': '/output.txt',
        }
    )

def GetMsg(
    get_msg: str
):
    return dsl.ContainerOp(
        name = 'Read msg from 1st component',
        image = 'docker.io/akashdesarda/comp2:latest',
        command = ['python', 'msg.py'],
        arguments=[
            '--msg', get_msg
        ]
    )

@dsl.pipeline(
    name = 'Pass parameter',
    description = 'Passing para')
def passing_parameter(send_msg):
    comp1 = SendMsg(send_msg)
    comp2 = GetMsg(comp1.output)

if __name__ == '__main__':
    import kfp.compiler as compiler
    compiler.Compiler().compile(passing_parameter, __file__ + '.tar.gz')
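The msg.py scripts baked into the two images are not shown in the answer; comp1's script might look something like this (a hypothetical sketch, just to show where /output.txt comes from):
# msg.py inside the comp1 image (hypothetical)
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--msg")
args = parser.parse_args()

print("comp1 got:", args.msg)

# the value written here is what comp2 later receives as comp1.output
with open("/output.txt", "w") as f:
    f.write(args.msg)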
You don't have to write the data to shared storage; you can use kfp.dsl.InputArgumentPath to pass an output from a Python function to the input of a container op.
@kfp.dsl.pipeline(
    name='Build Model Server Pipeline',
    description='Build a kserve model server pipeline.'
)
def build_model_server_pipeline(s3_src_path):
    download_s3_files_task = download_archive_step(s3_src_path)
    tarball_path = "/tmp/artifact.tar"
    artifact_tarball = kfp.dsl.InputArgumentPath(download_s3_files_task.outputs['output_tarball'], path=tarball_path)
    build_container = kfp.dsl.ContainerOp(name='build_container',
                                          image='python:3.8',
                                          command=['sh', '-c'],
                                          arguments=[
                                              'ls -l ' + tarball_path + ';'
                                          ],
                                          artifact_argument_paths=[artifact_tarball],
                                          )
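As for the last part of the original question (creating Kubernetes volumes programmatically instead of relying on file_outputs): the v1 DSL also offers dsl.VolumeOp, which creates a PVC that several steps can mount. A rough sketch, inside the pipeline function from the question and assuming the same ContainerOps:
vop = dsl.VolumeOp(
    name="create-shared-volume",
    resource_name="shared-pvc",   # hypothetical PVC name
    size="1Gi",
    modes=dsl.VOLUME_MODE_RWO,
)
data_collector = dsl.ContainerOp(
    name='data collector',
    image='eu.gcr.io/kubeflow-demo-254012/data-collector',
    arguments=[ "--project_id", project_id ],
    pvolumes={ "/mnt/shared": vop.volume },   # both steps read/write under this mount
)
data_preprocessor = dsl.ContainerOp(
    name='data preprocessor',
    image='eu.gcr.io/kubeflow-demo-254012/data-preprocessor',
    arguments=[ "--project_id", project_id ],
    pvolumes={ "/mnt/shared": vop.volume },
)
data_preprocessor.after(data_collector)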
I am attempting to use Kong's API Gateway, and I want to use its serverless plugin to add some custom logic before Kong processes the request. Basically, I want to read a JSON file that is volume-mapped into the Kong container and do some things with it before the request is processed.
When I follow their example I am able to see the logs printed. However, when I start adding Lua code (in custom-auth.lua) it complains. A simple print("hello world") statement at the top of the file gives me the following error:
2021/02/05 02:00:19 [error] 22#0: *8042 [kong] init.lua:270 [pre-function] /usr/local/share/lua/5.1/sandbox.lua:170: [string "..."]:3: attempt to index global 'print' (a nil value), client: 172.31.0.1, server: kong, request: "GET /v1/myEndpoint HTTP/2.0", host: "localhost:8443"
Code reference to error:
170 if not t[1] then error(t[2]) end
sandbox.lua:
local sandbox = {
_VERSION = "sandbox 0.5",
_DESCRIPTION = "A pure-lua solution for running untrusted Lua code.",
_URL = "https://github.com/kikito/sandbox.lua",
_LICENSE = [[
MIT LICENSE
Copyright (c) 2021 Enrique García Cota
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
]],
}
-- quotas don't work in LuaJIT since debug.sethook works differently there
local quota_supported = type(_G.jit) == "nil"
sandbox.quota_supported = quota_supported
-- PUC-Rio Lua 5.1 does not support deactivation of bytecode
local bytecode_blocked = _ENV or type(_G.jit) == "table"
sandbox.bytecode_blocked = bytecode_blocked
-- The base environment is merged with the given env option (or an empty table, if no env provided)
--
local BASE_ENV = {}
-- List of unsafe packages/functions:
--
-- * string.rep: can be used to allocate millions of bytes in 1 operation
-- * {set|get}metatable: can be used to modify the metatable of global objects (strings, integers)
-- * collectgarbage: can affect performance of other systems
-- * dofile: can access the server filesystem
-- * _G: It has access to everything. It can be mocked to other things though.
-- * load{file|string}: All unsafe because they can grant acces to global env
-- * raw{get|set|equal}: Potentially unsafe
-- * module|require|module: Can modify the host settings
-- * string.dump: Can display confidential server info (implementation of functions)
-- * math.randomseed: Can affect the host sytem
-- * io.*, os.*: Most stuff there is unsafe, see below for exceptions
-- Safe packages/functions below
([[
_VERSION assert error ipairs next pairs
pcall select tonumber tostring type unpack xpcall
coroutine.create coroutine.resume coroutine.running coroutine.status
coroutine.wrap coroutine.yield
math.abs math.acos math.asin math.atan math.atan2 math.ceil
math.cos math.cosh math.deg math.exp math.fmod math.floor
math.frexp math.huge math.ldexp math.log math.log10 math.max
math.min math.modf math.pi math.pow math.rad math.random
math.sin math.sinh math.sqrt math.tan math.tanh
os.clock os.difftime os.time
string.byte string.char string.find string.format string.gmatch
string.gsub string.len string.lower string.match string.reverse
string.sub string.upper
table.insert table.maxn table.remove table.sort
]]):gsub('%S+', function(id)
  local module, method = id:match('([^%.]+)%.([^%.]+)')
  if module then
    BASE_ENV[module] = BASE_ENV[module] or {}
    BASE_ENV[module][method] = _G[module][method]
  else
    BASE_ENV[id] = _G[id]
  end
end)
local function protect_module(module, module_name)
  return setmetatable({}, {
    __index = module,
    __newindex = function(_, attr_name, _)
      error('Can not modify ' .. module_name .. '.' .. attr_name .. '. Protected by the sandbox.')
    end
  })
end
('coroutine math os string table'):gsub('%S+', function(module_name)
  BASE_ENV[module_name] = protect_module(BASE_ENV[module_name], module_name)
end)
-- auxiliary functions/variables
local string_rep = string.rep
local function sethook(f, key, quota)
  if type(debug) ~= 'table' or type(debug.sethook) ~= 'function' then return end
  debug.sethook(f, key, quota)
end

local function cleanup()
  sethook()
  string.rep = string_rep -- luacheck: no global
end
-- Public interface: sandbox.protect
function sandbox.protect(code, options)
  options = options or {}

  local quota = false
  if options.quota and not quota_supported then
    error("options.quota is not supported on this environment (usually LuaJIT). Please unset options.quota")
  end
  if options.quota ~= false then
    quota = options.quota or 500000
  end

  assert(type(code) == 'string', "expected a string")

  local passed_env = options.env or {}
  local env = {}
  for k, v in pairs(BASE_ENV) do
    local pv = passed_env[k]
    if pv ~= nil then
      env[k] = pv
    else
      env[k] = v
    end
  end
  setmetatable(env, { __index = options.env })
  env._G = env

  local f
  if bytecode_blocked then
    f = assert(load(code, nil, 't', env))
  else
    f = assert(loadstring(code))
    setfenv(f, env)
  end

  return function(...)
    if quota and quota_supported then
      local timeout = function()
        cleanup()
        error('Quota exceeded: ' .. tostring(quota))
      end
      sethook(timeout, "", quota)
    end

    string.rep = nil -- luacheck: no global

    local t = table.pack(pcall(f, ...))
    cleanup()
    if not t[1] then error(t[2]) end
    return table.unpack(t, 2, t.n)
  end
end
-- Public interface: sandbox.run
function sandbox.run(code, options, ...)
  return sandbox.protect(code, options)(...)
end
-- make sandbox(f) == sandbox.protect(f)
setmetatable(sandbox, {__call = function(_,code,o) return sandbox.protect(code,o) end})
return sandbox
Any help would be appreciated.
Figured it out: Kong runs admin-supplied Lua in a sandbox by default, but this can be changed to allow all modules by setting the environment variable KONG_UNTRUSTED_LUA: "on" when starting Kong, or in kong.conf:
untrusted_lua = on
# Accepted values are:
#
# - `off`: disallow any loading of Lua functions
# from admin supplied sources (such as via the Admin API).
#
# Note using the `off` option will render plugins such as
# Serverless Functions unusable.
# - `sandbox`: allow loading of Lua functions from admin
# supplied sources, but use a sandbox when
# executing them. The sandboxed
# function will have restricted access
# to the global environment and only
# have access to standard Lua functions
# that will generally not cause harm to
# the Kong node.
#
# In this mode, the `require` function inside
# the sandbox only allows loading external Lua
# modules that are explicitly listed in
# `untrusted_lua_sandbox_requires` below.
#
# LuaJIT bytecode loading is disabled.
#
# Warning: LuaJIT is not designed as a secure
# runtime for running malicious code, therefore,
# you should properly protect your Admin API endpoint
# even with sandboxing enabled. The sandbox only
# provides protection against trivial attackers or
# unintentional modification of the Kong global
# environment.
# - `on`: allow loading of Lua functions from admin
# supplied sources and do not use a sandbox when
# executing them. Functions will have unrestricted
# access to global environment and able to load any
# Lua modules. This is similar to the behavior in Kong
# prior to 2.3.0.
#
# LuaJIT bytecode loading is disabled.
I'm working on a problem in which I only want to create a particular rule if a certain Bazel config has been specified (via '--config'). We have been using Bazel since 0.11 and have a bunch of build infrastructure that works around former limitations in Bazel. I am incrementally porting us up to newer versions. One of the features that was missing was compiler transitions, and so we rolled our own using configs and some external scripts.
My first attempt at solving my problem looks like this:
load("@rules_cc//cc:defs.bzl", "cc_library")

# use this with a select to pick targets to include/exclude based on config
# see __build_if_role for an example
def noop_impl(ctx):
    pass

noop = rule(
    implementation = noop_impl,
    attrs = {
        "deps": attr.label_list(),
    },
)

def __sanitize(config):
    if len(config) > 2 and config[:2] == "//":
        config = config[2:]
    return config.replace(":", "_").replace("/", "_")

def build_if_config(**kwargs):
    config = kwargs['config']
    kwargs.pop('config')
    name = kwargs['name'] + '_' + __sanitize(config)
    binary_target_name = kwargs['name']
    kwargs['name'] = binary_target_name
    cc_library(**kwargs)
    noop(
        name = name,
        deps = select({
            config: [ binary_target_name ],
            "//conditions:default": [],
        })
    )
This almost gets me there, but the problem is that if I want to build a library as an output, then it becomes an intermediate dependency, and therefore gets deleted or never built.
For example, if I do this:
build_if_config(
    name="some_lib",
    srcs=[ "foo.c" ],
    config="//:my_config",
)
and then I run
bazel build --config my_config //:some_lib
Then libsome_lib.a does not make it to bazel-out, although if I define it using cc_library, then it does.
Is there a way that I can just create the appropriate rule directly in the macro instead of creating a noop rule and using a select? Or another mechanism?
Thanks in advance for your help!
As I noted in my comment, I was misunderstanding how Bazel figures out its dependencies. The "create a file" section of the Rules Tutorial explains some of the details, and I followed along with it for part of my solution.
Basically, the problem was not that the built files were not sticking around; it was that they were never getting built. Bazel did not know to look at the deps attribute and build those things: I had to create an action which uses the deps, and then declare the action's output by returning a (list containing a) DefaultInfo.
Below is my new noop_impl function:
def noop_impl(ctx):
    if len(ctx.attr.deps) == 0:
        return None

    # ctx.attr has the attributes of this rule
    dep = ctx.attr.deps[0]

    # DefaultInfo is apparently some sort of globally available
    # class that can be used to index Target objects
    infile = dep[DefaultInfo].files.to_list()[0]

    outfile = ctx.actions.declare_file('lib' + ctx.label.name + '.a')
    ctx.actions.run_shell(
        inputs = [infile],
        outputs = [outfile],
        command = "cp %s %s" % (infile.path, outfile.path),
    )

    # we can also instantiate a DefaultInfo to indicate what output
    # we provide
    return [DefaultInfo(files = depset([outfile]))]