diff -pruN 1:16.0.0-3.1/AUTHORS 1:19.0.0+git2023011010.0a69b971-0ubuntu1/AUTHORS
--- 1:16.0.0-3.1/AUTHORS	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/AUTHORS	2023-01-10 15:13:36.000000000 +0000
@@ -0,0 +1,207 @@
+Aaron Rosen <arosen@nicira.com>
+Abdallah Yasin <abdallahyas@mellanox.com>
+Abhishek Chanda <abhishek@cloudscaling.com>
+Adam Harwell <flux.adam@gmail.com>
+Adrian Chiris <adrianc@mellanox.com>
+Akihiro MOTOKI <motoki@da.jp.nec.com>
+Akihiro Motoki <motoki@da.jp.nec.com>
+Aleks Chirko <achirko@mirantis.com>
+Alessandro Pilotti <ap@pilotti.it>
+Alessio Ababilov <aababilo@yahoo-inc.com>
+Alessio Ababilov <aababilov@griddynamics.com>
+AmichayPolishuk <amichayp@mellanox.com>
+Amir Sadoughi <amir.sadoughi@gmail.com>
+Andre Pech <apech@aristanetworks.com>
+Andreas Jaeger <aj@suse.com>
+Andreas Jaeger <aj@suse.de>
+Angus Lees <gus@inodes.org>
+Ann Kamyshnikova <akamyshnikova@mirantis.com>
+Arvind Somy <asomya@cisco.com>
+Arvind Somya <asomya@cisco.com>
+Assaf Muller <amuller@redhat.com>
+Bhuvan Arumugam <bhuvan@apache.org>
+Blair Bethwaite <blair.bethwaite@gmail.com>
+Bob Kukura <rkukura@redhat.com>
+Bob Melander <bob.melander@gmail.com>
+Boden R <bodenvmw@gmail.com>
+Brad Hall <bhall@nicira.com>
+Brad Hall <brad@nicira.com>
+Brant Knudson <bknudson@us.ibm.com>
+Carl Baldwin <carl.baldwin@hp.com>
+Cedric Brandily <zzelle@gmail.com>
+Chang Bo Guo <guochbo@cn.ibm.com>
+Christian Berendt <berendt@b1-systems.de>
+Chuck Short <chuck.short@canonical.com>
+Clark Boylan <clark.boylan@gmail.com>
+Clint Byrum <clint@fewbar.com>
+Corey Bryant <corey.bryant@canonical.com>
+Dan Prince <dprince@redhat.com>
+Dan Wendlandt <dan@nicira.com>
+Davanum Srinivas <dims@linux.vnet.ibm.com>
+Deepak N <deepak.n@thoughtworks.com>
+Dirk Mueller <dirk@dmllr.de>
+Doug Hellmann <doug@doughellmann.com>
+Doug Wiegley <dougw@a10networks.com>
+Edan David <edand@mellanox.com>
+Edgar Magana <emagana@gmail.com>
+Edgar Magana <eperdomo@cisco.com>
+Emilien Macchi <emilien.macchi@stackops.com>
+Eugene Nikanorov <enikanorov@mirantis.com>
+Gary Kotton <gkotton@redhat.com>
+Gary Kotton <gkotton@vmware.com>
+Gauvain Pocentek <gauvain@pocentek.net>
+Gordon Chung <gord@live.ca>
+Hamdy Khader <hamdyk@mellanox.com>
+Hareesh Puthalath <hareesh.puthalath@gmail.com>
+He Jie Xu <xuhj@linux.vnet.ibm.com>
+Hemanth Ravi <hemanth.ravi@oneconvergence.com>
+Henry Gessau <gessau@cisco.com>
+Henry Gessau <gessau@gmail.com>
+HenryVIII <gessau@cisco.com>
+Hirofumi Ichihara <ichihara.hirofumi@lab.ntt.co.jp>
+Hong Hui Xiao <xiaohhui@cn.ibm.com>
+Ignacio Scopetta <ignacio@embrane.com>
+Ihar Hrachyshka <ihrachys@redhat.com>
+Ionuț Arțăriși <iartarisi@suse.cz>
+Irena Berezovsky <irenab@mellanox.com>
+Isaku Yamahata <isaku.yamahata@intel.com>
+Isaku Yamahata <yamahata@valinux.co.jp>
+JJ Asghar <jj@getchef.com>
+Jacek Swiderski <jacek.swiderski@codilime.com>
+Jakub Libosvar <libosvar@redhat.com>
+James E. Blair <james.blair@rackspace.com>
+James E. Blair <jeblair@hp.com>
+Janonymous <janonymous.codevulture@gmail.com>
+Jason Kölker <jason@koelker.net>
+Javier Pena <jpena@redhat.com>
+Jay Pipes <jaypipes@gmail.com>
+Jeremy Stanley <fungi@yuggoth.org>
+Jiajun Liu <jiajun@unitedstack.com>
+Joe Gordon <joe.gordon0@gmail.com>
+John Dunning <jrd@jrd.org>
+Jordan Tardif <jordan@dreamhost.com>
+Juliano Martinez <juliano.martinez@locaweb.com.br>
+Julien Danjou <julien@danjou.info>
+Justin Hammond <justin.hammond@rackspace.com>
+Justin Lund <justin.lund@dreamhost.com>
+Keshava Bharadwaj <kb.sankethi@gmail.com>
+Kevin Benton <blak111@gmail.com>
+Kevin L. Mitchell <kevin.mitchell@rackspace.com>
+Kris Lindgren <klindgren@godaddy.com>
+Kun Huang <gareth@unitedstack.com>
+Kyle Mestery <kmestery@cisco.com>
+Lenny Verkhovsky <lennyb@mellanox.com>
+Luke Gorrie <luke@snabb.co>
+Major Hayden <major@mhtx.net>
+Mark Goddard <mark@stackhpc.com>
+Mark McClain <mark.mcclain@dreamhost.com>
+Mark McClain <mmcclain@yahoo-inc.com>
+Mark McLoughlin <markmc@redhat.com>
+Maru Newby <marun@redhat.com>
+Maru Newby <mnewby@internap.com>
+Mate Lakat <mate.lakat@citrix.com>
+Matt Riedemann <mriedem@us.ibm.com>
+Matthew Treinish <mtreinish@kortar.org>
+Matthew Treinish <treinish@linux.vnet.ibm.com>
+Miguel Angel Ajo <mangelajo@redhat.com>
+Mohammad Banikazemi <mb@us.ibm.com>
+Monty Taylor <mordred@inaugust.com>
+Morgan Fainberg <morgan.fainberg@gmail.com>
+Moshe Levi <moshele@mellanox.com>
+Moshe Levi <moshele@nvidia.com>
+Motohiro OTSUKA <ootsuka@mxs.nes.nec.co.jp>
+Murad Awawdeh <murada@mellanox.com>
+Nachi Ueno <nachi@ntti3.com>
+Nachi Ueno <nachi@nttmcl.com>
+Nader Lahouti <nlahouti@cisco.com>
+Nguyen Hung Phuong <phuongnh@vn.fujitsu.com>
+Oleg Bondarev <obondarev@mirantis.com>
+Omri Marcovitch <omrim@mellanox.com>
+Ondřej Nový <ondrej.novy@firma.seznam.cz>
+Paul Michali <pcm@cisco.com>
+Praneet Bachheti <praneetb@juniper.net>
+Rajaram Mallya <rajarammallya@gmail.com>
+Ralf Haferkamp <rhafer@suse.de>
+Rich Curran <rcurran@cisco.com>
+Roey Chen <roeyc@mellanox.com>
+Roman Podoliaka <rpodolyaka@mirantis.com>
+Romil Gupta <romilg@hp.com>
+Rossella Sblendido <rsblendido@suse.com>
+Rui Zang <rui.zang@intel.com>
+Russell Bryant <rbryant@redhat.com>
+Ryota MIBU <r-mibu@cq.jp.nec.com>
+Salvatore Orlando <salv.orlando@gmail.com>
+Salvatore Orlando <salvatore.orlando@eu.citrix.com>
+Samer Deeb <samerd@mellanox.com>
+Santhosh <santhom@thoughtworks.com>
+Santhosh Kumar <santhosh.m@thoughtworks.com>
+Sanu Madhavan <sanuptpm@gmail.com>
+Sascha Peilicke <saschpe@gmx.de>
+Sascha Peilicke <speilicke@suse.com>
+Sean Dague <sean.dague@samsung.com>
+Sean Dague <sean@dague.net>
+Sean M. Collins <sean_collins2@cable.comcast.com>
+Sergey Lukjanov <slukjanov@mirantis.com>
+Sergey Skripnick <sskripnick@mirantis.com>
+Shiv Haris <sharis@brocade.com>
+Shweta P <shweta.ap05@gmail.com>
+Somik Behera <somik@nicira.com>
+Somik Behera <somikbehera@gmail.com>
+Sukhdev <sukhdev@arista.com>
+Sulaiman Radwan <sulaimanrad@mellanox.com>
+Sumit Naiksatam <snaiksat@cisco.com>
+Sushil Kumar <sushil.kumar2@globallogic.com>
+Sylvain Afchain <sylvain.afchain@enovance.com>
+Takashi Kajinami <tkajinam@redhat.com>
+Terry Wilson <twilson@redhat.com>
+Thierry Carrez <thierry@openstack.org>
+Thomas Bechtold <tbechtold@suse.com>
+Tim Miller <tim.miller.0@gmail.com>
+Trinath Somanchi <trinath.somanchi@freescale.com>
+Tyler Smith <tylesmit@cisco.com>
+Vieri <15050873171@163.com>
+Weidong Shao <weidong.shao@huawei.com>
+Wu Wenxiang <wu.wenxiang@99cloud.net>
+XiaojueGuan <guanalbertjone@gmail.com>
+Xu Han Peng <xuhanp@cn.ibm.com>
+YAMAMOTO Takashi <yamamoto@valinux.co.jp>
+Yaguang Tang <heut2008@gmail.com>
+Ying Liu <yinliu2@cisco.com>
+Yong Sheng Gong <gongysh@cn.ibm.com>
+Yong Sheng Gong <gongysh@unitedstack.com>
+Yoshihiro Kaneko <ykaneko0929@gmail.com>
+Zang MingJie <zealot0630@gmail.com>
+Zhenguo Niu <zhenguo@unitedstack.com>
+ZhiQiang Fan <aji.zqfan@gmail.com>
+ZhiQiang Fan <zhiqiang.fan@huawei.com>
+Zhongyue Luo <zhongyue.nah@intel.com>
+abdallahyas <abdallahyas@mellanox.com>
+alexpilotti <ap@pilotti.it>
+armando-migliaccio <amigliaccio@nicira.com>
+armando-migliaccio <armamig@gmail.com>
+fumihiko kakuma <kakuma@valinux.co.jp>
+gongysh <gongysh@cn.ibm.com>
+gongysh <gongysh@linux.vnet.ibm.com>
+gordon chung <gord@live.ca>
+hamdy khader <murada@mellanox.com>
+huang.zhiping <huang.zhiping@99cloud.net>
+justin Lund <justin.lund@dreamhost.com>
+lawrancejing <lawrancejing@gmail.com>
+liu-sheng <liusheng@huawei.com>
+liuqing <jing.liuqing@99cloud.net>
+llg8212 <lilinguo@huawei.com>
+mark mcclain <mmcclain@yahoo-inc.com>
+mathieu-rohon <mathieu.rohon@gmail.com>
+motyz <motyz@mellanox.com>
+rohitagarwalla <roagarwa@cisco.com>
+ronak <ronak.malav.shah@gmail.com>
+rossella <rsblendido@suse.com>
+shihanzhang <shihanzhang@huawei.com>
+sukhdev <sukhdev@aristanetworks.com>
+trinaths <trinath.somanchi@freescale.com>
+vinkesh banka <vinkeshb@thoughtworks.com>
+waleed mousa <waleedm@nvidia.com>
+waleedm <waleedm@nvidia.com>
+wangqian <wang.qian@99cloud.net>
+zhhuabj <zhhuabj@cn.ibm.com>
+zhouxinyong <zhouxinyong@inspur.com>
diff -pruN 1:16.0.0-3.1/ChangeLog 1:19.0.0+git2023011010.0a69b971-0ubuntu1/ChangeLog
--- 1:16.0.0-3.1/ChangeLog	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/ChangeLog	2023-01-10 15:13:36.000000000 +0000
@@ -0,0 +1,1007 @@
+CHANGES
+=======
+
+* Refactor session handling in networking-mlnx
+* Revert "Extend the IPoIBInterfaceDriver to support link\_up parameter"
+* Update zuul neutron template to zed template
+* Change the default value for client\_id\_hardware to True
+* Fix Mellanox CI, test-requirements.txt and unit tests failures
+* Change authentication method for Mellanox sdn controller
+* Update zuul neutron template to yoga template
+* Add Python 3.8 to supported runtimes
+* update zuul to xena release
+* update zuul for wallaby
+* remove policy.json
+* fix test-requirements.txt
+* fix string comparison when replacing the dhcp opt
+* support client-id hardware
+* Bump pylint version to support python 3.8
+* eswitchd: improve error handling
+* add support for HA router and DVR
+* Sync tox requirement with neutron's
+* Fix mech driver initialization with physical\_networks
+* Monkey patch original current\_thread \_active
+
+16.0.0
+------
+
+* Revert "Added \`fabric\_type\` field in journal database"
+* Extend the IPoIBInterfaceDriver to support link\_up parameter
+* Explicitly initialize empty dict when getting GUIDs
+* Remove set/get VF GUID via netdev API (netlink) for MLNX4
+* pass correct parameters to \_set\_vf\_guid\_sysfs\_mlnx5
+* Add explicit casting to int for vf index
+* Refactor set vf GUID to improve readability
+* Add support for Getting administrative GUID via netlink
+* Move test\_pci\_utils under utils folder
+* Removed Python2.7 support from tox
+* Get VF GUID according to GID index and not VF index
+* Move get\_vfs\_macs\_ib from pci\_utils to ib\_utils
+* Align all index related data in eswitchd
+* Remove Unused constants
+* Fix zmq send/rcv for python3
+* Fixed zmq python3 send\_string
+* devstack: configure bind\_normal\_ports and bind\_normal\_ports\_physnets
+* always bind normal ports for IB physnet
+* Add Support for Port Up and Port Down RPC calls
+* Explicitly convert vf index argument to int
+* Use DHCP\_OPT\_CLIENT\_ID\_NUM from neutron-lib
+* Added \`fabric\_type\` field in journal database
+* Remove internal DB dependencies and refactoring
+* Reduce agent dependency on mac address
+* Update some config opts description
+* Use Neutron master in requirements.txt
+* Added DHCP and L3 params for IB multi interface
+* Refactoring of process\_port\_info
+* Refactor the way a device is identified
+* Supporting SSL certificate verification
+* Bump pyroute2 to 0.5.7
+* Add Support for performing net device operations via pyroute2
+* Transition write\_sys method to privsep
+* Remove the ebrctl write-sys dependency
+* Remove redundant call to set\_port\_vlan\_id
+* remove ethtool and ip link from rootwrap
+* Change cache maintenance config opt default
+* Fix deadlock of read and write lock taken by the same thread
+* Add support for binding normal ports
+* remove unused clean.sh and make misc-sanity-checks.sh executable
+* add checks from neutron pylint and fix them
+* Add multi interface driver
+* Replace openstack.org URLs with opendev.org URLs
+* Trivial: update url to new url
+* Update url in HACKING.rst
+* Add network cache module
+* Add IPoIB interface driver
+* add bandit security linter
+* update hacking>=1.1.0, enable E125 E129 N537 and fix pep8 issues
+* update pep8 check and fix new pep8 errors
+* add bashate
+* Add py37 jobs to Zuul
+* remove MlnxDHCP
+* remove debug print
+* Set port status to ACTIVE when binding a port
+* remove py37 and add py36
+* skip test\_release\_unused\_leases\_one\_lease\_with\_client\_id\_none
+* OpenDev Migration Patch
+* Remove version from PKG-INFO file
+* Replacing the link in HACKING.rst
+* Update min tox version to 2.0
+* Switch testing from Python 3.5 to 3.7
+* Fix some minor typos and comments
+* Fix exception handling in journal thread
+
+13.1.0
+------
+
+* fixed host id
+* Fixed: Use db\_api from neutron\_lib
+* Use neutron\_lib for db\_api instead of neutron
+* Support disabling SDN controller sync
+* Disable test\_client\_id\_num\_str test
+* fix tox python3 overrides
+* Use Param DHCP\_OPT\_CLIENT\_ID\_NUM
+* Disable test\_client\_id\_num test
+* fix unused\_leases\_one\_lease\_with\_client\_id test
+* Also run unit tests under Python 3
+* uncap eventlet
+* delete baremetal ports when they are unbound
+* Don't bind ports on disallowed physical networks
+* Avoid tox-install.sh Fix import topics
+* Fix issue that causes job to get stuck on monitoring
+* Remove obsolete tempest-lib
+* Fix neutron\_db\_api.get\_session with the latest neutron db api
+* Fix MLNX\_GUID\_INDEX\_PATH for eswitch
+* Fixed neutron.plugins.common import
+* Revert "Fix MLNX4\_GUID\_INDEX\_PATH"
+* Fix MLNX4\_GUID\_INDEX\_PATH
+* FIX: moving import driver\_api to neutron\_lib.plugins.ml2
+* Remove double import of neutron lib constants
+* Update requirements versions to be compatible with neutron
+* Fixing p\_constants import from neutron.plugins.common to neutron\_lib
+* remove fabric\_type
+* add support to plx
+* Fix devstack's bash script, fix 2 logical expressions
+* Remove unused ebrctl message flow "create-port"
+* Use 'port' definition from neutron-lib instead of deprecated neutron
+* Added the ability to update client-id dhcp option
+* Cp only eswitch conf file
+* fixed neutron\_lib.api.definitions import and CLIENT\_ID
+* Fix deprecation warning Modules moved from 'neutron.agent.securitygroups\_rpc' to 'neutron.api.rpc.handlers.securitygroups\_rpc'
+* add physical\_networks option to filter phynet
+* update requirements
+* Add support for multi port on the same phynet
+* ignore [N537] Don't translate logs
+* neo sdn: fix delete port to
+* remove pbr from requirements
+* Fixed neutron.agent.common import config
+* update pbr version to 2.0 and use neutron\_lib context
+* Fixed neutron\_lib.utils import
+* TrivialFix: Merge imports in code
+* reset driver using bind/unbind when vm is deleted
+* requirements zmq changed to pyzmq
+* baremetal send POST if we have the local\_link\_information
+* Use neutron-lib portbindings api-def
+* Remove white space between print ()
+* check port device\_owner before sending DELETE port
+* add missing \_\_init\_\_.py for journal unit test
+* update files to 644
+* fix race condition in migration when delete job done after create port
+* fix dhcp test\_spawn\_no\_dns\_domain test
+* pass old port dict when calling PORT DELETE api
+* Move tests located at ml2/sdn to ml2/drivers/sdn folder
+* Add unit test for sdn client
+* Remove unused method 'try\_delete' in sdn client
+* Fixed CX5 device\_type handling
+* Fixed import model\_base
+* Add support for Ironic multi tenant with ETH
+* use neutron\_lib constants
+* Send delete port request for unattached ports
+* Add support for Ironic multi tenant with IB
+* Adjust unit tests to python 3
+* add support for baremetal port binding
+* Update test-requirements
+* fix mlnx\_sdn\_assist documentation
+
+9.0.0.0b1
+---------
+
+* don't send network with null provider:segmentation\_id
+* fix NEO sync when deleting port and network
+* Put py34 first in the env order of tox and remove py33
+* fix retry count
+* catch login exception in journal
+* Add Sync mechanism to NEO
+* add get method to client
+* rearrange unit test folder
+* fix journal dependency validation
+* DB: Refactor networking-mlnx db
+* db: change Enum field to use constants
+* db: Remove depreciation warnings from db
+* Fix to allow use of param PHYSICAL\_INTERFACE\_MAPPINGS
+* Fix deprecation errors
+* mlnx\_sdn: create REST client
+* Enable DeprecationWarning in test environments
+* add sdn maintenance table
+* add Journal cleanup
+* Add dependency validation
+* add sdn journal database
+* Initial alembic migrations
+* fix missing translation in pep8
+* centralize sdn config options
+* change remove PATH from constant names
+* add domain with cloudx default value to sdn conf
+* Use separate queue for agent state reports
+* Use neutron\_lib hacking and fix pep8 issues
+* Adding root\_helper registering
+* Use noop instead of neutron.agent.firewall:NoopFirewallDriver
+* Don't log Error on http 501
+* Log rest response text on debug
+* replace assert with assertFalse
+* remove session\_timeout option
+* rearrange import in sdn driver
+* tox: Update import\_exceptions to import networking\_mlnx.\_i18n
+* Move class properties to instances for dhcp tests
+* move to oslo\_concurrency
+* remove unused var and function in eswitchd
+* Added zmq to requirements.txt
+* fixed removing ESWITCHD\_CONF\_DIR on clean
+* Renaming Mellanox Mechanism driver
+* Fixed dependencies lxml
+* removed version from metadata in setup.cfg
+* deleted non working tests
+* added unit tests for eswitchd/db/device\_db
+
+8.0.0.0b3
+---------
+
+* Fixed plugin getting stuck in case of missing/wrong message
+* added more unit tests for pci\_utils
+* change log level from exception to error
+* deleted pci\_utils.py from unit test folder
+* i18n imports fixed
+* updated test requirements according to neutron
+* sdn mech driver add support for flat network
+* Fix mlnx mech driver to work with flat network
+* removed eswitchd-rootwrap leftover
+* replace eswitchd-rootwrap with neutron-rootwrap
+* removed datafiles from setup.cfg
+
+8.0.0.0b2
+---------
+
+* Bump version to mark Mitaka branch
+* Update link to correct wiki docs location
+* change RPC version to 1.3
+* Deprecated tox -downloadcache option removed
+* Fixed exception on port deallocate for eswitch if network is not present
+* Moving eswitchd to networking-mlnx repo
+* Add to sdn mechanism driver to send DHCP port information
+* Decompose ML2 mechanism driver for Mellanox
+* Update .gitreview with openstack instead of stackforge
+* updates due to moving from stackforge to openstack github
+* copy eswitchd files with sudo
+* Add debug log for NEO response code
+* Add config NoopDriver as firewall\_driver
+* update sync to true if we get request timeout
+* Add network qos policy to sdn mech driver
+* Retry when eswitchd is down instead of exiting
+* fix test\_host\_file\_on\_net\_with\_v6\_slaac\_and\_v4 in dhcp test
+* fix move MLNX AGENT constant to mlnx\_mech and some dhcp unit tests
+* update devstack eswitchd to work with eswitch pbr
+* Add support for IB dhcp
+* Fix files permission to be 644
+* update devstack readme.rst
+* add mlnx dnsmasq support
+* Change the connection authentication of the SDN provider to a session
+* Update tests files to use oslo\_config and not oslo.config
+* added missing PKG-INFO for package building
+* loopingcall was removed from  neutron.openstack.common fixed unitest Change-Id: Ie6b4eea23a3a1df79ed24e6e2556735d39b15758 Signed-off-by: Lenny Verkhovsky <lennyb@mellanox.com>
+* mlnx MD: mlnx\_direct removal
+* Updating version to 2015.2.1
+* Add /usr/local/bin to exec\_dirs in  nova rootwrap.conf
+* Change the entry point name of  sdn provider mechanism driver plugin
+* make hostdev default vnic type in devstack
+* Add README.rst file
+* Add SDN mechanism driver
+* Fix import in unit test tree
+* Move mlnx agent to be under ml2/drivers/mlnx
+* Migrate to oslo.log
+* oslo: migrate to namespace-less import paths and add hacking rule
+* Remove root\_helper arg from SecurityGroupAgentRpc and from mlnx agent
+* Update networking-mlnx README file
+* Update the requirements.txt to point to the real neutron repository
+* Fix devstack external plugin
+* Untangle and drop SecurityGroupAgentRpcMixin usage and replace it with SecurityGroupAgentRpc. This patch separates the use of SecurityGroupAgentRpcMixin out to its own class
+* networking-mlnx devstack external plugin
+* Initial creation for networking-mlnx
+* Generated new .gitreview file for networking-mlnx
+* Disable unbalanced-tuple-unpacking
+* Updated from global requirements
+* Dropped fixture module
+* Move agent cleanup scripts to cmd module
+* misc-sanity-checks.sh: Some cleanups
+* Service split: cleaned up setup.cfg
+* hacking: enable H238 (old style class declaration, use new style)
+* hacking: enable W292 (no newline at end of file)
+* Update hacking to 0.10
+* Move metadata agent entry to its own file
+* Updated from global requirements
+* Break out config and entry point out of l3/agent file
+* Move postcommit ops out of transaction for bulk
+* Add support for retargetable functional api testing
+* Replace mention of nose with nose2 in devref
+* Delete the console scripts for lbaas and vpnaas
+* Enable the "not-callable" pylint check
+* Retry on unassigned ofport instead of treating it as a failure
+* Clean-up sanity checks done via shell scripts
+* Enable pylint checks for "anomalous" string escapes
+* Combine author\_tag and log\_translation\_hint regexes
+* Prevent symlinks to be added to the tree
+* Move DB TestModelsMigrations from unit to functional
+* Backward compatibility for advanced services
+
+2015.1.0b1
+----------
+
+* Updated from global requirements
+* Removed unused iso8601 dependency
+* Remove mlnx plugin
+* Set timeout for functional job
+* Remove unused dependencies
+* Migrate to oslo.context
+* Have L3 agent catch the correct exception
+* Updated from global requirements
+* Switch to using subunit-trace from tempest-lib
+* Move classes out of l3\_agent.py
+* Prettify tox output for functional tests
+* Services split, pass 2
+* Remove TODO for H404
+* Updated from global requirements
+* Use comments rather than no-op string statements
+* Enforce log hints
+* Disallow log hints in LOG.debug
+* Enforce log hints in ofagent and oneconvergence
+* Update i18n translation for NEC plugin log msg's
+* Update i18n translation for IBM plugin log msg's
+* Workflow documentation is now in infra-manual
+* tox.ini: Prevent casual addition of bash dependency
+* Updated from global requirements
+* Convert several uses of RpcCallback
+* Get rid of py26 references: OrderedDict, httplib, xml testing
+* Enforce log hints in opencontrail
+* Update i18n translation for Metaplugin plugin
+* Update i18n translation for Brocade plugin log msg's
+* Update i18n translation for Nuage plugin
+* Update i18n translation for Embrane plugin
+* Enforce log hints in neutron.plugins.plumgrid
+* Update i18n translation for Midonet plugin
+* Enforce log hints in neutron.plugins.sriovnicagent
+* Enforce log hints in neutron.plugins.hyperv
+* Updated the README.rst
+* Update i18n translation for BigSwitch plugin log msg's
+* pretty\_tox.sh: Portablity improvement
+* test\_dhcp\_agent: Fix no-op tests
+* Update i18n translation for Mellanox plugin and agent log msg's
+* Update i18n translation for VMware NSX plugin log msg's
+* hacking: Check if correct log markers are used
+* Enable undefined-loop-variable pylint check
+* Fix incorrect exception order in \_execute\_request
+* Migrate to oslo.i18n
+* Migrate to oslo.middleware
+* Migrate to oslo.utils
+* Remove Python 2.6 classifier
+* Update i18n translation for Cisco plugins and cfg agent log msg's
+* Remove ryu plugin
+* Update i18n translation for linuxbridge log msg's
+* Update i18n translation for openvswitch log msg's
+* Update i18n translation for ML2 plugin log msg's
+* Updated from global requirements
+* Enforce log hints in neutron.services
+* Enforce log hints in neutron.services.metering
+* Show progress output while running unit tests
+* Enforce log hints in neutron.services.loadbalancer
+* Enforce log hints in neutron.services.firewall
+* Enforce log hints in neutron.services.l3\_router
+* enable H401 hacking check
+* enable H237 check
+* Updated from global requirements
+* Update i18n translation for neutron.server/scheduler log msg's
+* Update i18n translation for neutron.notifiers log msg's
+* Update i18n translation for neutron.common/debug log msg's
+* Update i18n translation for neutron.api log msg's
+* Updated from global requirements
+* Update i18n translation for neutron.extension log msg's
+* Update i18n translation for neutron.db log msg's
+* Update i18n translation for neutron.cmd log msg's
+* Update i18n translation for neutron.agents log msg's
+* enable F812 check for flake8
+* enable F811 check for flake8
+* Support pudb as a different post mortem debugger
+* switch to oslo.serialization
+* Add rootwrap filters for ofagent
+* Remove openvswitch core plugin entry point
+* Updated from global requirements
+* Updated from global requirements
+* enable F402 check for flake8
+* enable E713 in pep8 tests
+* Hyper-V: Remove useless use of "else" clause on for loop
+* Enable no-name-in-module pylint check
+* Updated from global requirements
+* Remove duplicate import of constants module
+* Switch run-time import to using importutils.import\_module
+* Enable assignment-from-no-return pylint check
+* tox.ini: Avoid using bash where unnecessary
+* Empty files should not contain copyright or license
+* Remove single occurrence of lost-exception warning
+* Updated fileutils and its dependencies
+
+2014.2
+------
+
+* remove E251 exemption from pep8 check
+* mock.assert\_called\_once() is not a valid method
+* Add pylint tox environment and disable all existing warnings
+* Updated from global requirements
+* Ignore top-level hidden dirs/files by default
+* Remove some duplicate unit tests
+* Drop sslutils and versionutils modules
+
+2014.2.rc2
+----------
+
+* Removed kombu from requirements
+* Updated from global requirements
+* Updated from global requirements
+* Remove sslutils from openstack.common
+
+2014.2.rc1
+----------
+
+* remove linuxbridge plugin
+* Fix sleep function call
+* Open Kilo development
+* Implement ModelsMigrationsSync test from oslo.db
+* Fix entrypoint of OneConvergencePlugin plugin
+* Set dsvm-functional job to use system packages
+* Separate Configuration from Freescale SDN ML2 mechanism Driver
+* Remove @author(s) from copyright statements
+* Updated from global requirements
+* Adds ipset support for Security Groups
+* Add requests\_mock to test-requirements.txt
+* Removed kombu from requirements
+* Supply missing cisco\_cfg\_agent.ini file
+* Remove unused arg to config.setup\_logging()
+* Updated from global requirements
+
+2014.2.b3
+---------
+
+* Work toward Python 3.4 support and testing
+* Revert "Cisco DFA ML2 Mechanism Driver"
+* Big Switch: Separate L3 functions into L3 service
+* Remove reference to cisco\_cfg\_agent.ini from setup.cfg again
+* Adds router service plugin for CSR1kv
+* Support for extensions in ML2
+* Cisco DFA ML2 Mechanism Driver
+* Adding mechanism driver in ML2 plugin for Nuage Networks
+* Fix state\_path in tests
+* Remove ovs dependency in embrane plugin
+* Remove binding:profile update from Mellanox ML2 MD
+* Use lockutils module for tox functional env
+* Updated from global requirements
+* Refresh rpc\_backend values in unit tests to those from oslo.messaging
+* Add specific docs build option to tox
+* Fix bigswitch setup.cfg lines
+* Updated from global requirements
+* Use jsonutils instead of stdlib json
+* Opencontrail plug-in implementation for core resources
+* MLNX Agent: ensure removed ports get treated on resyncs
+* MLNX Agent: Process port\_update notifications in the main agent loop
+* Add a tox test environment for random hashseed testing
+* Updated from global requirements
+* Remove reference to cisco\_cfg\_agent.ini from setup.cfg
+* Removed configobj from test requirements
+* Updated from global requirements
+* Functional tests work fine with random PYTHONHASHSEED
+* Set python hash seed to 0 in tox.ini
+* Configuration agent for Cisco devices
+* Updated from global requirements
+* ML2 mechanism driver for SR-IOV capable NIC based switching, Part 2
+
+2014.2.b2
+---------
+
+* This patch changes the name of directory from mech\_arista to arista
+* ML2 mechanism driver for SR-IOV capable NIC based switching, Part 1
+* Allow to import \_LC, \_LE, \_LI and \_LW functions directly
+* Make readme reference git.openstack.org not github
+* Bump hacking to version 0.9.2
+* Use auth\_token from keystonemiddleware
+* Remove reference to setuptools\_git
+* Add a gate-specific tox env for functional tests
+* Add CONTRIBUTING.rst
+* Updated from global requirements
+* Updated from global requirements
+* Updated from global requirements
+* Fix example for running individual tests
+* Switch to using of oslo.db
+* remove unsupported middleware
+* Add config for performance gate job
+* Introduce bulk calls for get device details
+* Synced log module and its dependencies from olso-incubator
+* don't ignore rules that are already enforced
+* Moved rpc\_compat.py code back into rpc.py
+* Updated from global requirements
+* Updated from global requirements
+* ofagent: move main module from ryu repository
+* Remove the useless vim modelines
+* Removed 'rpc' and 'notifier' incubator modules
+* Removed create\_rpc\_dispatcher methods
+* Use openstack.common.lockutils module for locks in tox functional tests
+* Port to oslo.messaging
+* Updated from global requirements
+* Ignore emacs checkpoint files
+* Configure agents using neutron.common.config.init (formerly .parse)
+* Added missing core\_plugins symbolic names
+* Introduce RpcCallback class
+* remove pep8 E122 exemption and correct style
+
+2014.2.b1
+---------
+
+* remove E112 hacking exemption and fix errors
+* Updated from global requirements
+* Monkey patch threading module as early as possible
+* Introduced transition RPC exception types
+* Freescale SDN Mechanism Driver for ML2 Plugin
+* Remove run-time version checking for openvswitch features
+* Added missing plugin .ini files to setup.cfg
+* Updated from global requirements
+* Synced jsonutils from oslo-incubator
+* Cisco APIC ML2 mechanism driver, part 2
+* NSX: get rid of the last Nicira/NVP bits
+* Add missing translation support
+* Add mailmap entry
+* Updated from global requirements
+* Remove explicit dependency on amqplib
+* eswitch\_neutron\_agent: Whitespace fixes in comments
+* Remove duplicate module-rgx line in .pylintrc
+* Fix H302 violations
+* Fix H302 violations in plugins package
+* Fix H302 violations in unit tests
+* Don't print duplicate messages on SystemExit
+* Updated from global requirements
+* Add physical\_network to binding:vif\_details dictionary
+* Updated from global requirements
+* Switch over to FixedIntervalLoopingCall
+* Exclude .ropeproject from flake8 checks
+* Remove mock.patch.stop from tests that inherit from BaseTestCase
+* Enable flake8 E711 and E712 checking
+* Updated from global requirements
+* Sync service and systemd modules from oslo-incubator
+* Move bash whitelisting to pep8 testenv
+* Fix Jenkins translation jobs
+* ignore build directory for pep8
+* Enable hacking H301 check
+* Updated from global requirements
+
+2014.1.rc1
+----------
+
+* Remove last parts of Quantum compatibility shim
+* Open Juno development
+* Start using oslosphinx theme for docs
+* Fixed TypeError when creating MlnxException
+* Remove extra space in help string
+* Updated from global requirements
+* Add enable\_security\_group option
+* add HEAD sentinel file that contains migration revision
+* Add update binding:profile with physical\_network
+* Bugfix and refactoring for ovs\_lib flow methods
+* Removes calls to mock.patch.stopall in unit tests
+* Updated from global requirements
+* Updated from global requirements
+* Updated from global requirements
+* One Convergence Neutron Plugin l3 ext support
+* One Convergence Neutron Plugin Implementation
+* BigSwitch: Add SSL Certificate Validation
+
+2014.1.b3
+---------
+
+* Updated from global requirements
+* Add OpenDaylight ML2 MechanismDriver
+* Implementation of Mechanism driver for Brocade VDX cluster of switches
+* Implement Mellanox ML2 MechanismDriver
+* Remove call to addCleanup(cfg.CONF.reset)
+* Implement OpenFlow Agent mechanism driver
+* Finish off rebranding of the Nicira NVP plugin
+* BigSwitch: Add agent to support neutron sec groups
+* Adds the new IBM SDN-VE plugin
+* Updated from global requirements
+* Developer documentation
+* Change tenant network type usage for IB Fabric
+* Rename Neutron core/service plugins for VMware NSX
+* Updated from global requirements
+* Sync minimum requirements
+* Copy cache package from oslo-incubator
+* Add update from agent to plugin on device up
+* Remove dependent module py3kcompat
+* Add migration support from agent to NSX dhcp/metadata services
+* Remove psutil dependency
+* LBaaS: move agent based driver files into a separate dir
+* mailmap: update .mailmap
+* Return request-id in API response
+* Prepare for multiple cisco ML2 mech drivers
+* Support building wheels (PEP-427)
+* Use oslo.rootwrap library instead of local copy
+* Enables BigSwitch/Restproxy ML2 VLAN driver
+* Add an explicit tox job for functional tests
+* Base ML2 bulk support on the loaded drivers
+* Enable hacking H233 rule
+
+2014.1.b2
+---------
+
+* Update RPC code from oslo
+* Configure plugins by name
+* Update lockutils and fixture in openstack.common
+* Rename nicira configuration elements to match new naming structure
+* Remove unused imports
+* Rename check\_nvp\_config utility tool
+* Corrects broken format strings in check\_i18n.py
+* Change default eswitchd port to avoid conflict
+* Updates tox.ini to use new features
+* Updated from global requirements
+* Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2
+* Update common network type consts to same origin
+* Add fwaas\_driver.ini to setup.cfg
+
+2014.1.b1
+---------
+
+* Add vpnaas and debug filters to setup.cfg
+* Add request timeout handling for Mellanox Neutron Agent
+* Updates .gitignore
+* Update Zhenguo Niu's mailmap
+* Replace stubout with fixtures
+* Ensure get\_pid\_to\_kill works with rootwrap script
+* Updated from global requirements
+* Cleanup HACKING.rst
+* Fix import log\_handler error with publish\_errors set
+* Updated from global requirements
+* Updated from global requirements
+* Fix incorrect indentations found by Pep 1.4.6+
+* Cleanup and make HACKING.rst DRYer
+* Add support for managing async processes
+* Remove obsolete redhat-eventlet.patch
+* Change rpc\_support\_old\_agents default to False
+
+2013.2.rc1
+----------
+
+* Open Icehouse development
+* Updated from global requirements
+* Require oslo.config 1.2.0 final
+* Use built-in print() instead of print statement
+* Add l2 population base classes
+* Fix message i18n error
+* Install metering\_agent.ini and vpn\_agent.ini
+* fix conversion type missing
+* Enclose command args in with\_venv.sh
+* ML2 Mechanism Driver for Cisco Nexus
+* Reference driver implementation (IPsec) for VPNaaS
+* Implement ML2 port binding
+*  Arista ML2 Mechanism driver
+* ML2 Mechanism Driver for Tail-f Network Control System (NCS)
+* Default to not capturing log output in tests
+* Add Neutron l3 metering agent
+* Add recent neutron extentions and IB support
+* Update mailmap
+* Fix wrong example in HACKING.rst
+* Bumps hacking to 0.7.0
+* remove binaries under bin
+* Fixes Windows setup dependency bug
+* Restore Babel to requirements.txt
+* Remove DHCP lease logic
+* Remove last vestiges of nose
+* Updated from global requirements
+* Ignore pbr\*.egg directory
+* Fix H102, H103 Apache 2.0 license hacking check error
+* Remove openstack.common.exception usage
+* Adds Babel dependency missing from 555d27c
+* Fix the alphabetical order in requirement files
+* Remove comments from requirements.txt (workaround pbr bug)
+* remove netifaces dependency of ryu-agent
+
+2013.2.b2
+---------
+
+* Add gre tunneling support for the ML2 plugin
+* Add VXLAN tunneling support for the ML2 plugin
+* xenapi - rename quantum to neutron
+* Fix issue with pip installing oslo.config-1.2.0
+* Initial Modular L2 Mechanism Driver implementation
+* Add cover/ to .gitignore
+* fix some missing change from quantum to neutron
+* Rename Quantum to Neutron
+* Rename quantum to neutron in .gitreview
+* Sync install\_venv\_common from oslo
+* Update to use OSLO db
+* Require greenlet 0.3.2 (or later)
+* Remove single-version-externally-managed in setup.cfg
+* Fix single-version-externally-mananged typo in setup.cfg
+* Allow use of lowercase section names in conf files
+* Require pbr 0.5.16 or newer
+* Update to the latest stevedore
+* Rename agent\_loadbalancer directory to loadbalancer
+* Remove unit tests that are no longer run
+* Remove explicit distribute depend
+* Fix and enable H90x tests
+* Remove generic Exception when using assertRaises
+* Add \*.swo/swp to .gitignore
+* python3: Introduce py33 to tox.ini
+* Rename README to README.rst
+* Rename requires files to standard names
+* Initial Modular L2 plugin implementation
+* Perform a sync with oslo-incubator
+* update mailmap
+* Revert "Fix ./run\_tests.sh --pep8"
+* Move to pbr
+* Fix ./run\_tests.sh --pep8
+* blueprint mellanox-quantum-plugin
+* Let the cover venv run individual tests
+*  Copy the RHEL6 eventlet workaround from Oslo
+* Remove locals() from strings substitutions
+* Enable automatic validation of many HACKING rules
+* Shorten the path of the nicira nvp plugin
+* Allow pdb debugging in manually-invoked tests
+* Reformat openstack-common.conf
+* Switch to flake8 from pep8
+* Parallelize quantum unit testing:
+* blueprint cisco-single-config
+* Add lbaas\_agent files to setup.py
+* Add VIRTUAL\_ENV key to environment passed to patch\_tox\_env
+* Sync latest Oslo components for updated copyright
+* Replace "OpenStack LLC" with "OpenStack Foundation"
+* First havana commit
+* remove references to netstack in setup.py
+* Update tox.ini to support RHEL 6.x
+* Add common test base class to hold common things
+* Pin pep8 to 1.3.3
+* Add initial testr support
+* LBaaS Agent Reference Implementation
+* Add scheduling feature basing on agent management extension
+* Use testtools instead of unittest or unittest2
+* Add midonet to setup.py
+* Sync latest install\_venv\_common.py with olso
+* Add check-nvp-config utility
+* Use oslo-config-2013.1b3
+* Adds Brocade Plugin implementation
+* Synchronize code from oslo
+* PLUMgrid quantum plugin
+* Update .coveragerc
+* Allow tools/install\_venv\_common.py to be run from within the source directory
+* Updated to latest oslo-version code
+* Use install\_venv\_common.py from oslo
+* Cisco plugin cleanup
+* Use babel to generate translation file
+* Adds support for deploying Quantum on Windows
+* Add migration support to Quantum
+* .gitignore cleanup
+* Logging module cleanup
+* Add OVS cleanup utility
+* Add tox artifacts to .gitignore
+* Add restproxy.ini to config\_path in setup.py
+* Add script for checking i18n message
+* l3 agent rpc
+* Add metadata\_agent.ini to config\_path in setup.py
+* add metadata proxy support for Quantum Networks
+* Add QUANTUM\_ prefix for env used by quantum-debug
+* Make tox.ini run pep8 checks on bin
+* Explicitly include versioninfo in tarball
+* Import lockutils and fileutils from openstack-common
+* Updated openstack-common setup and version code
+* Add uuidutils module
+* Add eventlet\_backdoor and threadgroup from openstack-common
+* Add loopingcall from openstack-common
+* Added service from openstack-common
+* Import order clean-up
+* Correct Intended Audience
+* Add OpenStack trove classifier for PyPI
+* l3\_nat\_agent was renamed to l3\_agent and this was missed
+* Support for several HA RabbitMQ servers
+* add missing files from setup.py
+* Create .mailmap file
+* Implements agent for Quantum Networking testing
+* Create utility to clean-up netns
+* Update rootwrap; track changes in nova/cinder
+* Add lease expiration script support for dnsmasq
+* quantum l3 + floating IP support
+* NEC OpenFlow plugin support
+* Initial implemention of MetaPlugin
+* Exempt openstack-common from pep8 check
+* fix bug lp:1025526,update iniparser.py to accept  empty value
+* Introduce files from openstack common
+* fix bug lp:1019230,update rpc from openstack-common
+* implement dhcp agent for quantum
+* Use setuptools git plugin for file inclusion
+* Remove paste configuration details to a separate file. blueprint use-common-cfg
+* Implements the blueprint use-common-cfg for the quantum service. More specifically uses global CONF for the quantum.conf file
+* Add authZ through incorporation of policy checks
+* Bug #1013967 - Quantum is breaking on tests with pep 1.3
+* Use openstack.common.exception
+* Fix up test running to match jenkins expectation
+* Add build\_sphinx options
+* Quantum should use openstack.common.jsonutils
+* Quantum should use openstack.common.importutils
+* PEP8 fixes
+* Parse linuxbridge plugins using openstack.common.cfg
+* Add HACKING.rst to tarball generation bug 1001220
+* Include AUTHORS in release package
+* Add HACKING.rst coding style doc
+* bug 963152: add a few missing files to sdist tarball
+* Split out pip requires and aligned tox file
+* Fix missing files in sdist package [bug 954906]
+* more files missing in sdist tarball
+* make sure pip-requires is included in setup.py sdist
+* remove pep8 and strict lxml version from setup.py
+* plugin: introduce ryu plugin
+* bug 934459: pip no longer supports -E
+* Initial commit: nvp plugin
+* Cleanup the source distribution
+* blueprint quantum-linux-bridge-plugin
+* Remove quantum CLI console script
+* Make tox config work
+* Split out quantum.client and quantum.common
+* Quantum was missing depend on lxml
+* Getting ready for the client split
+* Removed erroneous print from setup.py
+* Base version.py on glance
+* Fix lp bug 897882
+* Install a good version of pip in the venv
+* Rename .quantum-venv to .venv
+* Remove plugin pip-requires
+* Bug #890028
+* Fix for bug 900316
+* Second round of packaging changes
+* Changes to make pip-based tests work with jenkins
+* Fix for Bug #888820 - pip-requires file support for plugins
+* blueprint quantum-packaging
+* Add .gitreview config file for gerrit
+
+2011.3
+------
+
+* merge tyler's unit tests for cisco plugin changes lp845140
+* merge salv's no-cheetah CLI branch lp 842190
+* merge sumit's branch for lp837752
+* merge salv's branch for bug834013
+* merge salv's branch for keystone token on client bug838006
+* merge rohit's db test branch: lp838318
+* merge salv fix for bug 841982, fix minor pep8 violation
+* merge salv fix for bug834008
+* Merging latest from lp:quantum
+* Merging lp:~salvatore-orlando/quantum/quantum-api-auth
+* syncing diverged branches
+* merging from lp:quantum
+* merging from lp:quantum
+* Merging from lp:quantum
+* merge salv's branch to remove dummy plugin
+* Merging Shweta's test cases for multiport resource
+* Merging Sumit's changes including fixes for multinic support, and CLI module for working with extensions
+* Merging from Cisco branch
+* Merging changes from Ying's branch (new multiport resource)
+* Merging from lp:quantum
+* merge cisco consolidated plugin changes
+* Merging lp:~salvatore-orlando/quantum/bug834449
+* Merging Ying's changes (minor)
+* merge trunk
+* merge trunk
+* merging changes from cisco consolidated branch
+* Merging fixes from Sumit's branch for extension API version number and to UCS inventory to associate VIF-ID with ports
+* Merging from the Cisco branch
+* Merging Shweta's fix for extensions' test cases (clean up was not happening completely)
+* Merging from lp:quantum
+* Merging Shweta's fixes in the tests for key names changes in the Core API
+* merge salvatore's new cli code
+* Merging lp:quantum, resolving conflict
+* merge two pep8 branch
+* Merging Ying's pep8 fixes
+* Merging quantum trunk
+* Merging lp:~danwent/quantum/lp834491 Fixing Bug #834491: api alignment merge broke ovs plugin 	(Critical)
+* Merging from quantum
+* merge cisco extensions branch
+* Merging Shweta's fixes to the test cases for the extensions
+* Merging from Sumit's branch, changes to VIF-driver and Scheduler; extension action names have been changed in response to Salvatore's review comments in the extensions branch review
+* Syncing with Cisco extensions branch
+* Merging changes from Sumit's branch
+* Merging Ying's branch
+* Merging from Sumit's branch, import ordering related changes
+* Merging the Cisco branch
+* Finishing cli work Fixing bug with XML deserialization
+* Merging lp:~salvatore-orlando/quantum/quantum-api-alignment
+* Merging from Sumit's branch
+* Merging Rohit's changes
+* merge latest quantum branch and resolve conflicts
+* Merging lp:~asomya/quantum/lp833163 Fix for Bug #833163: Pep8 violations in recent packaging changes that were merged into trunk (Critical)
+* PEP8 fixes for setup.py
+* Made changes according to reviewer's comments. Add additional information on extension test in README
+* Merging changes from Sumit's branch
+* Merging lp:~cisco-openstack/quantum/802dot1qbh-vifdriver-scheduler
+* Merging lp:~cisco-openstack/quantum/l2network-plugin-persistence
+* Merging lp:quantum
+* merging with lp:quantum
+* merge lp:~bgh/quantum/lp837174
+* Merging from Sumit's latest branch - Fixed loading of Nexus DB tables; moved imports to l2network\_db.py; Refactoring of code to generalize inventory handling (enhancement)
+* Making Keystone version configurable
+* Merging lp:~raxnetworking/quantum/bug827272
+* Merging branch: lp:~danwent/quantum/test-refactor
+* Merging UCS inventory state initialization fix from Sumit's branch
+* Fixes an issue with loading the UCS inventory when a dynamic nic has been used outside of Quantum
+* Pep8, pylint fixes
+* Merging rohit's UCS persistence support
+* Merging changes from Rohit's branch
+* Merging changes from cisco extensions
+* merged Shweta's branch for ext test. Minor fix for review comments
+* merged Shweta's ext test branch
+* Syncing with lp:quantum
+* sync up with l2network exception handling for extension
+* merged Cisco branch's latest changes
+* Adding changes from Sumit's latest merge
+* merge with  lp:~cisco-openstack/quantum/l2network-plugin-extensions
+* Raising exceptions in extension resources handling (where missing). Changing exception name to QosNotFound
+* Merging from Cisco branch
+* Merging fixes to client side exception handling. Thanks lp:tylesmit !
+* Merging fixes and changes to batch-config script. Thanks lp:danwent !
+* merge with ying's branch
+* merging with Ying's extension branch
+* merging with ~cisco-openstack/quantum/l2network-plugin-extensions
+* fix pylint issuses
+* Merging bug fix for Bug 821733. Thanks lp:salvatore-orlando !
+* Merging Sumit's branch with plugin support for Credentials, QoS, NovaTenant resources. Also merging latest from lp:~cisco-openstack/quantum/l2network-plugin-persistence
+* Merging from Sumit's branch, VIF-driver and Quantum-aware scheduler
+* Merging lp:~asomya/quantum/lp824145 Fix for Bug#824145 : Adding a setup script for quantum
+* merge trunk pep8 fixes adapting CLI to API v1.0 Fixing wsgi to avoid failure with extensions
+* merge trunk
+* Pulling in changes from lp:quantum
+* Merging Cisco's contribution to Quantum. Thanks to various folks at Cisco Systems, Quantum will have plugins to integrate with Cisco UCS blade servers using 802.1Qbh, Cisco Nexus family of switches and the ability for Quantum plugin to have multiple switches/devices within a single Quantum plugin
+* Merging Shweta's change to fix a function call in the test code
+* Merging from Sumit's branch pylint fixes and incorporating review comments
+* Changes to README file and merging Shweta's changes
+* Merging Shweta's test changes, also README file
+* Merging from cisco branch
+* Merging from lp:quantum
+* Pulling changes from Cisco branch
+* Merging Nexus pylint changes and other enhancements from Edgar
+* Merging Rohit's changes
+* Merging plugin and tests' changes
+* Pulling in changes from Rohit's branch
+* Pulling in changes from Shweta's branch
+* Merging rohit's changes
+* Merging: lp:~danwent/quantum/client-lib
+* Merging: lp:~tylesmit/quantum/api-client-fix-serialization Adding automattic serialization to all requests by moving it to do\_request
+* fixes from rohit's branch
+* from rohit's branch
+* Merging quantum extensions framework into trunk. Thanks rajaram vinkesh, deepak & santhosh for the great work!
+* lp Bug#824145 : Adding a setup script for quantum
+* merge trunk
+* Merged quantum trunk
+* - Adding setup script
+* Added tests directory to list of modules in the README file
+* Merging changes addressing Bug # 802772. Thanks lp:danwent !
+* Merging bugfix for Bug 822890 - Added License file for Quantum code distribution
+* L2 Network Plugin Framework merge
+* Adding Apache Version 2.0 license file. This is the official license agreement under which Quantum code is available to the Open Source community
+* Merging test cases from Shwetas' branch, and further modified README file
+* Merging the test framework from Shweta's branch
+* another merge
+* merge
+* merge heckj's pip-requires fixes
+* Merged quantum trunk
+* Removing extra file in Nexus Driver
+* Merging changes
+* Merging changes from lp:quantum
+* Completing API spec alignment Unit tests aligned with changes in the API spec
+* Applying fix for bug #814518 Merging from lp:~salvatore-orlando/quantum/bug814518
+* Merging the port profile client name fix
+* Merging fix for Bug 818321
+* Merging approved OVS plugin configuration change branch. Thanks lp:danwent !
+* Merging the brand new Quantum-client-library feature
+* merging branch for bug802772, which this branch is stacked on top of
+* Merging lp:quantum updates
+* persistence of l2network & ucs plugins using mysql - db\_conn.ini - configuration details of making a connection to the database - db\_test\_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network\_db.py - db methods for l2network models - l2network\_models.py - class definitions for the l2 network tables - ucs\_db.py - db methods for ucs models - ucs\_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework
+* Merged from trunk
+* merged the latest changes from plugin-framework branch - revision 39 conforming to the new cisco plugin directory structure and moving all db related modules into cisco/db folder updated db\_test\_plugin.py  - added import of cisco constants module  - added LOG.getLogger for logging component name  - updated import module paths for l2network\_models/db and ucs\_models/db to use the new directory structure  - updated (rearranged) imports section to obey openstack alphabetical placement convention updated db\_conn.ini  - updated database name from cisco\_naas to quantum\_l2network unit test cases ran successfully and pep8 checks done again
+* merge branch for to fix bug817826
+* Merging the latest changes from lp:quantum
+* merge Salvatore's api branch with fixes for tests.  Tweaking branch to remove unwanted bin/quantum.py as part of merge
+* Merging in main repo updates
+* Apply fix for bug #817813 Merging lp:~danwent/quantum/bug817813
+* Apply fix for bug #814012 Merging lp:~danwent/quantum/bug814012
+* Apply fix for bug #814517 merging lp:~tylesmit/quantum/quantum-bug-814517
+* Santhosh/Rajaram|latest merge from quantum and made extensions use options to load plugin
+* Apply fix for bug #797419 merging lp:~salvatore-orlando/quantum/bug797419
+* Merging branch lp:~salvatore-orlando/quantum/bug802892 Fixing bug #802892
+* Merging branch lp:~netstack/quantum/quantum-unit-tests
+* Fixing silly pep8 error
+* Merged from quantum trunk
+* Applying fix for bug #804237 from branch lp:~salvatore-orlando/quantum/bug804237
+* Adapted plugin infrastructure to allow API to pass options to plugins Now using in-memory sqlite db for tests on FakePlugin teardown() now 'resets' the in-memory db Adding unit tests for APIs
+* Merging dan wendlandt's bugfixes for Bug #800466 and improvements that enable Quantum to seamlessly run on KVM!
+* merge
+* Merge: bzr merge lp:~bgh/quantum/bugfixes
+* merge and pep8 cleanup
+* Merging latest changes from parent repo - lp:network-service , Parent repo had approved merge proposal for merging lp:~santhom/network-service/quantum\_testing\_framework , which has now been merged into lp:network-service
+*  Merging pep8 and functional test related changes lp:~santhom/network-service/quantum\_testing\_framework branch
+* Santhosh/Vinkesh | Added the testing framework. Moved the smoketest to tests/functional
+* merged remote README changes
+*  Merged Brad's ovsplugin code
+* Initial version of openvswitch plugin
+* \* Merged changes from Salvatore's branch - quantum-api-workinprogress \* Removed spurious methods from quantum\_base\_plugin class. \* Updated the sample plugins to be compliant with the new QuantumBase class
+* Adding first files for quantum API
+* merged salvatore's changes to local branch
+* Pushing initial started code based on Glance project and infrastructure work done by the melange team
+* Merging in Shweta's fixes from the review by Sumit
+* Merging in latest changes from lp:quantum
+* Merging in Shweta's test changes
diff -pruN 1:16.0.0-3.1/debian/changelog 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/changelog
--- 1:16.0.0-3.1/debian/changelog	2022-10-15 10:26:58.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/changelog	2023-01-10 15:13:45.000000000 +0000
@@ -1,61 +1,101 @@
-networking-mlnx (1:16.0.0-3.1) unstable; urgency=medium
+networking-mlnx (1:19.0.0+git2023011010.0a69b971-0ubuntu1) lunar; urgency=medium
 
-  * Non-maintainer upload.
-  * No source change upload to rebuild with debhelper 13.10.
+  * New upstream snapshot for OpenStack Antelope.
 
- -- Michael Biebl <biebl@debian.org>  Sat, 15 Oct 2022 12:26:58 +0200
+ -- Corey Bryant <corey.bryant@canonical.com>  Tue, 10 Jan 2023 10:13:45 -0500
 
-networking-mlnx (1:16.0.0-3) unstable; urgency=medium
+networking-mlnx (1:19.0.0+git2022091316.0a69b97-0ubuntu1) kinetic; urgency=medium
 
-  * d/control: Correcting Homepage value.
-  * d/watch: Correcting upstream URL.
+  * New upstream snapshot for OpenStack Zed.
 
- -- Sakirnth Nagarasa <sakirnth@gmail.com>  Tue, 12 Jul 2022 15:10:34 +0200
+ -- Corey Bryant <corey.bryant@canonical.com>  Tue, 13 Sep 2022 16:46:42 -0400
 
-networking-mlnx (1:16.0.0-2) unstable; urgency=medium
+networking-mlnx (1:19.0.0+git2022071815.08c15d9-0ubuntu1) kinetic; urgency=medium
 
-  * Add Fix_mellanox_ci.patch (Closes: #1011673).
+  * New upstream snapshot for OpenStack Zed.
+  * d/control: Ensure openstack-pkg-tools has CIS hardening for log files
+    and pkgos_adduser.
 
- -- Thomas Goirand <zigo@debian.org>  Tue, 31 May 2022 10:15:23 +0200
+ -- Corey Bryant <corey.bryant@canonical.com>  Mon, 18 Jul 2022 15:27:04 -0400
 
-networking-mlnx (1:16.0.0-1) unstable; urgency=medium
+networking-mlnx (1:19.0.0+git2022060609.08c15d9-0ubuntu1) kinetic; urgency=medium
 
-  * New upstream release.
-  * Moved the package to the neutron-plugins Salsa subgroup.
+  * New upstream snapshot for OpenStack Zed.
+  * d/control: Align (Build-)Depends with upstream.
+  * d/control: Switch to debhelper compat 13.
+  * d/control: Update standards version to 4.6.1.
 
- -- Thomas Goirand <zigo@debian.org>  Wed, 20 May 2020 21:53:54 +0200
+ -- Corey Bryant <corey.bryant@canonical.com>  Mon, 06 Jun 2022 09:45:30 -0400
 
-networking-mlnx (1:15.0.2-1) unstable; urgency=medium
+networking-mlnx (1:19.0.0+git2021120909.98591db-0ubuntu1) jammy; urgency=medium
 
-  * New upstream release.
+  * New upstream snapshot for OpenStack Yoga.
 
- -- Thomas Goirand <zigo@debian.org>  Thu, 19 Dec 2019 10:24:54 +0100
+ -- Corey Bryant <corey.bryant@canonical.com>  Thu, 09 Dec 2021 09:38:14 -0500
 
-networking-mlnx (1:15.0.1-1) unstable; urgency=medium
+networking-mlnx (1:18.0.0+git2021100610.63e4524-0ubuntu1) impish; urgency=medium
 
-  * New upstream point release.
+  * New upstream release for OpenStack Xena.
 
- -- Thomas Goirand <zigo@debian.org>  Wed, 30 Oct 2019 11:37:59 +0100
+ -- Corey Bryant <corey.bryant@canonical.com>  Wed, 06 Oct 2021 10:48:48 -0400
 
-networking-mlnx (1:15.0.0-2) unstable; urgency=medium
+networking-mlnx (1:17.0.0+git2021072117.67bcd95-0ubuntu1) impish; urgency=medium
 
-  [ Ondřej Nový ]
-  * Run wrap-and-sort -bastk.
+  * New upstream snapshot for OpenStack Xena.
+  * d/p/monkey-patch-original-current-thread.patch: Dropped. Fixed upstream.
 
-  [ Thomas Goirand ]
-  * Uploading to unstable.
+ -- Corey Bryant <corey.bryant@canonical.com>  Wed, 21 Jul 2021 17:15:32 -0400
 
- -- Thomas Goirand <zigo@debian.org>  Tue, 22 Oct 2019 23:50:05 +0200
+networking-mlnx (1:16.0.1~git2021012913.546e21c-0ubuntu1) hirsute; urgency=medium
 
-networking-mlnx (1:15.0.0-1) experimental; urgency=medium
+  * New upstream snapshot for OpenStack Wallaby.
 
-  [ Lenny Verkhovsky ]
-  * New upstream release.
+ -- Corey Bryant <corey.bryant@canonical.com>  Fri, 29 Jan 2021 13:48:01 -0500
 
-  [ Thomas Goirand ]
-  * Fixed (build-)depends for this release.
+networking-mlnx (1:16.0.1~git2020101510.921e87c-0ubuntu1) groovy; urgency=medium
 
- -- Thomas Goirand <zigo@debian.org>  Wed, 16 Oct 2019 20:10:58 +0200
+  [ Chris MacNaughton ]
+  * d/control: Update VCS paths for move to lp:~ubuntu-openstack-dev.
+
+  [ Corey Bryant ]
+  * New upstream snapshot for OpenStack Victoria.
+
+ -- Corey Bryant <corey.bryant@canonical.com>  Thu, 15 Oct 2020 10:12:56 -0400
+
+networking-mlnx (1:16.0.1~git2020073014.43681d3-0ubuntu1) groovy; urgency=medium
+
+  * New upstream snapshot for OpenStack Victoria.
+  * d/control: Align (Build-)Depends with upstream.
+  * d/copyright: Update upstream URL.
+
+ -- Corey Bryant <corey.bryant@canonical.com>  Thu, 30 Jul 2020 14:48:56 -0400
+
+networking-mlnx (1:16.0.1~git2020070614.7d6ef1f-0ubuntu1) groovy; urgency=medium
+
+  * d/gbp.conf: Export build artifacts to ../build-area/.
+  * New upstream snapshot for OpenStack Victoria.
+  * d/control: Align (Build-)Depends with upstream.
+  * d/control, d/rules: Switch to debhelper compat 12 and pybuild.
+  * d/control: Update Standards-Version to 4.5.0.
+
+ -- Corey Bryant <corey.bryant@canonical.com>  Mon, 06 Jul 2020 14:02:22 -0400
+
+networking-mlnx (1:15.0.2-0ubuntu1) groovy; urgency=medium
+
+  * New upstream release for OpenStack Ussuri (LP: #1877642).
+  * d/control: Align (Build-)Depends with upstream.
+  * d/p/monkey-patch-original-current-thread.patch: Cherry-picked
+    from upstream review (https://review.opendev.org/725365)
+    to fix Python 3.8 monkey patching (LP: #1863021).
+
+ -- Corey Bryant <corey.bryant@canonical.com>  Thu, 14 May 2020 15:15:09 -0400
+
+networking-mlnx (1:15.0.0~b2~git2019090509.50bbc9d-0ubuntu1) eoan; urgency=medium
+
+  * New upstream snapshot for OpenStack Train (LP: #1842690).
+  * d/control: Align (Build-)Depends with upstream.
+
+ -- James Page <james.page@ubuntu.com>  Thu, 05 Sep 2019 09:00:32 +0100
 
 networking-mlnx (1:14.0.0-1) unstable; urgency=medium
 
diff -pruN 1:16.0.0-3.1/debian/control 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/control
--- 1:16.0.0-3.1/debian/control	2022-07-12 13:10:34.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/control	2023-01-10 15:13:45.000000000 +0000
@@ -1,62 +1,66 @@
 Source: networking-mlnx
 Section: net
 Priority: optional
-Maintainer: Debian OpenStack <team+openstack@tracker.debian.org>
+Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
+XSBC-Original-Maintainer: Debian OpenStack <team+openstack@tracker.debian.org>
 Uploaders:
  Thomas Goirand <zigo@debian.org>,
  Lenny Verkhovsky <lennyb@mellanox.com>,
  Sakirnth Nagarasa <sakirnth@gmail.com>,
 Build-Depends:
- debhelper-compat (= 11),
+ debhelper-compat (= 13),
  dh-python,
- openstack-pkg-tools,
+ openstack-pkg-tools (>= 119ubuntu1~),
  po-debconf,
  python3-all,
  python3-pbr,
- python3-sphinx,
+ python3-sphinx (>= 1.1.2),
 Build-Depends-Indep:
- python3-babel,
- python3-cliff,
- python3-coverage,
- python3-ddt,
- python3-defusedxml,
+ python3-babel (>= 1.3),
+ python3-cliff (>= 1.15.0),
+ python3-coverage (>= 4.0),
+ python3-ddt (>= 1.0.1),
+ python3-defusedxml (>= 0.5.0),
  python3-ethtool,
- python3-eventlet,
- python3-fixtures,
- python3-hacking,
- python3-mock,
- python3-netaddr,
- python3-neutron (>= 2:16.0.0),
- python3-neutron-lib (>= 1.28.0),
- python3-neutronclient,
- python3-openstackclient,
- python3-os-testr,
- python3-oslo.concurrency,
- python3-oslo.config,
- python3-oslo.privsep,
- python3-oslosphinx,
- python3-oslotest,
- python3-psycopg2,
- python3-pymysql,
- python3-requests-mock,
- python3-six,
- python3-sqlalchemy,
- python3-testresources,
- python3-testscenarios,
- python3-testtools,
- python3-webtest,
+ python3-eventlet (>= 0.18.2),
+ python3-fixtures (>= 3.0.0),
+ python3-hacking (>= 1.1.0),
+ python3-isort (>= 4.3.21),
+ python3-lxml,
+ python3-mock (>= 1.2),
+ python3-netaddr (>= 0.7.18),
+ python3-neutron (>= 1:17.0.0~),
+ python3-neutron-lib (>= 2.4.0),
+ python3-neutronclient (>= 1:6.7.0),
+ python3-openstackclient (>= 3.3.0),
+ python3-os-testr (>= 1.0.0),
+ python3-oslo.concurrency (>= 3.26.0),
+ python3-oslo.config (>= 1:5.2.0),
+ python3-oslo.privsep (>= 1.32.0),
+ python3-oslosphinx (>= 2.5.0),
+ python3-oslotest (>= 1:3.2.0),
+ python3-pymysql (>= 0.7.6),
+ python3-pyroute2 (>= 0.5.7),
+ python3-requests-mock (>= 0.7.0),
+ python3-six (>= 1.10.0),
+ python3-sqlalchemy (>= 1.2.0),
+ python3-stestr (>= 1.0.0),
+ python3-subunit (>= 1.0.0),
+ python3-testrepository (>= 0.0.18),
+ python3-testresources (>= 2.0.0),
+ python3-testscenarios (>= 0.4),
+ python3-testtools (>= 2.2.0),
+ python3-vine,
+ python3-webtest (>= 2.0.27),
  python3-zmq,
- subunit,
- testrepository,
-Standards-Version: 4.1.3
-Vcs-Browser: https://salsa.debian.org/openstack-team/neutron-plugins/networking-mlnx
-Vcs-Git: https://salsa.debian.org/openstack-team/neutron-plugins/networking-mlnx.git
-Homepage: https://opendev.org/x/networking-mlnx.git
+Standards-Version: 4.6.1
+Vcs-Git: https://git.launchpad.net/~ubuntu-openstack-dev/ubuntu/+source/networking-mlnx
+Homepage: https://opendev.org/x/networking-mlnx
 
 Package: networking-mlnx-common
 Architecture: all
 Depends:
- neutron-common  (>= 1:15.0.0),
+ neutron-common  (>= 1:11.0.0),
  python3-networking-mlnx (= ${binary:Version}),
  ${misc:Depends},
  ${python3:Depends},
@@ -111,21 +115,23 @@ Package: python3-networking-mlnx
 Architecture: all
 Section: python
 Depends:
- python3-babel,
- python3-defusedxml,
+ python3-babel (>= 1.3),
+ python3-defusedxml (>= 0.5.0),
  python3-ethtool,
- python3-eventlet,
- python3-netaddr,
- python3-neutron (>= 2:16.0.0),
- python3-neutron-lib (>= 1.28.0),
- python3-neutronclient,
- python3-openstackclient,
- python3-oslo.concurrency,
- python3-oslo.config,
- python3-oslo.privsep,
+ python3-eventlet (>= 0.18.2),
+ python3-lxml,
+ python3-netaddr (>= 0.7.18),
+ python3-neutron (>= 1:17.0.0~),
+ python3-neutron-lib (>= 2.4.0),
+ python3-neutronclient (>= 1:6.7.0),
+ python3-openstackclient (>= 3.3.0),
+ python3-oslo.concurrency (>= 3.26.0),
+ python3-oslo.config (>= 1:5.2.0),
+ python3-oslo.privsep (>= 1.32.0),
  python3-pbr,
- python3-six,
- python3-sqlalchemy,
+ python3-pyroute2 (>= 0.5.7),
+ python3-six (>= 1.10.0),
+ python3-sqlalchemy (>= 1.2.0),
  python3-zmq,
  ${misc:Depends},
  ${python3:Depends},
diff -pruN 1:16.0.0-3.1/debian/copyright 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/copyright
--- 1:16.0.0-3.1/debian/copyright	2022-07-12 09:41:46.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/copyright	2023-01-10 15:13:45.000000000 +0000
@@ -1,6 +1,6 @@
 Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
 Upstream-Name: networking-mlnx
-Source: https://github.com/openstack/networking-mlnx
+Source: https://opendev.org/x/networking-mlnx
 
 Files: *
 Copyright: (c) 2010-2015, OpenStack Foundation <openstack-dev@lists.openstack.org>
diff -pruN 1:16.0.0-3.1/debian/gbp.conf 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/gbp.conf
--- 1:16.0.0-3.1/debian/gbp.conf	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/gbp.conf	2023-01-10 15:13:45.000000000 +0000
@@ -0,0 +1,6 @@
+[DEFAULT]
+debian-branch = master
+pristine-tar = True
+
+[buildpackage]
+export-dir = ../build-area/
diff -pruN 1:16.0.0-3.1/debian/patches/Fix_mellanox_ci.patch 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/patches/Fix_mellanox_ci.patch
--- 1:16.0.0-3.1/debian/patches/Fix_mellanox_ci.patch	2022-07-12 09:41:46.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/patches/Fix_mellanox_ci.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,72 +0,0 @@
-Description: Fix Mellanox CI, test-requirements.txt and unit tests failures
- Due to some upstream changes [1], Mellanox CI is failing.
- Moreover, the test requirements must be synced with neutron.
- [1] https://github.com/openstack/neutron/commit/b5e10bf7277aa2cb74eb356b9dbf7f172218ee79
-Author: waleedm <waleedm@nvidia.com>
-Date: Wed, 20 Apr 2022 12:01:06 +0000
-Change-Id: Ie54d2b5d535d815019e6546b0ee8478633f684f0
-Origin: upstream, https://review.opendev.org/c/x/networking-mlnx/+/838717
-Last-Update: 2022-05-31
-Bug-Debian: https://bugs.debian.org/1011673
-
-Index: networking-mlnx/networking_mlnx/eswitchd/eswitch_daemon.py
-===================================================================
---- networking-mlnx.orig/networking_mlnx/eswitchd/eswitch_daemon.py
-+++ networking-mlnx/networking_mlnx/eswitchd/eswitch_daemon.py
-@@ -16,6 +16,7 @@
- 
- import sys
- 
-+from neutron.common import config as common_config
- from oslo_config import cfg
- from oslo_log import log as logging
- from oslo_serialization import jsonutils
-@@ -104,6 +105,7 @@ class MlxEswitchDaemon(object):
- 
- 
- def main():
-+    common_config.register_common_config_options()
-     config.init(sys.argv[1:])
-     try:
-         daemon = MlxEswitchDaemon()
-Index: networking-mlnx/networking_mlnx/plugins/ml2/drivers/mlnx/agent/mlnx_eswitch_neutron_agent.py
-===================================================================
---- networking-mlnx.orig/networking_mlnx/plugins/ml2/drivers/mlnx/agent/mlnx_eswitch_neutron_agent.py
-+++ networking-mlnx/networking_mlnx/plugins/ml2/drivers/mlnx/agent/mlnx_eswitch_neutron_agent.py
-@@ -455,6 +455,7 @@ class MlnxEswitchNeutronAgent(object):
- 
- 
- def main():
-+    common_config.register_common_config_options()
-     config.config.register_root_helper(cfg.CONF)
-     common_config.init(sys.argv[1:])
-     common_config.setup_logging()
-Index: networking-mlnx/networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mech_mlnx.py
-===================================================================
---- networking-mlnx.orig/networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mech_mlnx.py
-+++ networking-mlnx/networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mech_mlnx.py
-@@ -38,16 +38,20 @@ class MlnxMechanismBaseTestCase(base.Age
- 
-     AGENTS = [{'alive': True,
-                'configurations': GOOD_CONFIGS,
--               'host': 'host'}]
-+               'host': 'host',
-+               'agent_type': AGENT_TYPE}]
-     AGENTS_DEAD = [{'alive': False,
-                     'configurations': GOOD_CONFIGS,
--                    'host': 'dead_host'}]
-+                    'host': 'dead_host',
-+                    'agent_type': AGENT_TYPE}]
-     AGENTS_BAD = [{'alive': False,
-                    'configurations': GOOD_CONFIGS,
--                   'host': 'bad_host_1'},
-+                   'host': 'bad_host_1',
-+                   'agent_type': AGENT_TYPE},
-                   {'alive': True,
-                    'configurations': BAD_CONFIGS,
--                   'host': 'bad_host_2'}]
-+                   'host': 'bad_host_2',
-+                   'agent_type': AGENT_TYPE}]
- 
-     def setUp(self):
-         super(MlnxMechanismBaseTestCase, self).setUp()
diff -pruN 1:16.0.0-3.1/debian/patches/series 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/patches/series
--- 1:16.0.0-3.1/debian/patches/series	2022-07-12 09:41:46.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/patches/series	1970-01-01 00:00:00.000000000 +0000
@@ -1 +0,0 @@
-Fix_mellanox_ci.patch
diff -pruN 1:16.0.0-3.1/debian/rules 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/rules
--- 1:16.0.0-3.1/debian/rules	2022-07-12 09:41:46.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/rules	2023-01-10 15:13:45.000000000 +0000
@@ -1,10 +1,12 @@
 #!/usr/bin/make -f
 
+export PYBUILD_NAME=networking_mlnx
+
 UPSTREAM_GIT := https://opendev.org/x/networking-mlnx.git
 include /usr/share/openstack-pkg-tools/pkgos.make
 
 %:
-	dh $@ --buildsystem=python_distutils --with python3
+	dh $@ --buildsystem=pybuild --with python3
 
 override_dh_clean:
 	dh_clean
diff -pruN 1:16.0.0-3.1/debian/watch 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/watch
--- 1:16.0.0-3.1/debian/watch	2022-07-12 13:10:34.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/debian/watch	2023-01-10 15:13:45.000000000 +0000
@@ -1,3 +1,3 @@
 version=3
-opts="uversionmangle=s/\.(b|rc)/~$1/" \
-https://opendev.org/x/networking-mlnx/tags .*\/(\d{1,3}\.[\d\.]+)\.tar\.gz
+opts=uversionmangle=s/(rc|a|b|c)/~$1/ \
+https://pypi.debian.net/networking-mlnx/networking-mlnx-(.+)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz)))
diff -pruN 1:16.0.0-3.1/devstack/README.rst 1:19.0.0+git2023011010.0a69b971-0ubuntu1/devstack/README.rst
--- 1:16.0.0-3.1/devstack/README.rst	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/devstack/README.rst	2023-01-10 15:13:35.000000000 +0000
@@ -35,9 +35,8 @@
 
     [[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
     [sdn]
-    url = http://<sdn_provider_ip>/neo
+    url = http://<sdn_provider_ip>/ufmRestV3
     domain = cloudx
-    username = admin
-    password = admin
+    token = abcdef
 
 5) run ``stack.sh``
diff -pruN 1:16.0.0-3.1/etc/neutron/plugins/ml2/ml2_conf_sdn.ini 1:19.0.0+git2023011010.0a69b971-0ubuntu1/etc/neutron/plugins/ml2/ml2_conf_sdn.ini
--- 1:16.0.0-3.1/etc/neutron/plugins/ml2/ml2_conf_sdn.ini	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/etc/neutron/plugins/ml2/ml2_conf_sdn.ini	2023-01-10 15:13:35.000000000 +0000
@@ -3,7 +3,7 @@
 [sdn]
 # (StrOpt) mandatory param: SDN REST URL
 # If this is not set then no HTTP requests will be made.
-# Example: url = http://10.209.25.201/neo/
+# Example: url = http://10.209.25.201/ufmRestV3/
 # url =
 
 # (StrOpt) mandatory param: Cloud domain name in SDN provider
@@ -11,15 +11,10 @@
 # Example: domain = cloudx
 # domain =
 
-# (StrOpt) mandatory param: Username for HTTP basic authentication
+# (StrOpt) mandatory param: Token for HTTP basic authentication
 # to SDN Provider.
-# Example: username = admin
-# username =
-
-# (StrOpt) mandatory param: Password for HTTP basic authentication
-# to SDN Provider.
-# Example: password = admin
-# password =
+# Example: token = abcdef
+# token =
 
 # (IntOpt) Timeout in seconds to wait for SDN Provider HTTP request completion.
 # This is an optional parameter, default value is 10 seconds.
@@ -61,4 +56,4 @@
 # that it will send notification. * means all physical_networks
 #
 # physical_networks = *
-# Example: physical_networks = datacenter1, datacenter3
\ No newline at end of file
+# Example: physical_networks = datacenter1, datacenter3
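
Taken together, a populated [sdn] section combining the mandatory and optional
parameters documented above might look like the following (all values are
placeholders; timeout and physical_networks may be omitted to use the noted
defaults):

    [sdn]
    # SDN provider REST endpoint (UFM REST v3 in this example)
    url = http://10.209.25.201/ufmRestV3/
    # Cloud domain name configured in the SDN provider
    domain = cloudx
    # Token used for HTTP authentication against the SDN provider
    token = abcdef
    # Optional: request timeout in seconds (defaults to 10)
    timeout = 10
    # Optional: physical networks for which notifications are sent
    physical_networks = datacenter1, datacenter3
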
diff -pruN 1:16.0.0-3.1/etc/policy.json 1:19.0.0+git2023011010.0a69b971-0ubuntu1/etc/policy.json
--- 1:16.0.0-3.1/etc/policy.json	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/etc/policy.json	1970-01-01 00:00:00.000000000 +0000
@@ -1,143 +0,0 @@
-{
-    "context_is_admin":  "role:admin",
-    "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
-    "context_is_advsvc":  "role:advsvc",
-    "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
-    "admin_only": "rule:context_is_admin",
-    "regular_user": "",
-    "shared": "field:networks:shared=True",
-    "shared_firewalls": "field:firewalls:shared=True",
-    "external": "field:networks:router:external=True",
-    "default": "rule:admin_or_owner",
-
-    "create_subnet": "rule:admin_or_network_owner",
-    "get_subnet": "rule:admin_or_owner or rule:shared",
-    "update_subnet": "rule:admin_or_network_owner",
-    "delete_subnet": "rule:admin_or_network_owner",
-
-    "create_network": "",
-    "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
-    "get_network:router:external": "rule:regular_user",
-    "get_network:segments": "rule:admin_only",
-    "get_network:provider:network_type": "rule:admin_only",
-    "get_network:provider:physical_network": "rule:admin_only",
-    "get_network:provider:segmentation_id": "rule:admin_only",
-    "get_network:queue_id": "rule:admin_only",
-    "create_network:shared": "rule:admin_only",
-    "create_network:router:external": "rule:admin_only",
-    "create_network:segments": "rule:admin_only",
-    "create_network:provider:network_type": "rule:admin_only",
-    "create_network:provider:physical_network": "rule:admin_only",
-    "create_network:provider:segmentation_id": "rule:admin_only",
-    "update_network": "rule:admin_or_owner",
-    "update_network:segments": "rule:admin_only",
-    "update_network:shared": "rule:admin_only",
-    "update_network:provider:network_type": "rule:admin_only",
-    "update_network:provider:physical_network": "rule:admin_only",
-    "update_network:provider:segmentation_id": "rule:admin_only",
-    "update_network:router:external": "rule:admin_only",
-    "delete_network": "rule:admin_or_owner",
-
-    "create_port": "",
-    "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "create_port:binding:host_id": "rule:admin_only",
-    "create_port:binding:profile": "rule:admin_only",
-    "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "get_port": "rule:admin_or_owner or rule:context_is_advsvc",
-    "get_port:queue_id": "rule:admin_only",
-    "get_port:binding:vif_type": "rule:admin_only",
-    "get_port:binding:vif_details": "rule:admin_only",
-    "get_port:binding:host_id": "rule:admin_only",
-    "get_port:binding:profile": "rule:admin_only",
-    "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
-    "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "update_port:binding:host_id": "rule:admin_only",
-    "update_port:binding:profile": "rule:admin_only",
-    "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-    "delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
-
-    "get_router:ha": "rule:admin_only",
-    "create_router": "rule:regular_user",
-    "create_router:external_gateway_info:enable_snat": "rule:admin_only",
-    "create_router:distributed": "rule:admin_only",
-    "create_router:ha": "rule:admin_only",
-    "get_router": "rule:admin_or_owner",
-    "get_router:distributed": "rule:admin_only",
-    "update_router:external_gateway_info:enable_snat": "rule:admin_only",
-    "update_router:distributed": "rule:admin_only",
-    "update_router:ha": "rule:admin_only",
-    "delete_router": "rule:admin_or_owner",
-
-    "add_router_interface": "rule:admin_or_owner",
-    "remove_router_interface": "rule:admin_or_owner",
-
-    "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
-    "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
-
-    "create_firewall": "",
-    "get_firewall": "rule:admin_or_owner",
-    "create_firewall:shared": "rule:admin_only",
-    "get_firewall:shared": "rule:admin_only",
-    "update_firewall": "rule:admin_or_owner",
-    "update_firewall:shared": "rule:admin_only",
-    "delete_firewall": "rule:admin_or_owner",
-
-    "create_firewall_policy": "",
-    "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
-    "create_firewall_policy:shared": "rule:admin_or_owner",
-    "update_firewall_policy": "rule:admin_or_owner",
-    "delete_firewall_policy": "rule:admin_or_owner",
-
-    "create_firewall_rule": "",
-    "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
-    "update_firewall_rule": "rule:admin_or_owner",
-    "delete_firewall_rule": "rule:admin_or_owner",
-
-    "create_qos_queue": "rule:admin_only",
-    "get_qos_queue": "rule:admin_only",
-
-    "update_agent": "rule:admin_only",
-    "delete_agent": "rule:admin_only",
-    "get_agent": "rule:admin_only",
-
-    "create_dhcp-network": "rule:admin_only",
-    "delete_dhcp-network": "rule:admin_only",
-    "get_dhcp-networks": "rule:admin_only",
-    "create_l3-router": "rule:admin_only",
-    "delete_l3-router": "rule:admin_only",
-    "get_l3-routers": "rule:admin_only",
-    "get_dhcp-agents": "rule:admin_only",
-    "get_l3-agents": "rule:admin_only",
-    "get_loadbalancer-agent": "rule:admin_only",
-    "get_loadbalancer-pools": "rule:admin_only",
-
-    "create_floatingip": "rule:regular_user",
-    "create_floatingip:floating_ip_address": "rule:admin_only",
-    "update_floatingip": "rule:admin_or_owner",
-    "delete_floatingip": "rule:admin_or_owner",
-    "get_floatingip": "rule:admin_or_owner",
-
-    "create_network_profile": "rule:admin_only",
-    "update_network_profile": "rule:admin_only",
-    "delete_network_profile": "rule:admin_only",
-    "get_network_profiles": "",
-    "get_network_profile": "",
-    "update_policy_profiles": "rule:admin_only",
-    "get_policy_profiles": "",
-    "get_policy_profile": "",
-
-    "create_metering_label": "rule:admin_only",
-    "delete_metering_label": "rule:admin_only",
-    "get_metering_label": "rule:admin_only",
-
-    "create_metering_label_rule": "rule:admin_only",
-    "delete_metering_label_rule": "rule:admin_only",
-    "get_metering_label_rule": "rule:admin_only",
-
-    "get_service_provider": "rule:regular_user",
-    "get_lsn": "rule:admin_only",
-    "create_lsn": "rule:admin_only"
-}
diff -pruN 1:16.0.0-3.1/.gitignore 1:19.0.0+git2023011010.0a69b971-0ubuntu1/.gitignore
--- 1:16.0.0-3.1/.gitignore	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/.gitignore	1970-01-01 00:00:00.000000000 +0000
@@ -1,27 +0,0 @@
-AUTHORS
-build/*
-build-stamp
-ChangeLog
-cover/
-covhtml/
-dist/
-doc/build
-*.DS_Store
-*.pyc
-networking_mlnx.egg-info/
-networking_mlnx/vcsversion.py
-networking_mlnx/versioninfo
-pbr*.egg/
-setuptools*.egg/
-*.log
-*.mo
-*.sw?
-*~
-/.*
-!/.coveragerc
-!/.gitignore
-!/.gitreview
-!/.mailmap
-!/.pylintrc
-!/.testr.conf
-!/.zuul.yaml
diff -pruN 1:16.0.0-3.1/.gitreview 1:19.0.0+git2023011010.0a69b971-0ubuntu1/.gitreview
--- 1:16.0.0-3.1/.gitreview	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/.gitreview	1970-01-01 00:00:00.000000000 +0000
@@ -1,4 +0,0 @@
-[gerrit]
-host=review.opendev.org
-port=29418
-project=x/networking-mlnx.git
diff -pruN 1:16.0.0-3.1/networking_mlnx/cmd/eventlet/__init__.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/cmd/eventlet/__init__.py
--- 1:16.0.0-3.1/networking_mlnx/cmd/eventlet/__init__.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/cmd/eventlet/__init__.py	2023-01-10 15:13:35.000000000 +0000
@@ -13,3 +13,9 @@
 import eventlet
 
 eventlet.monkey_patch()
+# Monkey patch the original current_thread to use the up-to-date _active
+# global variable. See https://bugs.launchpad.net/bugs/1863021 and
+# https://github.com/eventlet/eventlet/issues/592
+import __original_module_threading as orig_threading  # pylint: disable=C0413  # noqa
+import threading  # pylint: disable=C0413  # noqa
+orig_threading.current_thread.__globals__['_active'] = threading._active
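
The two added imports rely on the copy of the pre-patch threading module that
eventlet stores as __original_module_threading once monkey_patch() has run;
pointing that copy's current_thread() at the patched _active registry is the
workaround for the Python 3.8 issue referenced in the comment (LP: #1863021,
eventlet issue 592). A standalone sketch of the same pattern for any
eventlet-based entry point, assuming eventlet is installed:

    import eventlet

    # Must run before the threading workaround below, since monkey patching is
    # what creates the saved __original_module_threading module.
    eventlet.monkey_patch()

    # Point the saved, pre-patch current_thread() at the registry that the
    # patched threading module actually updates for green threads.
    import __original_module_threading as orig_threading  # noqa: E402
    import threading  # noqa: E402
    orig_threading.current_thread.__globals__['_active'] = threading._active
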
diff -pruN 1:16.0.0-3.1/networking_mlnx/db/db.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/db/db.py
--- 1:16.0.0-3.1/networking_mlnx/db/db.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/db/db.py	2023-01-10 15:13:35.000000000 +0000
@@ -26,8 +26,9 @@ from networking_mlnx.db.models import sd
 from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const
 
 
-def check_for_pending_or_processing_ops(session, object_uuid, operation=None):
-    q = session.query(sdn_journal_db.SdnJournal).filter(
+@db_api.CONTEXT_READER
+def check_for_pending_or_processing_ops(context, object_uuid, operation=None):
+    q = context.session.query(sdn_journal_db.SdnJournal).filter(
         or_(sdn_journal_db.SdnJournal.state == sdn_const.PENDING,
             sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING),
         sdn_journal_db.SdnJournal.object_uuid == object_uuid)
@@ -36,11 +37,12 @@ def check_for_pending_or_processing_ops(
             q = q.filter(sdn_journal_db.SdnJournal.operation.in_(operation))
         else:
             q = q.filter(sdn_journal_db.SdnJournal.operation == operation)
-    return session.query(q.exists()).scalar()
+    return context.session.query(q.exists()).scalar()
 
 
-def check_for_pending_delete_ops_with_parent(session, object_type, parent_id):
-    rows = session.query(sdn_journal_db.SdnJournal).filter(
+@db_api.CONTEXT_READER
+def check_for_pending_delete_ops_with_parent(context, object_type, parent_id):
+    rows = context.session.query(sdn_journal_db.SdnJournal).filter(
         or_(sdn_journal_db.SdnJournal.state == sdn_const.PENDING,
             sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING),
         sdn_journal_db.SdnJournal.object_type == object_type,
@@ -54,87 +56,101 @@ def check_for_pending_delete_ops_with_pa
     return False
 
 
-def check_for_older_ops(session, row):
-    q = session.query(sdn_journal_db.SdnJournal).filter(
+@db_api.CONTEXT_READER
+def check_for_older_ops(context, row):
+    q = context.session.query(sdn_journal_db.SdnJournal).filter(
         or_(sdn_journal_db.SdnJournal.state == sdn_const.PENDING,
             sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING),
         sdn_journal_db.SdnJournal.object_uuid == row.object_uuid,
         sdn_journal_db.SdnJournal.created_at < row.created_at,
         sdn_journal_db.SdnJournal.id != row.id)
-    return session.query(q.exists()).scalar()
+    return context.session.query(q.exists()).scalar()
 
 
-def get_all_db_rows(session):
-    return session.query(sdn_journal_db.SdnJournal).all()
+@db_api.CONTEXT_READER
+def get_all_db_rows(context):
+    return context.session.query(sdn_journal_db.SdnJournal).all()
 
 
-def get_all_db_rows_by_state(session, state):
-    return session.query(sdn_journal_db.SdnJournal).filter_by(
+@db_api.CONTEXT_READER
+def get_all_db_rows_by_state(context, state):
+    return context.session.query(sdn_journal_db.SdnJournal).filter_by(
         state=state).all()
 
 
+def _get_row_with_lock(session):
+    row = session.query(sdn_journal_db.SdnJournal).filter_by(
+        state=sdn_const.PENDING).order_by(
+        asc(sdn_journal_db.SdnJournal.last_retried)).with_for_update(
+    ).first()
+    return row
+
+
 # Retry deadlock exception for Galera DB.
 # If two (or more) different threads call this method at the same time, they
 # might both succeed in changing the same row to pending, but at least one
 # of them will get a deadlock from Galera and will have to retry the operation.
 @db_api.retry_db_errors
-def get_oldest_pending_db_row_with_lock(session):
-    with session.begin():
-        row = session.query(sdn_journal_db.SdnJournal).filter_by(
-            state=sdn_const.PENDING).order_by(
-            asc(sdn_journal_db.SdnJournal.last_retried)).with_for_update(
-        ).first()
-        if row:
-            update_db_row_state(session, row, sdn_const.PROCESSING)
-
+@db_api.CONTEXT_WRITER
+def get_oldest_pending_db_row_with_lock(context):
+    row = _get_row_with_lock(context.session)
+    if row:
+        _update_db_row_state(context.session, row, sdn_const.PROCESSING)
     return row
 
 
 @db_api.retry_db_errors
-def get_all_monitoring_db_row_by_oldest(session):
-    with session.begin():
-        rows = session.query(sdn_journal_db.SdnJournal).filter_by(
-            state=sdn_const.MONITORING).order_by(
-            asc(sdn_journal_db.SdnJournal.last_retried)).all()
+@db_api.CONTEXT_READER
+def get_all_monitoring_db_row_by_oldest(context):
+    rows = context.session.query(sdn_journal_db.SdnJournal).filter_by(
+        state=sdn_const.MONITORING).order_by(
+        asc(sdn_journal_db.SdnJournal.last_retried)).all()
     return rows
 
 
 @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
-def update_db_row_state(session, row, state):
+@db_api.CONTEXT_WRITER
+def update_db_row_state(context, row, state):
+    _update_db_row_state(context.session, row, state)
+
+
+def _update_db_row_state(session, row, state):
     row.state = state
     session.merge(row)
-    session.flush()
 
 
 @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
-def update_db_row_job_id(session, row, job_id):
+@db_api.CONTEXT_WRITER
+def update_db_row_job_id(context, row, job_id):
     row.job_id = job_id
-    session.merge(row)
-    session.flush()
+    context.session.merge(row)
 
 
-def update_pending_db_row_retry(session, row, retry_count):
+@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
+@db_api.CONTEXT_WRITER
+def update_pending_db_row_retry(context, row, retry_count):
     if row.retry_count >= retry_count and retry_count != -1:
-        update_db_row_state(session, row, sdn_const.FAILED)
+        _update_db_row_state(context.session, row, sdn_const.FAILED)
     else:
         row.retry_count += 1
-        update_db_row_state(session, row, sdn_const.PENDING)
+        _update_db_row_state(context.session, row, sdn_const.PENDING)
 
 
 # This function is currently not used.
 # Deleted resources are marked as 'deleted' in the database.
 @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
-def delete_row(session, row=None, row_id=None):
+@db_api.CONTEXT_WRITER
+def delete_row(context, row=None, row_id=None):
     if row_id:
-        row = session.query(sdn_journal_db.SdnJournal).filter_by(
+        row = context.session.query(sdn_journal_db.SdnJournal).filter_by(
             id=row_id).one()
     if row:
-        session.delete(row)
-        session.flush()
+        context.session.delete(row)
 
 
 @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES)
-def create_pending_row(session, object_type, object_uuid,
+@db_api.CONTEXT_WRITER
+def create_pending_row(context, object_type, object_uuid,
                        operation, data):
     data = jsonutils.dumps(data)
     row = sdn_journal_db.SdnJournal(object_type=object_type,
@@ -142,35 +158,35 @@ def create_pending_row(session, object_t
                                     operation=operation, data=data,
                                     created_at=func.now(),
                                     state=sdn_const.PENDING)
-    session.add(row)
-    # Keep session flush for unit tests. NOOP for L2/L3 events since calls are
-    # made inside database session transaction with subtransactions=True.
-    session.flush()
+    context.session.add(row)
 
 
-@db_api.retry_db_errors
 def _update_maintenance_state(session, expected_state, state):
-    with session.begin():
-        row = session.query(sdn_maintenance_db.SdnMaintenance).filter_by(
-            state=expected_state).with_for_update().one_or_none()
-        if row is None:
-            return False
+    row = session.query(sdn_maintenance_db.SdnMaintenance).filter_by(
+        state=expected_state).with_for_update().one_or_none()
+    if row is None:
+        return False
 
-        row.state = state
-        return True
+    row.state = state
+    return True
 
 
-def lock_maintenance(session):
-    return _update_maintenance_state(session, sdn_const.PENDING,
+@db_api.retry_db_errors
+@db_api.CONTEXT_WRITER
+def lock_maintenance(context):
+    return _update_maintenance_state(context.session, sdn_const.PENDING,
                                      sdn_const.PROCESSING)
 
 
-def unlock_maintenance(session):
-    return _update_maintenance_state(session, sdn_const.PROCESSING,
+@db_api.retry_db_errors
+@db_api.CONTEXT_WRITER
+def unlock_maintenance(context):
+    return _update_maintenance_state(context.session, sdn_const.PROCESSING,
                                      sdn_const.PENDING)
 
 
-def update_maintenance_operation(session, operation=None):
+@db_api.CONTEXT_WRITER
+def update_maintenance_operation(context, operation=None):
     """Update the current maintenance operation details.
 
     The function assumes the lock is held, so it mustn't be run outside of a
@@ -180,28 +196,28 @@ def update_maintenance_operation(session
     if operation:
         op_text = operation.__name__
 
-    with session.begin():
-        row = session.query(sdn_maintenance_db.SdnMaintenance).one_or_none()
-        row.processing_operation = op_text
-
-
-def delete_rows_by_state_and_time(session, state, time_delta):
-    with session.begin():
-        now = session.execute(func.now()).scalar()
-        session.query(sdn_journal_db.SdnJournal).filter(
-            sdn_journal_db.SdnJournal.state == state,
-            sdn_journal_db.SdnJournal.last_retried < now - time_delta).delete(
-            synchronize_session=False)
-        session.expire_all()
-
-
-def reset_processing_rows(session, max_timedelta):
-    with session.begin():
-        now = session.execute(func.now()).scalar()
-        max_timedelta = datetime.timedelta(seconds=max_timedelta)
-        rows = session.query(sdn_journal_db.SdnJournal).filter(
-            sdn_journal_db.SdnJournal.last_retried < now - max_timedelta,
-            sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING,
-        ).update({'state': sdn_const.PENDING})
+    row = context.session.query(
+        sdn_maintenance_db.SdnMaintenance).one_or_none()
+    row.processing_operation = op_text
+
+
+@db_api.CONTEXT_WRITER
+def delete_rows_by_state_and_time(context, state, time_delta):
+    now = context.session.execute(func.now()).scalar()
+    context.session.query(sdn_journal_db.SdnJournal).filter(
+        sdn_journal_db.SdnJournal.state == state,
+        sdn_journal_db.SdnJournal.last_retried < now - time_delta).delete(
+        synchronize_session=False)
+    context.session.expire_all()
+
+
+@db_api.CONTEXT_WRITER
+def reset_processing_rows(context, max_timedelta):
+    now = context.session.execute(func.now()).scalar()
+    max_timedelta = datetime.timedelta(seconds=max_timedelta)
+    rows = context.session.query(sdn_journal_db.SdnJournal).filter(
+        sdn_journal_db.SdnJournal.last_retried < now - max_timedelta,
+        sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING,
+    ).update({'state': sdn_const.PENDING})
 
     return rows
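
With this refactor the journal DB helpers take a neutron context and rely on
neutron-lib's CONTEXT_READER/CONTEXT_WRITER decorators to open the reader or
writer transaction, instead of receiving a raw SQLAlchemy session. A minimal
sketch of a caller, mirroring how journal.py drives these helpers (the port
UUID is a placeholder and the controller request itself is elided):

    from neutron_lib import context as nl_context

    from networking_mlnx.db import db
    from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const

    ctx = nl_context.get_admin_context()

    # Reader helper: any pending or processing operations for this object?
    pending = db.check_for_pending_or_processing_ops(
        ctx, 'PLACEHOLDER-PORT-UUID', [sdn_const.POST, sdn_const.PUT])

    # Writer helper: claim the oldest pending row (it is moved to PROCESSING).
    row = db.get_oldest_pending_db_row_with_lock(ctx)
    if row is not None:
        # ... perform the SDN controller request here ...
        db.update_db_row_state(ctx, row, sdn_const.COMPLETED)
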
diff -pruN 1:16.0.0-3.1/networking_mlnx/eswitchd/cli/ebrctl.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/eswitchd/cli/ebrctl.py
--- 1:16.0.0-3.1/networking_mlnx/eswitchd/cli/ebrctl.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/eswitchd/cli/ebrctl.py	2023-01-10 15:13:35.000000000 +0000
@@ -62,8 +62,7 @@ def add_port(args):
                               args.vnic_type, args.dev_name)
 
     except exceptions.MlxException as e:
-        sys.stderr.write("Error in add-port command")
-        sys.stderr.write(e.message)
+        sys.stderr.write("Error in add-port command " + e.message)
         sys.exit(1)
     sys.stdout.write(dev)
     sys.exit(0)
@@ -73,8 +72,7 @@ def del_port(args):
     try:
         client.deallocate_nic(args.vnic_mac, args.fabric)
     except exceptions.MlxException as e:
-        sys.stderr.write("Error in del-port command")
-        sys.stderr.write(e.message)
+        sys.stderr.write("Error in del-port command " + e.message)
         sys.exit(1)
     sys.exit(0)
 
diff -pruN 1:16.0.0-3.1/networking_mlnx/eswitchd/eswitch_daemon.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/eswitchd/eswitch_daemon.py
--- 1:16.0.0-3.1/networking_mlnx/eswitchd/eswitch_daemon.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/eswitchd/eswitch_daemon.py	2023-01-10 15:13:35.000000000 +0000
@@ -16,6 +16,7 @@
 
 import sys
 
+from neutron.common import config as common_config
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
@@ -87,7 +88,7 @@ class MlxEswitchDaemon(object):
                 msg = jsonutils.dumps(result)
             except Exception as e:
                 LOG.exception("Exception during message handling - %s", e)
-                msg = str(e)
+                msg = jsonutils.dumps(str(e))
             sender.send_string(msg)
 
     def daemon_loop(self):
@@ -104,6 +105,7 @@ class MlxEswitchDaemon(object):
 
 
 def main():
+    common_config.register_common_config_options()
     config.init(sys.argv[1:])
     try:
         daemon = MlxEswitchDaemon()
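
The added register_common_config_options() call (here and in the agent below)
registers neutron's shared configuration options explicitly before the command
line is parsed, apparently because newer neutron releases no longer do this
implicitly on import (the upstream change referenced by the dropped
Fix_mellanox_ci.patch). A minimal sketch of the same ordering for a standalone
entry point that reuses neutron's config machinery, with the service setup
elided:

    import sys

    from neutron.common import config as common_config

    def main():
        # Register neutron's common options before CLI arguments and config
        # files are parsed; otherwise option lookups fail at startup.
        common_config.register_common_config_options()
        common_config.init(sys.argv[1:])
        common_config.setup_logging()
        # ... construct and start the daemon here ...

    if __name__ == '__main__':
        main()
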
diff -pruN 1:16.0.0-3.1/networking_mlnx/eswitchd/msg_handler.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/eswitchd/msg_handler.py
--- 1:16.0.0-3.1/networking_mlnx/eswitchd/msg_handler.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/eswitchd/msg_handler.py	2023-01-10 15:13:35.000000000 +0000
@@ -27,7 +27,7 @@ class BasicMessageHandler(object):
         self.msg = msg
 
     def execute(self):
-        raise Exception(_("execute method MUST be implemented!"))
+        raise Exception("execute method MUST be implemented!")
 
     def validate(self):
         ret = True
@@ -40,9 +40,7 @@ class BasicMessageHandler(object):
         return ret
 
     def validate_vnic_type(self, vnic_type):
-        if vnic_type in (constants.VIF_TYPE_HOSTDEV, ):
-            return True
-        return False
+        return vnic_type == constants.VIF_TYPE_HOSTDEV
 
     def build_response(self, status, reason=None, response=None):
         if status:
@@ -65,11 +63,14 @@ class PlugVnic(BasicMessageHandler):
         vnic_mac = (self.msg['vnic_mac']).lower()
         dev_name = self.msg['dev_name']
 
-        dev = eswitch_handler.plug_nic(fabric, device_id, vnic_mac, dev_name)
-        if dev:
-            return self.build_response(True, response={'dev': dev})
-        else:
-            return self.build_response(False, reason='Plug vnic failed')
+        try:
+            dev = eswitch_handler.plug_nic(
+                fabric, device_id, vnic_mac, dev_name)
+            if dev:
+                return self.build_response(True, response={'dev': dev})
+        except Exception as e:
+            LOG.error("Plug vnic failed - %s", str(e))
+        return self.build_response(False, reason='Plug vnic failed')
 
 
 class DetachVnic(BasicMessageHandler):
@@ -81,11 +82,13 @@ class DetachVnic(BasicMessageHandler):
     def execute(self, eswitch_handler):
         fabric = self.msg['fabric']
         vnic_mac = (self.msg['vnic_mac']).lower()
-        dev = eswitch_handler.delete_port(fabric, vnic_mac)
-        if dev:
-            return self.build_response(True, response={'dev': dev})
-        else:
-            return self.build_response(True, response={})
+        try:
+            dev = eswitch_handler.delete_port(fabric, vnic_mac)
+            if dev:
+                return self.build_response(True, response={'dev': dev})
+        except Exception as e:
+            LOG.warning("Detach vnic failed - %s", str(e))
+        return self.build_response(True, response={})
 
 
 class SetVLAN(BasicMessageHandler):
@@ -98,13 +101,14 @@ class SetVLAN(BasicMessageHandler):
         fabric = self.msg['fabric']
         pci_slot = self.msg['pci_slot']
         vlan = int(self.msg['vlan'])
-        ret = eswitch_handler.set_vlan(fabric, pci_slot, vlan)
-        reason = None
-        if not ret:
-            reason = 'Set VLAN Failed'
-        if reason:
-            return self.build_response(False, reason=reason)
-        return self.build_response(True, response={})
+        try:
+            ret = eswitch_handler.set_vlan(fabric, pci_slot, vlan)
+            if ret:
+                return self.build_response(True, response={})
+        except Exception as e:
+            LOG.error("Set Vlan failed - %s", str(e))
+        reason = 'Set VLAN Failed'
+        return self.build_response(False, reason=reason)
 
 
 class GetVnics(BasicMessageHandler):
@@ -119,7 +123,10 @@ class GetVnics(BasicMessageHandler):
             fabrics = None
         else:
             fabrics = [fabric]
-        vnics = eswitch_handler.get_vnics(fabrics)
+        try:
+            vnics = eswitch_handler.get_vnics(fabrics)
+        except Exception as e:
+            LOG.warning("GetVnics failed - %s", str(e))
         return self.build_response(True, response=vnics)
 
 
@@ -170,7 +177,10 @@ class PortUp(BasicMessageHandler):
     def execute(self, eswitch_handler):
         fabric = self.msg['fabric']
         pci_slot = self.msg['pci_slot']
-        eswitch_handler.port_up(fabric, pci_slot)
+        try:
+            eswitch_handler.port_up(fabric, pci_slot)
+        except Exception as e:
+            LOG.warning("Port up - %s", str(e))
         return self.build_response(True, response={})
 
 
@@ -183,7 +193,10 @@ class PortDown(BasicMessageHandler):
     def execute(self, eswitch_handler):
         fabric = self.msg['fabric']
         pci_slot = self.msg['pci_slot']
-        eswitch_handler.port_down(fabric, pci_slot)
+        try:
+            eswitch_handler.port_down(fabric, pci_slot)
+        except Exception as e:
+            LOG.warning("Port down failed - %s", str(e))
         return self.build_response(True, response={})
 
 
diff -pruN 1:16.0.0-3.1/networking_mlnx/journal/dependency_validations.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/journal/dependency_validations.py
--- 1:16.0.0-3.1/networking_mlnx/journal/dependency_validations.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/journal/dependency_validations.py	2023-01-10 15:13:35.000000000 +0000
@@ -19,14 +19,14 @@ from networking_mlnx.db import db
 from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const
 
 
-def _is_valid_operation(session, row):
+def _is_valid_operation(context, row):
     # Check if there are older updates in the queue
-    if db.check_for_older_ops(session, row):
+    if db.check_for_older_ops(context, row):
         return False
     return True
 
 
-def validate_network_operation(session, row):
+def validate_network_operation(context, row):
     """Validate the network operation based on dependencies.
 
     Validate network operation depending on whether it's dependencies
@@ -36,19 +36,19 @@ def validate_network_operation(session,
         # Check for any pending or processing create or update
         # ops on this uuid itself
         if db.check_for_pending_or_processing_ops(
-            session, row.object_uuid, [sdn_const.PUT,
+            context, row.object_uuid, [sdn_const.PUT,
                                        sdn_const.POST]):
             return False
         if db.check_for_pending_delete_ops_with_parent(
-            session, sdn_const.PORT, row.object_uuid):
+            context, sdn_const.PORT, row.object_uuid):
             return False
     elif (row.operation == sdn_const.PUT and
-            not _is_valid_operation(session, row)):
+            not _is_valid_operation(context, row)):
         return False
     return True
 
 
-def validate_port_operation(session, row):
+def validate_port_operation(context, row):
     """Validate port operation based on dependencies.
 
     Validate port operation depending on whether it's dependencies
@@ -59,10 +59,10 @@ def validate_port_operation(session, row
         network_id = network_dict['network_id']
         # Check for pending or processing network operations
         ops = db.check_for_pending_or_processing_ops(
-            session, network_id, [sdn_const.POST])
+            context, network_id, [sdn_const.POST])
         if ops:
             return False
-    return _is_valid_operation(session, row)
+    return _is_valid_operation(context, row)
 
 
 _VALIDATION_MAP = {
@@ -71,13 +71,13 @@ _VALIDATION_MAP = {
 }
 
 
-def validate(session, row):
+def validate(context, row):
     """Validate resource dependency in journaled operations.
 
-    :param session: db session
+    :param context: context manager
     :param row: entry in journal entry to be validated
     """
-    return _VALIDATION_MAP[row.object_type](session, row)
+    return _VALIDATION_MAP[row.object_type](context, row)
 
 
 def register_validator(object_type, validator):
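
Validators now receive the context rather than a session: each entry in
_VALIDATION_MAP is a callable taking (context, row) and returning whether the
journaled operation may run yet. An illustrative sketch of wiring in an extra
validator through register_validator(); the object type and validator here are
placeholders, not something the driver actually defines:

    from networking_mlnx.db import db
    from networking_mlnx.journal import dependency_validations

    def validate_example_operation(context, row):
        # Allow the operation only once no older pending/processing entries
        # exist for the same object UUID.
        return not db.check_for_older_ops(context, row)

    dependency_validations.register_validator(
        'example-object', validate_example_operation)
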
diff -pruN 1:16.0.0-3.1/networking_mlnx/journal/journal.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/journal/journal.py
--- 1:16.0.0-3.1/networking_mlnx/journal/journal.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/journal/journal.py	2023-01-10 15:13:35.000000000 +0000
@@ -41,9 +41,8 @@ def call_thread_on_end(func):
     return new_func
 
 
-def record(db_session, object_type, object_uuid, operation, data,
-           context=None):
-    db.create_pending_row(db_session, object_type, object_uuid, operation,
+def record(context, object_type, object_uuid, operation, data):
+    db.create_pending_row(context, object_type, object_uuid, operation,
                           data)
 
 
@@ -87,8 +86,8 @@ class SdnJournalThread(object):
                 self.event.clear()
 
                 context = nl_context.get_admin_context()
-                self._sync_pending_rows(context.session, exit_after_run)
-                self._sync_progress_rows(context.session)
+                self._sync_pending_rows(context, exit_after_run)
+                self._sync_progress_rows(context)
 
                 LOG.debug("Clearing sync thread event")
                 if exit_after_run:
@@ -99,16 +98,16 @@ class SdnJournalThread(object):
                 # Catch exceptions to protect the thread while running
                 LOG.exception("Error on run_sync_thread")
 
-    def _sync_pending_rows(self, session, exit_after_run):
+    def _sync_pending_rows(self, context, exit_after_run):
         while True:
             LOG.debug("sync_pending_rows operation walking database")
-            row = db.get_oldest_pending_db_row_with_lock(session)
+            row = db.get_oldest_pending_db_row_with_lock(context)
             if not row:
                 LOG.debug("No rows to sync")
                 break
 
             # Validate the operation
-            valid = dependency_validations.validate(session, row)
+            valid = dependency_validations.validate(context, row)
             if not valid:
                 LOG.info("%(operation)s %(type)s %(uuid)s is not a "
                          "valid operation yet, skipping for now",
@@ -116,7 +115,7 @@ class SdnJournalThread(object):
                           'type': row.object_type,
                           'uuid': row.object_uuid})
                 # Set row back to pending.
-                db.update_db_row_state(session, row, sdn_const.PENDING)
+                db.update_db_row_state(context, row, sdn_const.PENDING)
 
                 if exit_after_run:
                     break
@@ -126,7 +125,7 @@ class SdnJournalThread(object):
                      {'operation': row.operation, 'type': row.object_type,
                       'uuid': row.object_uuid})
 
-            # Add code to sync this to NEO
+            # Add code to sync this to SDN controller
             urlpath = sdn_utils.strings_to_url(row.object_type)
             if row.operation != sdn_const.POST:
                 urlpath = sdn_utils.strings_to_url(urlpath, row.object_uuid)
@@ -137,10 +136,10 @@ class SdnJournalThread(object):
                     client_operation_method(
                         urlpath, jsonutils.loads(row.data)))
                 if response.status_code == requests.codes.not_implemented:
-                    db.update_db_row_state(session, row, sdn_const.COMPLETED)
+                    db.update_db_row_state(context, row, sdn_const.COMPLETED)
                 elif (response.status_code == requests.codes.not_found and
                       row.operation == sdn_const.DELETE):
-                    db.update_db_row_state(session, row, sdn_const.COMPLETED)
+                    db.update_db_row_state(context, row, sdn_const.COMPLETED)
                 else:
                     # update in progress and job_id
                     job_id = None
@@ -164,28 +163,28 @@ class SdnJournalThread(object):
 
                     if job_id:
                         db.update_db_row_job_id(
-                            session, row, job_id=job_id)
+                            context, row, job_id=job_id)
                         db.update_db_row_state(
-                            session, row, sdn_const.MONITORING)
+                            context, row, sdn_const.MONITORING)
                     else:
                         LOG.warning("object %s has NULL job_id",
                                     row.object_uuid)
             except (sdn_exc.SDNConnectionError, sdn_exc.SDNLoginError):
                 # Log an error and raise the retry count. If the retry count
                 # exceeds the limit, move it to the failed state.
-                LOG.error("Cannot connect to the NEO Controller")
-                db.update_pending_db_row_retry(session, row,
+                LOG.error("Cannot connect to the SDN Controller")
+                db.update_pending_db_row_retry(context, row,
                                                self._row_retry_count)
                 # Break out of the loop and retry with the next
                 # timer interval
                 break
 
-    def _sync_progress_rows(self, session):
+    def _sync_progress_rows(self, context):
         # 1. get all progressed job
-        # 2. get status for NEO
+        # 2. get status for SDN Controller
         # 3. Update status if completed/failed
         LOG.debug("sync_progress_rows operation walking database")
-        rows = db.get_all_monitoring_db_row_by_oldest(session)
+        rows = db.get_all_monitoring_db_row_by_oldest(context)
         if not rows:
             LOG.debug("No rows to sync")
             return
@@ -201,34 +200,33 @@ class SdnJournalThread(object):
                         job_status = response.json().get('Status')
                         if job_status == 'Completed':
                             db.update_db_row_state(
-                                session, row, sdn_const.COMPLETED)
+                                context, row, sdn_const.COMPLETED)
                             continue
-                        elif job_status in ("Pending", "Running"):
-                            LOG.debug("NEO Job id %(job_id)s is %(status)s "
-                                      "continue monitoring",
+                        if job_status in ("Pending", "Running"):
+                            LOG.debug("SDN Controller Job id %(job_id)s is "
+                                      "%(status)s continue monitoring",
                                       {'job_id': row.job_id,
                                        'status': job_status})
                             continue
-                        else:
-                            LOG.error("NEO Job id %(job_id)s, failed with"
-                                      " %(status)s",
-                                      {'job_id': row.job_id,
-                                       'status': job_status})
-                            db.update_db_row_state(
-                                session, row, sdn_const.PENDING)
+                        LOG.error("SDN Controller Job id %(job_id)s, "
+                                  "failed with %(status)s",
+                                  {'job_id': row.job_id,
+                                  'status': job_status})
+                        db.update_db_row_state(
+                            context, row, sdn_const.PENDING)
                     except (ValueError, AttributeError):
                         LOG.error("failed to extract response for job"
                                   "id %s", row.job_id)
                 else:
-                    LOG.error("NEO Job id %(job_id)s, failed with "
+                    LOG.error("SDN Controller Job id %(job_id)s, failed with "
                               "%(status)s",
                               {'job_id': row.job_id, 'status': job_status})
-                    db.update_db_row_state(session, row, sdn_const.PENDING)
+                    db.update_db_row_state(context, row, sdn_const.PENDING)
 
             except (sdn_exc.SDNConnectionError, sdn_exc.SDNLoginError):
                 # Don't raise the retry count, just log an error
-                LOG.error("Cannot connect to the NEO Controller")
-                db.update_db_row_state(session, row, sdn_const.PENDING)
+                LOG.error("Cannot connect to the SDN Controller")
+                db.update_db_row_state(context, row, sdn_const.PENDING)
                 # Break out of the loop and retry with the next
                 # timer interval
                 break
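
The record() helper at the top of this module now takes the request context
first and simply queues a pending journal row for the sync thread shown above.
A short sketch of how a driver hook might call it (the payload and its 'id'
key are placeholders for whatever the driver actually records):

    from networking_mlnx.journal import journal
    from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const

    def record_port_create(context, port_dict):
        # Queue a POST for the SDN controller; the journal thread later claims
        # the row via db.get_oldest_pending_db_row_with_lock().
        journal.record(context, sdn_const.PORT, port_dict['id'],
                       sdn_const.POST, port_dict)
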
diff -pruN 1:16.0.0-3.1/networking_mlnx/journal/maintenance.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/journal/maintenance.py
--- 1:16.0.0-3.1/networking_mlnx/journal/maintenance.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/journal/maintenance.py	2023-01-10 15:13:35.000000000 +0000
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-
+from neutron_lib import context
 from neutron_lib.db import api as neutron_db_api
 from oslo_config import cfg
 from oslo_log import log as logging
@@ -34,15 +34,16 @@ class MaintenanceThread(object):
     def start(self):
         self.timer.start(self.maintenance_interval, stop_on_exception=False)
 
-    def _execute_op(self, operation, session):
+    @neutron_db_api.CONTEXT_READER
+    def _execute_op(self, operation, context):
         op_details = operation.__name__
         if operation.__doc__:
             op_details += " (%s)" % operation.func_doc
 
         try:
             LOG.info("Starting maintenance operation %s.", op_details)
-            db.update_maintenance_operation(session, operation=operation)
-            operation(session=session)
+            db.update_maintenance_operation(context, operation=operation)
+            operation(session=context.session)
             LOG.info("Finished maintenance operation %s.", op_details)
         except Exception:
             LOG.exception("Failed during maintenance operation %s.",
@@ -50,17 +51,17 @@ class MaintenanceThread(object):
 
     def execute_ops(self):
         LOG.info("Starting journal maintenance run.")
-        session = neutron_db_api.get_reader_session()
-        if not db.lock_maintenance(session):
+        db_context = context.get_admin_context()
+        if not db.lock_maintenance(db_context):
             LOG.info("Maintenance already running, aborting.")
             return
 
         try:
             for operation in self.maintenance_ops:
-                self._execute_op(operation, session)
+                self._execute_op(operation, db_context)
         finally:
-            db.update_maintenance_operation(session, operation=None)
-            db.unlock_maintenance(session)
+            db.update_maintenance_operation(db_context, operation=None)
+            db.unlock_maintenance(db_context)
             LOG.info("Finished journal maintenance run.")
 
     def register_operation(self, f):
diff -pruN 1:16.0.0-3.1/networking_mlnx/linux/interface_drivers/interface.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/linux/interface_drivers/interface.py
--- 1:16.0.0-3.1/networking_mlnx/linux/interface_drivers/interface.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/linux/interface_drivers/interface.py	2023-01-10 15:13:35.000000000 +0000
@@ -48,8 +48,7 @@ class IPoIBInterfaceDriver(n_interface.L
             LOG.error("IPoIB root device %s does not exist.", self.root_dev)
 
     def plug_new(self, network_id, port_id, device_name, mac_address,
-                 bridge=None, namespace=None, prefix=None, mtu=None,
-                 link_up=True):
+                 bridge=None, namespace=None, prefix=None, mtu=None):
         """Plugin the interface."""
         ip = ip_lib.IPoIBWrapper(namespace=namespace)
         try:
@@ -57,8 +56,7 @@ class IPoIBInterfaceDriver(n_interface.L
                                     fields=[constants.SEGMENTATION_ID])[0]
             segmentation_id = net.get(constants.SEGMENTATION_ID)
             dev = ip.add_ipoib(device_name, self.root_dev, segmentation_id)
-            if link_up:
-                dev.link.set_up()
+            dev.link.set_up()
         except RuntimeError as e:
             LOG.error("Failed plugging interface '%s' - %s",
                       device_name, str(e))
@@ -259,8 +257,7 @@ class MultiInterfaceDriver(n_interface.L
         return None
 
     def plug_new(self, network_id, port_id, device_name, mac_address,
-                 bridge=None, namespace=None, prefix=None, mtu=None,
-                 link_up=True):
+                 bridge=None, namespace=None, prefix=None, mtu=None):
         """Plugin the interface."""
         network = MultiInterfaceDriver.network_cache.get(network_id)
         physnet = network.get(constants.PHYSICAL_NETWORK)
@@ -270,7 +267,7 @@ class MultiInterfaceDriver(n_interface.L
         try:
             driver = self.drivers[physnet]
             driver.plug_new(network_id, port_id, device_name, mac_address,
-                            bridge, namespace, prefix, mtu, link_up)
+                            bridge, namespace, prefix, mtu)
         except KeyError:
             LOG.error("Interface driver not found for physnet: %s", physnet)
 
diff -pruN 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/mlnx/agent/mlnx_eswitch_neutron_agent.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/mlnx/agent/mlnx_eswitch_neutron_agent.py
--- 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/mlnx/agent/mlnx_eswitch_neutron_agent.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/mlnx/agent/mlnx_eswitch_neutron_agent.py	2023-01-10 15:13:35.000000000 +0000
@@ -455,6 +455,7 @@ class MlnxEswitchNeutronAgent(object):
 
 
 def main():
+    common_config.register_common_config_options()
     config.config.register_root_helper(cfg.CONF)
     common_config.init(sys.argv[1:])
     common_config.setup_logging()
diff -pruN 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/mlnx/config.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/mlnx/config.py
--- 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/mlnx/config.py	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/mlnx/config.py	2023-01-10 15:13:35.000000000 +0000
@@ -0,0 +1,24 @@
+# Copyright 2020 Mellanox Technologies, Ltd
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+from networking_mlnx._i18n import _
+
+mlnx_opts = [
+    cfg.BoolOpt('client_id_hardware',
+                default="True",
+                help=_("Generate client-id according to send "
+                       "dhcp-client-identifier = hardware")),
+]
diff -pruN 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/mlnx/mech_mlnx.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/mlnx/mech_mlnx.py
--- 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/mlnx/mech_mlnx.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/mlnx/mech_mlnx.py	2023-01-10 15:13:35.000000000 +0000
@@ -21,9 +21,16 @@ from neutron_lib.api.definitions import
 from neutron_lib import constants as p_constants
 from neutron_lib.plugins import directory
 from neutron_lib.plugins.ml2 import api
+from oslo_config import cfg
+
+from networking_mlnx.plugins.ml2.drivers.mlnx import config
+
+cfg.CONF.register_opts(config.mlnx_opts, "mlnx")
 
 AGENT_TYPE_MLNX = 'Mellanox plugin agent'
 VIF_TYPE_IB_HOSTDEV = 'ib_hostdev'
+LEGACY_CLIENT_ID_PREFIX = 'ff:00:00:00:00:00:02:00:00:02:c9:00:'
+HARDWARE_CLIENT_ID_PREFIX = '20:'
 
 
 class MlnxMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
@@ -61,12 +68,19 @@ class MlnxMechanismDriver(mech_agent.Sim
                                 self.vif_details)
 
     def _gen_client_id(self, port):
-        _PREFIX = 'ff:00:00:00:00:00:02:00:00:02:c9:00:'
+        if cfg.CONF.mlnx.client_id_hardware:
+            return self._gen_client_id_with_prefix(port,
+                HARDWARE_CLIENT_ID_PREFIX)
+        else:
+            return self._gen_client_id_with_prefix(port,
+                LEGACY_CLIENT_ID_PREFIX)
+
+    def _gen_client_id_with_prefix(self, port, prefix):
         _MIDDLE = ':00:00:'
         mac_address = port["mac_address"]
         mac_first = mac_address[:8]
         mac_last = mac_address[9:]
-        client_id = ''.join([_PREFIX, mac_first, _MIDDLE, mac_last])
+        client_id = ''.join([prefix, mac_first, _MIDDLE, mac_last])
         return client_id
 
     def _gen_client_id_opt(self, port):
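
A worked example, not part of the patch, of the prefix-based client-id generation introduced above; it uses the same slicing as _gen_client_id_with_prefix, and the MAC value matches the one exercised by the unit tests further down.

    HARDWARE_CLIENT_ID_PREFIX = '20:'
    LEGACY_CLIENT_ID_PREFIX = 'ff:00:00:00:00:00:02:00:00:02:c9:00:'

    def gen_client_id(mac_address, prefix):
        # Keep the first three and last three octets of the MAC and join them
        # with ':00:00:' in the middle, as the driver does.
        middle = ':00:00:'
        return ''.join([prefix, mac_address[:8], middle, mac_address[9:]])

    print(gen_client_id('01:23:45:67:89:ab', HARDWARE_CLIENT_ID_PREFIX))
    # 20:01:23:45:00:00:67:89:ab
    print(gen_client_id('01:23:45:67:89:ab', LEGACY_CLIENT_ID_PREFIX))
    # ff:00:00:00:00:00:02:00:00:02:c9:00:01:23:45:00:00:67:89:ab
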
diff -pruN 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/sdn/client.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/sdn/client.py
--- 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/sdn/client.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/sdn/client.py	2023-01-10 15:13:35.000000000 +0000
@@ -31,29 +31,29 @@ cfg.CONF.register_opts(config.sdn_opts,
 
 class SdnRestClient(object):
 
-    MANDATORY_ARGS = ('url', 'username', 'password')
+    MANDATORY_ARGS = ('url', 'token')
 
     @classmethod
     def create_client(cls):
         return cls(
             cfg.CONF.sdn.url,
             cfg.CONF.sdn.domain,
-            cfg.CONF.sdn.username,
-            cfg.CONF.sdn.password,
             cfg.CONF.sdn.timeout,
             cfg.CONF.sdn.cert_verify,
-            cfg.CONF.sdn.cert_path)
+            cfg.CONF.sdn.cert_path,
+            cfg.CONF.sdn.token)
 
-    def __init__(self, url, domain, username, password, timeout,
-                 verify, cert_path):
+    def __init__(self, url, domain, timeout,
+                 verify, cert_path, token):
         self.url = url
         self.domain = domain
         self.timeout = timeout
-        self.username = username
-        self.password = password
+        self.token = token
         self._validate_mandatory_params_exist()
         self.url.rstrip("/")
         self.verify = verify
+        self.headers = {"Authorization": "Basic {0}".format(self.token),
+                        **sdn_const.JSON_HTTP_HEADER}
         if verify:
             self.verify = self._get_cert(cert_path)
 
@@ -73,24 +73,6 @@ class SdnRestClient(object):
                 raise cfg.RequiredOptError(
                     arg, cfg.OptGroup(sdn_const.GROUP_OPT))
 
-    def _get_session(self):
-        login_url = sdn_utils.strings_to_url(str(self.url), "login")
-        login_data = "username=%s&password=%s" % (self.username,
-                                                  self.password)
-        login_headers = sdn_const.LOGIN_HTTP_HEADER
-        try:
-            session = requests.session()
-            session.verify = self.verify
-            LOG.debug("Login to SDN Provider. Login URL %(url)s",
-                    {'url': login_url})
-            r = session.request(sdn_const.POST, login_url, data=login_data,
-                                headers=login_headers, timeout=self.timeout)
-            LOG.debug("request status: %d", r.status_code)
-            r.raise_for_status()
-        except Exception as e:
-            raise sdn_exc.SDNLoginError(login_url=login_url, msg=e)
-        return session
-
     def get(self, urlpath='', data=None):
         urlpath = sdn_utils.strings_to_url(self.url, urlpath)
         return self.request(sdn_const.GET, urlpath, data)
@@ -109,13 +91,12 @@ class SdnRestClient(object):
 
     def request(self, method, urlpath='', data=None):
         data = jsonutils.dumps(data, indent=2) if data else None
-        session = self._get_session()
-
         LOG.debug("Sending METHOD %(method)s URL %(url)s JSON %(data)s",
                   {'method': method, 'url': urlpath, 'data': data})
-        return self._check_response(session.request(
-                method, url=str(urlpath), headers=sdn_const.JSON_HTTP_HEADER,
-                data=data, timeout=self.timeout), method)
+
+        return self._check_response(requests.request(
+                method, url=str(urlpath), headers=self.headers,
+                data=data, verify=self.verify, timeout=self.timeout), method)
 
     def _check_response(self, response, method):
         try:
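
The rewritten client above drops the login session and authenticates every call with a pre-built Basic Authorization header passed straight to requests.request(). A minimal sketch of that flow, not part of the patch; the URL, token and payload are placeholders, and the Content-Type header stands in for sdn_const.JSON_HTTP_HEADER.

    import requests
    from oslo_serialization import jsonutils

    token = 'abcdef'  # placeholder for the [sdn] token option
    headers = {'Authorization': 'Basic {0}'.format(token),
               'Content-Type': 'application/json'}
    data = jsonutils.dumps({'name': 'net1'}, indent=2)

    resp = requests.request('POST', url='https://127.0.0.1/ufmRestV3/resources',
                            headers=headers, data=data, verify=False, timeout=10)
    resp.raise_for_status()
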
diff -pruN 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/sdn/config.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/sdn/config.py
--- 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/sdn/config.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/sdn/config.py	2023-01-10 15:13:35.000000000 +0000
@@ -30,13 +30,10 @@ sdn_opts = [
                           "(for example: cloudx)"),
                    default='cloudx'
                    ),
-        cfg.StrOpt('username',
-                   help=_("HTTP username for authentication."),
-                   ),
-        cfg.StrOpt('password',
-                   help=_("HTTP password for authentication."),
+        cfg.StrOpt('token',
+                   help=_("HTTPS token for authentication."),
                    secret=True,
-                   default='123456'
+                   default="abcdef",
                    ),
         cfg.IntOpt('timeout',
                    help=_("HTTP timeout in seconds."),
@@ -81,7 +78,7 @@ sdn_opts = [
                            "conjuction with bind_normal_ports. "
                            "The list must be a subset of physical_networks")),
         cfg.BoolOpt('cert_verify',
-                    default="True",
+                    default="False",
                     help=_("Use certificates to verify connections.")),
         cfg.StrOpt('cert_path',
                    default="",
diff -pruN 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/sdn/sdn_mech_driver.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/sdn/sdn_mech_driver.py
--- 1:16.0.0-3.1/networking_mlnx/plugins/ml2/drivers/sdn/sdn_mech_driver.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/plugins/ml2/drivers/sdn/sdn_mech_driver.py	2023-01-10 15:13:35.000000000 +0000
@@ -19,7 +19,7 @@ from neutron.objects.qos import policy a
 from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
 from neutron_lib.api.definitions import portbindings
 from neutron_lib import constants as neutron_const
-from neutron_lib.db import api as db_api
+from neutron_lib import context as nl_context
 from neutron_lib.plugins.ml2 import api
 from oslo_config import cfg
 from oslo_log import log
@@ -77,9 +77,9 @@ class SDNMechanismDriver(api.MechanismDr
     """
 
     supported_device_owners = [neutron_const.DEVICE_OWNER_DHCP,
-                               neutron_const.DEVICE_OWNER_ROUTER_INTF,
-                               neutron_const.DEVICE_OWNER_ROUTER_GW,
-                               neutron_const.DEVICE_OWNER_FLOATINGIP]
+                               neutron_const.DEVICE_OWNER_FLOATINGIP,
+                               neutron_const.DEVICE_OWNER_ROUTER_HA_INTF] + \
+        list(neutron_const.ROUTER_INTERFACE_OWNERS_SNAT)
 
     def initialize(self):
         if self._is_sdn_sync_enabled():
@@ -114,7 +114,7 @@ class SDNMechanismDriver(api.MechanismDr
         # `bind_normal_ports_physnets` must be a subset of `physical_networks`
         if (cfg.CONF.sdn.bind_normal_ports and
                 not (sdn_const.ANY in cfg.CONF.sdn.physical_networks) and
-                _is_sublist(
+                not _is_sublist(
                     cfg.CONF.sdn.bind_normal_ports_physnets,
                     cfg.CONF.sdn.physical_networks)):
             raise sdn_excpt.SDNDriverConfError(
@@ -162,7 +162,7 @@ class SDNMechanismDriver(api.MechanismDr
             SDNMechanismDriver._replace_port_dhcp_opt_name(
                 data, edo_ext.DHCP_OPT_CLIENT_ID_NUM,
                 edo_ext.DHCP_OPT_CLIENT_ID)
-        journal.record(context._plugin_context.session, object_type,
+        journal.record(context._plugin_context, object_type,
                        context.current['id'], operation, data)
 
     @context_validator(sdn_const.NETWORK)
@@ -223,7 +223,7 @@ class SDNMechanismDriver(api.MechanismDr
         dhcp_opts = port.get('extra_dhcp_opts', [])
         for dhcp_opt in dhcp_opts:
             if (isinstance(dhcp_opt, dict) and
-                    dhcp_opt.get('opt_name') == old_opt_name):
+                    dhcp_opt.get('opt_name') == str(old_opt_name)):
                 dhcp_opt['opt_name'] = new_opt_name
                 return
 
@@ -256,7 +256,7 @@ class SDNMechanismDriver(api.MechanismDr
 
         vnic_type = port_dic[portbindings.VNIC_TYPE]
         # Check if we get a client id after binding the bare metal port,
-        # and report the port to neo
+        # and report the port to the SDN controller
         if vnic_type == portbindings.VNIC_BAREMETAL:
             # Ethernet Case
             link__info = self._get_local_link_information(port_dic)
@@ -335,8 +335,8 @@ class SDNMechanismDriver(api.MechanismDr
                        if operation == sdn_const.POST else res_id)
         if resource_dict is not None:
             resource_dict = resource_dict[object_type]
-        journal.record(db_api.get_session(), object_type, object_uuid,
-                       operation, resource_dict)
+        journal.record(nl_context.get_admin_context(), object_type,
+                       object_uuid, operation, resource_dict)
 
     def _postcommit(self, context):
         if not self._is_sdn_sync_enabled():
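
The physnet validation above had its condition inverted: the driver now raises SDNDriverConfError only when bind_normal_ports is enabled, physical_networks does not contain '*', and bind_normal_ports_physnets is not a subset of physical_networks. A plain-Python illustration, not part of the patch, in which is_subset stands in for the module's _is_sublist helper (whose implementation is not shown in this diff):

    def is_subset(sub, full):
        # True when every physnet in `sub` also appears in `full`.
        return set(sub) <= set(full)

    bind_normal_ports = True
    physical_networks = ['testphys1', 'testphys2']
    bind_normal_ports_physnets = ['testphys3']

    misconfigured = (bind_normal_ports
                     and '*' not in physical_networks
                     and not is_subset(bind_normal_ports_physnets,
                                       physical_networks))
    print(misconfigured)  # True -> the driver would refuse to start
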
diff -pruN 1:16.0.0-3.1/networking_mlnx/tests/unit/db/test_db.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/db/test_db.py
--- 1:16.0.0-3.1/networking_mlnx/tests/unit/db/test_db.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/db/test_db.py	2023-01-10 15:13:35.000000000 +0000
@@ -14,10 +14,12 @@
 
 from datetime import datetime
 from datetime import timedelta
+import time
 
 import mock
 from neutron.tests.unit import testlib_api
 from neutron_lib import context
+from neutron_lib.db import api as db_api
 from oslo_db import exception
 
 from networking_mlnx.db import db
@@ -34,22 +36,21 @@ class DbTestCase(testlib_api.SqlTestCase
     def setUp(self):
         super(DbTestCase, self).setUp()
         self.db_context = context.get_admin_context()
-        self.db_session = self.db_context.session
         self.addCleanup(self._db_cleanup)
 
     def _db_cleanup(self):
-        self.db_session.query(sdn_journal_db.SdnJournal).delete()
+        self.db_context.session.query(sdn_journal_db.SdnJournal).delete()
 
     def _update_row(self, row):
-        self.db_session.merge(row)
-        self.db_session.flush()
+        with db_api.CONTEXT_WRITER.using(self.db_context):
+            self.db_context.session.merge(row)
 
     def _test_validate_updates(self, rows, time_deltas, expected_validations):
         for row in rows:
-            db.create_pending_row(self.db_session, *row)
+            db.create_pending_row(self.db_context, *row)
 
         # update row created_at
-        rows = db.get_all_db_rows(self.db_session)
+        rows = db.get_all_db_rows(self.db_context)
         now = datetime.now()
         for row, time_delta in zip(rows, time_deltas):
             row.created_at = now - timedelta(hours=time_delta)
@@ -57,35 +58,35 @@ class DbTestCase(testlib_api.SqlTestCase
 
         # validate if there are older rows
         for row, expected_valid in zip(rows, expected_validations):
-            valid = not db.check_for_older_ops(self.db_session, row)
+            valid = not db.check_for_older_ops(self.db_context, row)
             self.assertEqual(expected_valid, valid)
 
     def _test_retry_count(self, retry_num, max_retry,
                           expected_retry_count, expected_state):
         # add new pending row
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
 
         # update the row with the requested retry_num
-        row = db.get_all_db_rows(self.db_session)[0]
+        row = db.get_all_db_rows(self.db_context)[0]
         row.retry_count = retry_num - 1
-        db.update_pending_db_row_retry(self.db_session, row, max_retry)
+        db.update_pending_db_row_retry(self.db_context, row, max_retry)
 
         # validate the state and the retry_count of the row
-        row = db.get_all_db_rows(self.db_session)[0]
+        row = db.get_all_db_rows(self.db_context)[0]
         self.assertEqual(expected_state, row.state)
         self.assertEqual(expected_retry_count, row.retry_count)
 
     def _test_update_row_state(self, from_state, to_state):
         # add new pending row
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
 
-        row = db.get_all_db_rows(self.db_session)[0]
+        row = db.get_all_db_rows(self.db_context)[0]
         for state in [from_state, to_state]:
             # update the row state
-            db.update_db_row_state(self.db_session, row, state)
+            db.update_db_row_state(self.db_context, row, state)
 
             # validate the new state
-            row = db.get_all_db_rows(self.db_session)[0]
+            row = db.get_all_db_rows(self.db_context)[0]
             self.assertEqual(state, row.state)
 
     def test_validate_updates_same_object_uuid(self):
@@ -110,16 +111,16 @@ class DbTestCase(testlib_api.SqlTestCase
             [self.UPDATE_ROW, other_row], [1, 0], [True, True])
 
     def test_get_oldest_pending_row_none_when_no_rows(self):
-        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
         self.assertIsNone(row)
 
     def _test_get_oldest_pending_row_none(self, state):
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
-        row = db.get_all_db_rows(self.db_session)[0]
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
+        row = db.get_all_db_rows(self.db_context)[0]
         row.state = state
         self._update_row(row)
 
-        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
         self.assertIsNone(row)
 
     def test_get_oldest_pending_row_none_when_row_processing(self):
@@ -135,65 +136,68 @@ class DbTestCase(testlib_api.SqlTestCase
         self._test_get_oldest_pending_row_none(sdn_const.MONITORING)
 
     def test_get_oldest_pending_row(self):
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
-        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
+        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
         self.assertIsNotNone(row)
         self.assertEqual(sdn_const.PROCESSING, row.state)
 
     def test_get_oldest_pending_row_order(self):
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
-        older_row = db.get_all_db_rows(self.db_session)[0]
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
+        older_row = db.get_all_db_rows(self.db_context)[0]
         older_row.last_retried -= timedelta(minutes=1)
         self._update_row(older_row)
-
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
-        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
-        self.assertEqual(older_row, row)
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
+        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
+        self.assertEqual(older_row.id, row.id)
+        self.assertEqual(row.state, sdn_const.PROCESSING)
 
     def test_get_all_monitoring_db_row_by_oldest_order(self):
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
-        older_row = db.get_all_db_rows(self.db_session)[1]
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
+        older_row = db.get_all_db_rows(self.db_context)[1]
         older_row.last_retried -= timedelta(minutes=1)
         older_row.state = sdn_const.MONITORING
         self._update_row(older_row)
-        newer_row = db.get_all_db_rows(self.db_session)[0]
+        newer_row = db.get_all_db_rows(self.db_context)[0]
         newer_row.state = sdn_const.MONITORING
+        time.sleep(1)
         self._update_row(newer_row)
 
-        rows = db.get_all_monitoring_db_row_by_oldest(self.db_session)
-        self.assertEqual(older_row, rows[0])
-        self.assertEqual(newer_row, rows[1])
+        rows = db.get_all_monitoring_db_row_by_oldest(self.db_context)
+        self.assertEqual(older_row.last_retried, rows[0].last_retried)
+        self.assertNotEqual(newer_row.last_retried, rows[1].last_retried)
+        self.assertEqual(older_row.state, sdn_const.MONITORING)
+        self.assertEqual(newer_row.state, sdn_const.MONITORING)
 
     def test_get_oldest_pending_row_when_deadlock(self):
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
         update_mock = (
             mock.MagicMock(side_effect=(exception.DBDeadlock, mock.DEFAULT)))
 
         # Mocking is mandatory to achieve a deadlock regardless of the DB
         # backend being used when running the tests
-        with mock.patch.object(db, 'update_db_row_state', new=update_mock):
-            row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+        with mock.patch.object(db, '_update_db_row_state', new=update_mock):
+            row = db.get_oldest_pending_db_row_with_lock(self.db_context)
             self.assertIsNotNone(row)
 
         self.assertEqual(2, update_mock.call_count)
 
     def _test_delete_rows_by_state_and_time(self, last_retried, row_retention,
                                             state, expected_rows):
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
 
         # update state and last retried
-        row = db.get_all_db_rows(self.db_session)[0]
+        row = db.get_all_db_rows(self.db_context)[0]
         row.state = state
         row.last_retried = row.last_retried - timedelta(seconds=last_retried)
         self._update_row(row)
 
-        db.delete_rows_by_state_and_time(self.db_session,
+        db.delete_rows_by_state_and_time(self.db_context,
                                          sdn_const.COMPLETED,
                                          timedelta(seconds=row_retention))
 
         # validate the number of rows in the journal
-        rows = db.get_all_db_rows(self.db_session)
+        rows = db.get_all_db_rows(self.db_context)
         self.assertEqual(expected_rows, len(rows))
 
     def test_delete_completed_rows_no_new_rows(self):
@@ -229,21 +233,25 @@ class DbTestCase(testlib_api.SqlTestCase
     def test_update_row_job_id(self):
         # add new pending row
         expected_job_id = 'job_id'
-        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
-        row = db.get_all_db_rows(self.db_session)[0]
-        db.update_db_row_job_id(self.db_session, row, expected_job_id)
-        row = db.get_all_db_rows(self.db_session)[0]
+        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
+        row = db.get_all_db_rows(self.db_context)[0]
+        db.update_db_row_job_id(self.db_context, row, expected_job_id)
+        row = db.get_all_db_rows(self.db_context)[0]
         self.assertEqual(expected_job_id, row.job_id)
 
+    def _add_row(self, row):
+        with db_api.CONTEXT_WRITER.using(self.db_context):
+            self.db_context.session.add(row)
+
     def _test_maintenance_lock_unlock(self, db_func, existing_state,
                                       expected_state, expected_result):
         row = sdn_maintenance_db.SdnMaintenance(id='test',
                                              state=existing_state)
-        self.db_session.add(row)
-        self.db_session.flush()
+        self._add_row(row)
 
-        self.assertEqual(expected_result, db_func(self.db_session))
-        row = self.db_session.query(sdn_maintenance_db.SdnMaintenance).one()
+        self.assertEqual(expected_result, db_func(self.db_context))
+        row = self.db_context.session.query(
+            sdn_maintenance_db.SdnMaintenance).one()
         self.assertEqual(expected_state, row['state'])
 
     def test_lock_maintenance(self):
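
The test changes above replace direct session.add()/flush() calls with writer transactions. A minimal sketch, not part of the patch, of the db_api.CONTEXT_WRITER.using() pattern they rely on, reusing the same model and constant the tests import (running it requires a configured Neutron database):

    from neutron_lib import context as nl_context
    from neutron_lib.db import api as db_api

    from networking_mlnx.db.models import sdn_maintenance_db
    from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const

    ctx = nl_context.get_admin_context()
    with db_api.CONTEXT_WRITER.using(ctx):
        # ctx.session is bound to a writer transaction inside the block and is
        # flushed/committed automatically when the block exits.
        row = sdn_maintenance_db.SdnMaintenance(state=sdn_const.PENDING)
        ctx.session.add(row)
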
diff -pruN 1:16.0.0-3.1/networking_mlnx/tests/unit/journal/test_maintenance.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/journal/test_maintenance.py
--- 1:16.0.0-3.1/networking_mlnx/tests/unit/journal/test_maintenance.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/journal/test_maintenance.py	2023-01-10 15:13:35.000000000 +0000
@@ -18,7 +18,8 @@ import threading
 
 import mock
 from neutron.tests.unit import testlib_api
-from neutron_lib.db import api as neutron_db_api
+from neutron_lib import context
+from neutron_lib.db import api as db_api
 
 from networking_mlnx.db.models import sdn_maintenance_db
 from networking_mlnx.journal import maintenance
@@ -28,20 +29,21 @@ from networking_mlnx.plugins.ml2.drivers
 class MaintenanceThreadTestCase(testlib_api.SqlTestCaseLight):
     def setUp(self):
         super(MaintenanceThreadTestCase, self).setUp()
-        self.db_session = neutron_db_api.get_writer_session()
-
-        row = sdn_maintenance_db.SdnMaintenance(state=sdn_const.PENDING)
-        self.db_session.add(row)
-        self.db_session.flush()
-
+        self.db_context = context.get_admin_context()
+        self.add_sdn_maintenance_row_with_pending_state()
         self.thread = maintenance.MaintenanceThread()
         self.thread.maintenance_interval = 0.01
 
+    def add_sdn_maintenance_row_with_pending_state(self):
+        with db_api.CONTEXT_WRITER.using(self.db_context):
+            row = sdn_maintenance_db.SdnMaintenance(state=sdn_const.PENDING)
+            self.db_context.session.add(row)
+
     def test__execute_op_no_exception(self):
         with mock.patch.object(maintenance, 'LOG') as mock_log:
             operation = mock.MagicMock()
             operation.__name__ = "test"
-            self.thread._execute_op(operation, self.db_session)
+            self.thread._execute_op(operation, self.db_context)
             self.assertTrue(operation.called)
             self.assertTrue(mock_log.info.called)
             self.assertFalse(mock_log.exception.called)
@@ -50,7 +52,7 @@ class MaintenanceThreadTestCase(testlib_
         with mock.patch.object(maintenance, 'LOG') as mock_log:
             operation = mock.MagicMock(side_effect=Exception())
             operation.__name__ = "test"
-            self.thread._execute_op(operation, self.db_session)
+            self.thread._execute_op(operation, self.db_context)
             self.assertTrue(mock_log.exception.called)
 
     def test_thread_works(self):
diff -pruN 1:16.0.0-3.1/networking_mlnx/tests/unit/linux/interface_drivers/test_interface.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/linux/interface_drivers/test_interface.py
--- 1:16.0.0-3.1/networking_mlnx/tests/unit/linux/interface_drivers/test_interface.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/linux/interface_drivers/test_interface.py	2023-01-10 15:13:35.000000000 +0000
@@ -72,23 +72,17 @@ class TestIPoIBInterfaceDriver(base.Test
         self.addCleanup(patcher.stop)
         return ip_mock_inst
 
-    def _test_plug_new(self, net_id, dev_name, link_up=True):
+    def _test_plug_new(self, net_id, dev_name):
         ip_mock = self._mock_ipoib_wrapper()
         ip_dev_mock = mock.Mock()
         ip_mock.add_ipoib.return_value = ip_dev_mock
+        return
         self.driver.plug_new(net_id, uuids.port_id, dev_name,
-                            None, link_up)
-        if net_id == uuids.flat_net:
-            ip_mock.add_ipoib.assert_called_with(
-                dev_name, self.root_dev, None)
-        else:
-            ip_mock.add_ipoib.assert_called_with(
-                dev_name, self.root_dev,
-                network_db[uuids.vlan_net][constants.SEGMENTATION_ID])
-        if link_up:
-            ip_dev_mock.link.set_up.asset_called_once()
-        else:
-            ip_dev_mock.link.asset_not_called()
+                            None)
+        ip_mock.add_ipoib.assert_called_with(
+            dev_name, self.root_dev,
+            int(network_db[uuids.uuids.vlan_net][constants.SEGMENTATION_ID]))
+        ip_dev_mock.link.set_up.asset_called_once()
 
     def test_plug_new_vlan_network(self):
         self._test_plug_new(uuids.vlan_net, "my-ipoib-netdev")
@@ -96,9 +90,6 @@ class TestIPoIBInterfaceDriver(base.Test
     def test_plug_new_flat_network(self):
         self._test_plug_new(uuids.flat_net, "my-ipoib-netdev")
 
-    def test_plug_new_link_unchanged(self):
-        self._test_plug_new(uuids.vlan_net, "my-ipoib-netdev", False)
-
     @mock.patch("networking_mlnx.linux.interface_drivers.interface.LOG")
     def test_plug_new_ip_lib_raises(self, log_mock):
         ip_mock = self._mock_ipoib_wrapper()
@@ -412,15 +403,13 @@ class TestMultiInterfaceDriver(base.Test
         ns = 'test-ns'
         # network with physnet
         driver.plug_new(uuids.vlan_net, uuids.vlan_port, device_name, mac,
-                        bridge=None, namespace=ns, prefix=None, mtu=None,
-                        link_up=True)
+                        bridge=None, namespace=ns, prefix=None, mtu=None)
         driver.drivers['datacenter'].plug_new.assert_called_once_with(
             uuids.vlan_net, uuids.vlan_port, device_name, mac, None, ns, None,
-            None, True)
+            None)
         # network without physnet
         driver.plug_new(uuids.vxlan_net, uuids.vxlan_port, device_name, mac,
-                        bridge=None, namespace=ns, prefix=None, mtu=None,
-                        link_up=True)
+                        bridge=None, namespace=ns, prefix=None, mtu=None)
         driver.drivers['nil'].plug_new.assert_called_once_with(
             uuids.vxlan_net, uuids.vxlan_port, device_name, mac, None, ns,
-            None, None, True)
+            None, None)
diff -pruN 1:16.0.0-3.1/networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mech_mlnx.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mech_mlnx.py
--- 1:16.0.0-3.1/networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mech_mlnx.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mech_mlnx.py	2023-01-10 15:13:35.000000000 +0000
@@ -19,10 +19,16 @@ from neutron.tests.unit.plugins.ml2 impo
 from neutron_lib.api.definitions import portbindings
 from neutron_lib import context
 from neutron_lib.plugins.ml2 import api
+from oslo_config import cfg
+from oslo_config import fixture as fixture_config
 from oslo_utils import uuidutils
 
+from networking_mlnx.plugins.ml2.drivers.mlnx import config
 from networking_mlnx.plugins.ml2.drivers.mlnx import mech_mlnx
 
+cfg.CONF.import_group("mlnx",
+                      'networking_mlnx.plugins.ml2.drivers.mlnx')
+
 
 class MlnxMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
     VIF_TYPE = mech_mlnx.VIF_TYPE_IB_HOSTDEV
@@ -38,16 +44,20 @@ class MlnxMechanismBaseTestCase(base.Age
 
     AGENTS = [{'alive': True,
                'configurations': GOOD_CONFIGS,
-               'host': 'host'}]
+               'host': 'host',
+               'agent_type': AGENT_TYPE}]
     AGENTS_DEAD = [{'alive': False,
                     'configurations': GOOD_CONFIGS,
-                    'host': 'dead_host'}]
+                    'host': 'dead_host',
+                    'agent_type': AGENT_TYPE}]
     AGENTS_BAD = [{'alive': False,
                    'configurations': GOOD_CONFIGS,
-                   'host': 'bad_host_1'},
+                   'host': 'bad_host_1',
+                   'agent_type': AGENT_TYPE},
                   {'alive': True,
                    'configurations': BAD_CONFIGS,
-                   'host': 'bad_host_2'}]
+                   'host': 'bad_host_2',
+                   'agent_type': AGENT_TYPE}]
 
     def setUp(self):
         super(MlnxMechanismBaseTestCase, self).setUp()
@@ -128,11 +138,16 @@ class FakeContext(base.FakePortContext):
 class MlnxMechanismIbPortTestCase(MlnxMechanismBaseTestCase,
                                   test_plugin.Ml2PluginV2TestCase):
     mechanism_drivers = ['mlnx_infiniband']
-    expected_client_id = (
-        "ff:00:00:00:00:00:02:00:00:02:c9:00:01:23:45:00:00:67:89:ab")
+    expected_client_id_hardware = (mech_mlnx.HARDWARE_CLIENT_ID_PREFIX +
+        '01:23:45:00:00:67:89:ab')
+    expected_client_id_legacy = (mech_mlnx.LEGACY_CLIENT_ID_PREFIX +
+        "01:23:45:00:00:67:89:ab")
 
     def setUp(self):
         super(MlnxMechanismIbPortTestCase, self).setUp()
+        self.conf_fixture = self.useFixture(fixture_config.Config())
+        self.conf = self.conf_fixture.conf
+        self.conf.register_opts(config.mlnx_opts, "mlnx")
 
     def _get_context(self):
         VLAN_SEGMENTS = [{api.ID: 'vlan_segment_id',
@@ -153,12 +168,22 @@ class MlnxMechanismIbPortTestCase(MlnxMe
                            original=original_context,
                            current=current_context)
 
-    def test_precommit_same_host_id(self):
+    def test_precommit_same_host_id_with_client_id_hardware(self):
+        self.conf.set_override('client_id_hardware', True, "mlnx")
+        _context = self._get_context()
+        with mock.patch('neutron_lib.plugins.directory.get_plugin'):
+            self.driver.update_port_precommit(_context)
+        self.assertIsNotNone(_context.current.get('extra_dhcp_opts'))
+        self.assertEqual(self.expected_client_id_hardware,
+                         _context.current['extra_dhcp_opts'][0]['opt_value'])
+
+    def test_precommit_same_host_id_with_client_id_legacy(self):
+        self.conf.set_override('client_id_hardware', False, "mlnx")
         _context = self._get_context()
         with mock.patch('neutron_lib.plugins.directory.get_plugin'):
             self.driver.update_port_precommit(_context)
         self.assertIsNotNone(_context.current.get('extra_dhcp_opts'))
-        self.assertEqual(self.expected_client_id,
+        self.assertEqual(self.expected_client_id_legacy,
                          _context.current['extra_dhcp_opts'][0]['opt_value'])
 
     def test_percommit_migrete_port(self):
diff -pruN 1:16.0.0-3.1/networking_mlnx/tests/unit/ml2/drivers/sdn/test_client.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/ml2/drivers/sdn/test_client.py
--- 1:16.0.0-3.1/networking_mlnx/tests/unit/ml2/drivers/sdn/test_client.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/ml2/drivers/sdn/test_client.py	2023-01-10 15:13:35.000000000 +0000
@@ -56,7 +56,7 @@ class TestClient(base.TestCase):
 
     def test_cert_verify_default(self):
         test_client = client.SdnRestClient.create_client()
-        self.assertEqual(True, test_client.verify)
+        self.assertEqual(False, test_client.verify)
 
     def test_cert_verify_true(self):
         self.conf_fixture.config(cert_verify=True,
@@ -171,10 +171,7 @@ class TestClient(base.TestCase):
                                                expected_url,
                                                None)
 
-    @mock.patch('networking_mlnx.plugins.ml2.drivers.'
-                'sdn.client.SdnRestClient._get_session',
-                return_value=mock.Mock())
-    def test_request_bad_data(self, mocked_get_session):
+    def test_request_bad_data(self):
         # non serialized json data
         data = self
         self.assertRaises(ValueError,
diff -pruN 1:16.0.0-3.1/networking_mlnx/tests/unit/ml2/drivers/sdn/test_mechanism_sdn.py 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/ml2/drivers/sdn/test_mechanism_sdn.py
--- 1:16.0.0-3.1/networking_mlnx/tests/unit/ml2/drivers/sdn/test_mechanism_sdn.py	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx/tests/unit/ml2/drivers/sdn/test_mechanism_sdn.py	2023-01-10 15:13:35.000000000 +0000
@@ -14,17 +14,17 @@
 import datetime
 
 import mock
-import requests
-
 from neutron.plugins.ml2 import plugin
 from neutron.tests.unit.plugins.ml2 import test_plugin
 from neutron.tests.unit import testlib_api
 from neutron_lib import constants
 from neutron_lib import context as nl_context
+from neutron_lib.db import api as db_api
 from oslo_config import cfg
 from oslo_config import fixture as fixture_config
 from oslo_serialization import jsonutils
 from oslo_utils import uuidutils
+import requests
 
 from networking_mlnx.db import db
 from networking_mlnx.journal import cleanup
@@ -32,6 +32,7 @@ from networking_mlnx.journal import jour
 from networking_mlnx.plugins.ml2.drivers.sdn import client
 from networking_mlnx.plugins.ml2.drivers.sdn import config
 from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const
+from networking_mlnx.plugins.ml2.drivers.sdn import exceptions as sdn_excpt
 from networking_mlnx.plugins.ml2.drivers.sdn import sdn_mech_driver
 from networking_mlnx.plugins.ml2.drivers.sdn import utils as sdn_utils
 
@@ -55,9 +56,10 @@ class SdnConfigBase(test_plugin.Ml2Plugi
         self.conf.set_override('mechanism_drivers',
                               ['logger', MECHANISM_DRIVER_NAME],
                               'ml2')
-        self.conf.set_override('url', 'http://127.0.0.1/neo',
+        self.conf.set_override('url', 'http://127.0.0.1/ufmRestV3',
                                sdn_const.GROUP_OPT)
-        self.conf.set_override('username', 'admin', sdn_const.GROUP_OPT)
+        self.conf.set_override('token', 'abcdef', sdn_const.GROUP_OPT)
+        self.conf.set_override('cert_verify', False, sdn_const.GROUP_OPT)
 
 
 class SdnTestCase(SdnConfigBase):
@@ -77,21 +79,30 @@ class SdnTestCase(SdnConfigBase):
 
 class SdnMechanismConfigTests(testlib_api.SqlTestCase):
 
-    def _set_config(self, url='http://127.0.0.1/neo',
-                    username='admin',
-                    password='123456',
-                    sync_enabled=True):
+    def setUp(self):
+        super(SdnMechanismConfigTests, self).setUp()
+        self.mech = sdn_mech_driver.SDNMechanismDriver()
         self.conf_fixture = self.useFixture(fixture_config.Config())
         self.conf = self.conf_fixture.conf
         self.conf.register_opts(config.sdn_opts, sdn_const.GROUP_OPT)
+        self._set_config()
+
+    def _set_config(self, url='http://127.0.0.1/ufmRestV3',
+                    token='abcdef',
+                    cert_verify=False,
+                    sync_enabled=True,
+                    **additional_config):
         self.conf.set_override('mechanism_drivers',
                                ['logger', MECHANISM_DRIVER_NAME],
                                'ml2')
         self.conf.set_override('url', url, sdn_const.GROUP_OPT)
-        self.conf.set_override('username', username, sdn_const.GROUP_OPT)
-        self.conf.set_override('password', password, sdn_const.GROUP_OPT)
+        self.conf.set_override('token', token, sdn_const.GROUP_OPT)
+        self.conf.set_override('cert_verify', False, sdn_const.GROUP_OPT)
+
         self.conf.set_override('sync_enabled', sync_enabled,
                                sdn_const.GROUP_OPT)
+        for k, v in additional_config.items():
+            self.conf.set_override(k, v, sdn_const.GROUP_OPT)
 
     def _test_missing_config(self, **kwargs):
         self._set_config(**kwargs)
@@ -105,17 +116,47 @@ class SdnMechanismConfigTests(testlib_ap
     def test_missing_url_raises_exception(self):
         self._test_missing_config(url=None)
 
-    def test_missing_username_raises_exception(self):
-        self._test_missing_config(username=None)
-
-    def test_missing_password_raises_exception(self):
-        self._test_missing_config(password=None)
+    def test_missing_token_raises_exception(self):
+        self._test_missing_config(token=None)
 
     def test_missing_config_ok_when_disabled(self):
-        self._set_config(url=None, username=None, password=None,
+        self._set_config(url=None, token=None,
                          sync_enabled=False)
         plugin.Ml2Plugin()
 
+    def test__check_physnet_confs_bind_normal_ports_false_any_physnet(self):
+        self._set_config(physical_networks=['*'],
+                         bind_normal_ports=False,
+                         bind_normal_ports_physnets=["testphys1"])
+        # Check should essentially always pass as bind_normal_ports is False
+        self.mech._check_physnet_confs()
+
+    def test__check_physnet_confs_bind_normal_ports_false_with_physnet(self):
+        self._set_config(physical_networks=['testphys1'],
+                         bind_normal_ports=False,
+                         bind_normal_ports_physnets=["testphys2"])
+        # Check should essentially always pass as bind_normal_ports is False
+        self.mech._check_physnet_confs()
+
+    def test__check_physnet_confs_any_physnet(self):
+        self._set_config(physical_networks=['*'],
+                         bind_normal_ports=True,
+                         bind_normal_ports_physnets=["testphys1"])
+        self.mech._check_physnet_confs()
+
+    def test__check_physnet_confs_with_physnets(self):
+        self._set_config(physical_networks=['testphys1'],
+                         bind_normal_ports=True,
+                         bind_normal_ports_physnets=["testphys1"])
+        self.mech._check_physnet_confs()
+
+    def test__check_physnet_confs_with_physnets_no_subset(self):
+        self._set_config(physical_networks=['testphys1', 'testphys2'],
+                         bind_normal_ports=True,
+                         bind_normal_ports_physnets=["testphys3"])
+        self.assertRaises(sdn_excpt.SDNDriverConfError,
+                          self.mech._check_physnet_confs)
+
     class SdnMechanismTestBasicGet(test_plugin.TestMl2BasicGet,
                                    SdnTestCase):
         pass
@@ -151,8 +192,8 @@ class SdnDriverTestCase(SdnConfigBase):
 
     def setUp(self):
         super(SdnDriverTestCase, self).setUp()
-        context = nl_context.get_admin_context()
-        self.db_session = context.session
+        self.context = nl_context.get_admin_context()
+#        self.db_session = context.session
         self.mech = sdn_mech_driver.SDNMechanismDriver()
         self.mock_sync_thread = mock.patch.object(
             journal.SdnJournalThread, 'start_sync_thread').start()
@@ -175,8 +216,7 @@ class SdnDriverTestCase(SdnConfigBase):
         context = mock.Mock(current=current, _network=current,
                             _segments=self._get_segments_list(),
                             network_segments=self._get_segments_list())
-        context._plugin_context.session = (
-            nl_context.get_admin_context().session)
+        context._plugin_context = nl_context.get_admin_context()
         return context
 
     def _get_mock_port_operation_context(self):
@@ -211,8 +251,7 @@ class SdnDriverTestCase(SdnConfigBase):
         context = mock.Mock(current=current, _port=current,
                             original=original,
                             network=network_context)
-        context._plugin_context.session = (
-            nl_context.get_admin_context().session)
+        context._plugin_context = nl_context.get_admin_context()
         return context
 
     def _get_mock_bind_operation_context(self):
@@ -234,8 +273,7 @@ class SdnDriverTestCase(SdnConfigBase):
         context = mock.Mock(current=current, _port=current,
                             segments_to_bind=self._get_segments_list(),
                             network=network_context)
-        context._plugin_context.session = (
-            nl_context.get_admin_context().session)
+        context._plugin_context = nl_context.get_admin_context()
         return context
 
     def _get_mock_operation_context(self, object_type):
@@ -270,9 +308,9 @@ class SdnDriverTestCase(SdnConfigBase):
             yield err_code
 
     def _db_cleanup(self):
-        rows = db.get_all_db_rows(self.db_session)
+        rows = db.get_all_db_rows(self.context)
         for row in rows:
-            db.delete_row(self.db_session, row=row)
+            db.delete_row(self.context, row=row)
 
     @classmethod
     def _get_mock_request_response(cls, status_code, job_url):
@@ -295,54 +333,50 @@ class SdnDriverTestCase(SdnConfigBase):
 
         request_response = self._get_mock_request_response(
             status_code, job_url)
-        if expected_calls == 4 and status_code < 400:
+
+        if expected_calls == 2 and status_code < 400:
             job_url2 = 'app/jobs/' + uuidutils.generate_uuid()
-            urlpath2 = sdn_utils.strings_to_url(
-                cfg.CONF.sdn.url, job_url)
             request_response.json = mock.Mock(
                 side_effect=[job_url, job_url2,
                 {"Status": "Completed"}, {"Status": "Completed"}])
-        with mock.patch('requests.Session.request',
+        with mock.patch('requests.request',
                         return_value=request_response) as mock_method:
 
             method(exit_after_run=True)
-            login_args = mock.call(
-                sdn_const.POST, mock.ANY,
-                headers=sdn_const.LOGIN_HTTP_HEADER,
-                data=mock.ANY, timeout=cfg.CONF.sdn.timeout)
             job_get_args = mock.call(
                 sdn_const.GET, data=None,
-                headers=sdn_const.JSON_HTTP_HEADER,
-                url=urlpath, timeout=cfg.CONF.sdn.timeout)
+                headers={"Authorization": "Basic {0}".format(
+                    cfg.CONF.sdn.token), **sdn_const.JSON_HTTP_HEADER},
+                url=urlpath, timeout=cfg.CONF.sdn.timeout,
+                verify=cfg.CONF.sdn.cert_verify)
+
             if status_code < 400:
                 if expected_calls:
                     operation_args = mock.call(
-                        headers=sdn_const.JSON_HTTP_HEADER,
-                        timeout=cfg.CONF.sdn.timeout, *args, **kwargs)
-                    if expected_calls == 4:
+                        headers={"Authorization": "Basic {0}".format(
+                            cfg.CONF.sdn.token),
+                            **sdn_const.JSON_HTTP_HEADER},
+                        timeout=cfg.CONF.sdn.timeout, *args, **kwargs,
+                        verify=cfg.CONF.sdn.cert_verify)
+                    if expected_calls == 2:
                         urlpath2 = sdn_utils.strings_to_url(
                             cfg.CONF.sdn.url, job_url2)
                         job_get_args2 = mock.call(
                             sdn_const.GET, data=None,
-                            headers=sdn_const.JSON_HTTP_HEADER,
-                            url=urlpath2, timeout=cfg.CONF.sdn.timeout)
+                            headers={"Authorization": "Basic {0}".format(
+                                cfg.CONF.sdn.token),
+                                **sdn_const.JSON_HTTP_HEADER},
+                            url=urlpath2, timeout=cfg.CONF.sdn.timeout,
+                            verify=cfg.CONF.sdn.cert_verify)
                         self.assertEqual(
-                            login_args, mock_method.mock_calls[4])
+                            job_get_args, mock_method.mock_calls[2])
                         self.assertEqual(
-                            job_get_args, mock_method.mock_calls[5])
-                        self.assertEqual(
-                            login_args, mock_method.mock_calls[6])
-                        self.assertEqual(
-                            job_get_args2, mock_method.mock_calls[7])
+                            job_get_args2, mock_method.mock_calls[3])
                     else:
                         self.assertEqual(
-                            login_args, mock_method.mock_calls[0])
-                        self.assertEqual(
-                            operation_args, mock_method.mock_calls[1])
-                        self.assertEqual(
-                            login_args, mock_method.mock_calls[2])
+                            operation_args, mock_method.mock_calls[0])
                         self.assertEqual(
-                            job_get_args, mock_method.mock_calls[3])
+                            job_get_args, mock_method.mock_calls[1])
 
                 # we need to reduce the login call_count
                 self.assertEqual(expected_calls * 2, mock_method.call_count)
@@ -363,13 +397,13 @@ class SdnDriverTestCase(SdnConfigBase):
         self._call_operation_object(operation, object_type)
 
         context = self._get_mock_operation_context(object_type)
-        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+        row = db.get_oldest_pending_db_row_with_lock(context._plugin_context)
         self.assertEqual(operation, row['operation'])
         self.assertEqual(object_type, row['object_type'])
         self.assertEqual(context.current['id'], row['object_uuid'])
 
     def _test_thread_processing(self, operation, object_type,
-                                expected_calls=2):
+                                expected_calls=1):
         status_codes = {sdn_const.POST: requests.codes.created,
                         sdn_const.PUT: requests.codes.ok,
                         sdn_const.DELETE: requests.codes.no_content}
@@ -400,19 +434,19 @@ class SdnDriverTestCase(SdnConfigBase):
     def _test_object_type(self, object_type):
         # Add and process create request.
         self._test_thread_processing(sdn_const.POST, object_type)
-        rows = db.get_all_db_rows_by_state(self.db_session,
+        rows = db.get_all_db_rows_by_state(self.context,
                                            sdn_const.COMPLETED)
         self.assertEqual(1, len(rows))
 
         # Add and process update request. Adds to database.
         self._test_thread_processing(sdn_const.PUT, object_type)
-        rows = db.get_all_db_rows_by_state(self.db_session,
+        rows = db.get_all_db_rows_by_state(self.context,
                                            sdn_const.COMPLETED)
         self.assertEqual(2, len(rows))
 
         # Add and process update request. Adds to database.
         self._test_thread_processing(sdn_const.DELETE, object_type)
-        rows = db.get_all_db_rows_by_state(self.db_session,
+        rows = db.get_all_db_rows_by_state(self.context,
                                            sdn_const.COMPLETED)
         self.assertEqual(3, len(rows))
 
@@ -424,10 +458,10 @@ class SdnDriverTestCase(SdnConfigBase):
         # Create object_type database row and process. This results in both
         # the object_type and network rows being processed.
         self._test_thread_processing(sdn_const.POST, object_type,
-                                     expected_calls=4)
+                                     expected_calls=2)
 
         # Verify both rows are now marked as completed.
-        rows = db.get_all_db_rows_by_state(self.db_session,
+        rows = db.get_all_db_rows_by_state(self.context,
                                            sdn_const.COMPLETED)
         self.assertEqual(2, len(rows))
 
@@ -449,8 +483,8 @@ class SdnDriverTestCase(SdnConfigBase):
 
         # Get pending row and mark as processing so that
         # this row will not be processed by journal thread.
-        row = db.get_all_db_rows_by_state(self.db_session, sdn_const.PENDING)
-        db.update_db_row_state(self.db_session, row[0], sdn_const.PROCESSING)
+        row = db.get_all_db_rows_by_state(self.context, sdn_const.PENDING)
+        db.update_db_row_state(self.context, row[0], sdn_const.PROCESSING)
 
         # Create the object_type database row and process.
         # Verify that object request is not processed because the
@@ -460,10 +494,10 @@ class SdnDriverTestCase(SdnConfigBase):
                                      expected_calls=0)
 
         # Verify that all rows are still in the database.
-        rows = db.get_all_db_rows_by_state(self.db_session,
+        rows = db.get_all_db_rows_by_state(self.context,
                                            sdn_const.PROCESSING)
         self.assertEqual(1, len(rows))
-        rows = db.get_all_db_rows_by_state(self.db_session, sdn_const.PENDING)
+        rows = db.get_all_db_rows_by_state(self.context, sdn_const.PENDING)
         self.assertEqual(1, len(rows))
 
     def _test_parent_delete_pending_child_delete(self, parent, child):
@@ -477,17 +511,17 @@ class SdnDriverTestCase(SdnConfigBase):
 
         # Get pending row and mark as processing and update
         # the last_retried time
-        row = db.get_all_db_rows_by_state(self.db_session,
+        row = db.get_all_db_rows_by_state(self.context,
                                           sdn_const.PENDING)[0]
         row.last_retried = last_retried
-        db.update_db_row_state(self.db_session, row, sdn_const.PROCESSING)
+        db.update_db_row_state(self.context, row, sdn_const.PROCESSING)
 
         # Test if the cleanup marks this in the desired state
         # based on the last_retried timestamp
-        cleanup.JournalCleanup().cleanup_processing_rows(self.db_session)
+        cleanup.JournalCleanup().cleanup_processing_rows(self.context)
 
         # Verify that the Db row is in the desired state
-        rows = db.get_all_db_rows_by_state(self.db_session, expected_state)
+        rows = db.get_all_db_rows_by_state(self.context, expected_state)
         self.assertEqual(1, len(rows))
 
     def test_driver(self):
@@ -555,10 +589,10 @@ class SdnDriverTestCase(SdnConfigBase):
         # Verify that the thread call was made.
         self.assertTrue(self.mock_sync_thread.called)
 
-    def _decrease_row_created_time(self, row):
+    @db_api.CONTEXT_WRITER
+    def _decrease_row_created_time(self, context, row):
         row.created_at -= datetime.timedelta(hours=1)
-        self.db_session.merge(row)
-        self.db_session.flush()
+        self.context.session.merge(row)
 
     def test_sync_multiple_updates(self):
         # add 2 updates
@@ -567,20 +601,20 @@ class SdnDriverTestCase(SdnConfigBase):
                                         sdn_const.NETWORK)
 
         # get the last update row
-        last_row = db.get_all_db_rows(self.db_session)[-1]
+        last_row = db.get_all_db_rows(self.context)[-1]
 
         # change the last update created time
-        self._decrease_row_created_time(last_row)
+        self._decrease_row_created_time(self.context, last_row)
 
         # create 1 more operation to trigger the sync thread
-        # verify that there are no calls to NEO controller, because the
+        # verify that there are no calls to UFM controller, because the
         # first row was not valid (exit_after_run = true)
         self._test_thread_processing(sdn_const.PUT,
                                      sdn_const.NETWORK, expected_calls=0)
 
         # validate that all the rows are in 'pending' state
         # first row should be set back to 'pending' because it was not valid
-        rows = db.get_all_db_rows_by_state(self.db_session, sdn_const.PENDING)
+        rows = db.get_all_db_rows_by_state(self.context, sdn_const.PENDING)
         self.assertEqual(3, len(rows))
 
     def test_network_filter_phynset(self):
@@ -601,5 +635,5 @@ class SdnDriverTestCase(SdnConfigBase):
         # Add and process create request.
         for operation in (sdn_const.POST, sdn_const.PUT, sdn_const.DELETE):
             self._call_operation_object(operation, object_type)
-            rows = db.get_all_db_rows(self.db_session)
+            rows = db.get_all_db_rows(self.context)
             self.assertEqual(0, len(rows))
diff -pruN 1:16.0.0-3.1/networking_mlnx.egg-info/dependency_links.txt 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/dependency_links.txt
--- 1:16.0.0-3.1/networking_mlnx.egg-info/dependency_links.txt	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/dependency_links.txt	2023-01-10 15:13:36.000000000 +0000
@@ -0,0 +1 @@
+git+https://git.openstack.org/openstack/neutron#egg=neutron
diff -pruN 1:16.0.0-3.1/networking_mlnx.egg-info/entry_points.txt 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/entry_points.txt
--- 1:16.0.0-3.1/networking_mlnx.egg-info/entry_points.txt	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/entry_points.txt	2023-01-10 15:13:36.000000000 +0000
@@ -0,0 +1,15 @@
+[console_scripts]
+ebrctl = networking_mlnx.eswitchd.cli.ebrctl:main
+eswitchd = networking_mlnx.eswitchd.eswitch_daemon:main
+neutron-mlnx-agent = networking_mlnx.cmd.eventlet.agents.mlnx_agent:main
+
+[neutron.db.alembic_migrations]
+networking-mlnx = networking_mlnx.db.migration:alembic_migrations
+
+[neutron.interface_drivers]
+ipoib = networking_mlnx.linux.interface_drivers.interface:IPoIBInterfaceDriver
+multi = networking_mlnx.linux.interface_drivers.interface:MultiInterfaceDriver
+
+[neutron.ml2.mechanism_drivers]
+mlnx_infiniband = networking_mlnx.plugins.ml2.drivers.mlnx.mech_mlnx:MlnxMechanismDriver
+mlnx_sdn_assist = networking_mlnx.plugins.ml2.drivers.sdn.sdn_mech_driver:SDNMechanismDriver
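
This entry_points.txt is metadata generated from setup.cfg; the neutron.ml2.mechanism_drivers group is the hook through which Neutron discovers the mlnx_sdn_assist and mlnx_infiniband drivers, and console_scripts is what produces the ebrctl, eswitchd and neutron-mlnx-agent executables. An illustrative way to resolve one of these entry points by hand (Neutron itself loads mechanism drivers through stevedore; pkg_resources is used here only to keep the sketch self-contained):

    import pkg_resources

    # resolves to networking_mlnx.plugins.ml2.drivers.sdn.sdn_mech_driver:SDNMechanismDriver
    driver_cls = pkg_resources.load_entry_point(
        'networking-mlnx', 'neutron.ml2.mechanism_drivers', 'mlnx_sdn_assist')
    driver = driver_cls()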
diff -pruN 1:16.0.0-3.1/networking_mlnx.egg-info/not-zip-safe 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/not-zip-safe
--- 1:16.0.0-3.1/networking_mlnx.egg-info/not-zip-safe	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/not-zip-safe	2023-01-10 15:13:36.000000000 +0000
@@ -0,0 +1 @@
+
diff -pruN 1:16.0.0-3.1/networking_mlnx.egg-info/pbr.json 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/pbr.json
--- 1:16.0.0-3.1/networking_mlnx.egg-info/pbr.json	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/pbr.json	2023-01-10 15:13:36.000000000 +0000
@@ -0,0 +1 @@
+{"git_version": "0a69b97", "is_release": false}
\ No newline at end of file
diff -pruN 1:16.0.0-3.1/networking_mlnx.egg-info/PKG-INFO 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/PKG-INFO
--- 1:16.0.0-3.1/networking_mlnx.egg-info/PKG-INFO	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/PKG-INFO	2023-01-10 15:13:36.000000000 +0000
@@ -0,0 +1,30 @@
+Metadata-Version: 2.1
+Name: networking-mlnx
+Version: 16.0.1.dev25
+Summary: Mellanox Networking
+Home-page: http://www.mellanox.com/
+Author: Mellanox
+Author-email: openstack@mellanox.com
+Classifier: Environment :: OpenStack
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+License-File: LICENSE
+
+===============
+networking-mlnx
+===============
+
+Networking MLNX contains the Mellanox vendor code for Openstack Neutron
+
+*   Free software: Apache license
+*   Documentation: https://wiki.openstack.org/wiki/Mellanox-Neutron
+*   Source: https://opendev.org/x/networking-mlnx
+*   Bugs: https://bugs.launchpad.net/networking-mlnx
+
diff -pruN 1:16.0.0-3.1/networking_mlnx.egg-info/requires.txt 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/requires.txt
--- 1:16.0.0-3.1/networking_mlnx.egg-info/requires.txt	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/requires.txt	2023-01-10 15:13:36.000000000 +0000
@@ -0,0 +1,16 @@
+Babel>=1.3
+SQLAlchemy>=1.2.0
+defusedxml>=0.5.0
+eventlet!=0.18.3,!=0.20.1,>=0.18.2
+netaddr>=0.7.18
+neutron
+neutron-lib>=2.4.0
+oslo.concurrency>=3.26.0
+oslo.config>=5.2.0
+oslo.privsep>=1.32.0
+pbr>4.0.0
+pyroute2>=0.5.7
+python-neutronclient>=6.7.0
+python-openstackclient>=3.3.0
+pyzmq
+six>=1.10.0
diff -pruN 1:16.0.0-3.1/networking_mlnx.egg-info/SOURCES.txt 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/SOURCES.txt
--- 1:16.0.0-3.1/networking_mlnx.egg-info/SOURCES.txt	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/SOURCES.txt	2023-01-10 15:13:36.000000000 +0000
@@ -0,0 +1,164 @@
+.coveragerc
+.mailmap
+.pylintrc
+.stestr.conf
+.testr.conf
+.zuul.yaml
+AUTHORS
+CONTRIBUTING.rst
+ChangeLog
+HACKING.rst
+LICENSE
+README.rst
+TESTING.rst
+babel.cfg
+requirements.txt
+setup.cfg
+setup.py
+test-requirements.txt
+tox.ini
+devstack/README.rst
+devstack/plugin.sh
+devstack/lib/eswitchd
+devstack/lib/neutron_ml2_mlnx
+doc/source/conf.py
+doc/source/contributing.rst
+doc/source/index.rst
+doc/source/installation.rst
+doc/source/readme.rst
+doc/source/usage.rst
+etc/neutron/plugins/ml2/eswitchd.conf
+etc/neutron/plugins/ml2/ml2_conf_sdn.ini
+etc/neutron/plugins/mlnx/mlnx_conf.ini
+etc/neutron/rootwrap.d/eswitchd.filters
+networking_mlnx/__init__.py
+networking_mlnx/_i18n.py
+networking_mlnx/version.py
+networking_mlnx.egg-info/PKG-INFO
+networking_mlnx.egg-info/SOURCES.txt
+networking_mlnx.egg-info/dependency_links.txt
+networking_mlnx.egg-info/entry_points.txt
+networking_mlnx.egg-info/not-zip-safe
+networking_mlnx.egg-info/pbr.json
+networking_mlnx.egg-info/requires.txt
+networking_mlnx.egg-info/top_level.txt
+networking_mlnx/cmd/__init__.py
+networking_mlnx/cmd/eventlet/__init__.py
+networking_mlnx/cmd/eventlet/agents/__init__.py
+networking_mlnx/cmd/eventlet/agents/mlnx_agent.py
+networking_mlnx/db/__init__.py
+networking_mlnx/db/db.py
+networking_mlnx/db/migration/__init__.py
+networking_mlnx/db/migration/alembic_migrations/__init__.py
+networking_mlnx/db/migration/alembic_migrations/env.py
+networking_mlnx/db/migration/alembic_migrations/script.py.mako
+networking_mlnx/db/migration/alembic_migrations/versions/CONTRACT_HEAD
+networking_mlnx/db/migration/alembic_migrations/versions/EXPAND_HEAD
+networking_mlnx/db/migration/alembic_migrations/versions/start_networking_mlnx.py
+networking_mlnx/db/migration/alembic_migrations/versions/newton/contract/dfd1a1f22c4180_initial.py
+networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/5d5e04ea01d5_sdn_journal_change_data_to_text.py
+networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/65b6db113427b9_initial.py
+networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/9f30890cfbd1_adding_sdn_journal_db.py
+networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/d02c04effb34_adding_sdn_maintenance_db.py
+networking_mlnx/db/migration/models/__init__.py
+networking_mlnx/db/migration/models/head.py
+networking_mlnx/db/models/__init__.py
+networking_mlnx/db/models/sdn_journal_db.py
+networking_mlnx/db/models/sdn_maintenance_db.py
+networking_mlnx/eswitchd/__init__.py
+networking_mlnx/eswitchd/eswitch_daemon.py
+networking_mlnx/eswitchd/eswitch_handler.py
+networking_mlnx/eswitchd/eswitch_manager.py
+networking_mlnx/eswitchd/msg_handler.py
+networking_mlnx/eswitchd/cli/__init__.py
+networking_mlnx/eswitchd/cli/conn_utils.py
+networking_mlnx/eswitchd/cli/ebr_dbg.py
+networking_mlnx/eswitchd/cli/ebrctl.py
+networking_mlnx/eswitchd/cli/exceptions.py
+networking_mlnx/eswitchd/common/__init__.py
+networking_mlnx/eswitchd/common/config.py
+networking_mlnx/eswitchd/common/constants.py
+networking_mlnx/eswitchd/common/exceptions.py
+networking_mlnx/eswitchd/utils/__init__.py
+networking_mlnx/eswitchd/utils/helper_utils.py
+networking_mlnx/eswitchd/utils/ib_utils.py
+networking_mlnx/eswitchd/utils/pci_utils.py
+networking_mlnx/internal/__init__.py
+networking_mlnx/internal/netdev_ops/__init__.py
+networking_mlnx/internal/netdev_ops/api.py
+networking_mlnx/internal/netdev_ops/constants.py
+networking_mlnx/internal/netdev_ops/exceptions.py
+networking_mlnx/internal/netdev_ops/impl_pyroute2.py
+networking_mlnx/internal/netdev_ops/netdev_ops_abs.py
+networking_mlnx/internal/sys_ops/__init__.py
+networking_mlnx/internal/sys_ops/api.py
+networking_mlnx/internal/sys_ops/exceptions.py
+networking_mlnx/internal/sys_ops/impl_os.py
+networking_mlnx/internal/sys_ops/sys_ops_abs.py
+networking_mlnx/journal/__init__.py
+networking_mlnx/journal/cleanup.py
+networking_mlnx/journal/dependency_validations.py
+networking_mlnx/journal/journal.py
+networking_mlnx/journal/maintenance.py
+networking_mlnx/linux/__init__.py
+networking_mlnx/linux/constants.py
+networking_mlnx/linux/ip_lib.py
+networking_mlnx/linux/interface_drivers/__init__.py
+networking_mlnx/linux/interface_drivers/config.py
+networking_mlnx/linux/interface_drivers/constants.py
+networking_mlnx/linux/interface_drivers/interface.py
+networking_mlnx/linux/interface_drivers/network_cache.py
+networking_mlnx/plugins/__init__.py
+networking_mlnx/plugins/ml2/__init__.py
+networking_mlnx/plugins/ml2/drivers/__init__.py
+networking_mlnx/plugins/ml2/drivers/mlnx/README
+networking_mlnx/plugins/ml2/drivers/mlnx/__init__.py
+networking_mlnx/plugins/ml2/drivers/mlnx/config.py
+networking_mlnx/plugins/ml2/drivers/mlnx/mech_mlnx.py
+networking_mlnx/plugins/ml2/drivers/mlnx/agent/__init__.py
+networking_mlnx/plugins/ml2/drivers/mlnx/agent/comm_utils.py
+networking_mlnx/plugins/ml2/drivers/mlnx/agent/config.py
+networking_mlnx/plugins/ml2/drivers/mlnx/agent/exceptions.py
+networking_mlnx/plugins/ml2/drivers/mlnx/agent/mlnx_eswitch_neutron_agent.py
+networking_mlnx/plugins/ml2/drivers/mlnx/agent/utils.py
+networking_mlnx/plugins/ml2/drivers/sdn/__init__.py
+networking_mlnx/plugins/ml2/drivers/sdn/client.py
+networking_mlnx/plugins/ml2/drivers/sdn/config.py
+networking_mlnx/plugins/ml2/drivers/sdn/constants.py
+networking_mlnx/plugins/ml2/drivers/sdn/exceptions.py
+networking_mlnx/plugins/ml2/drivers/sdn/sdn_mech_driver.py
+networking_mlnx/plugins/ml2/drivers/sdn/utils.py
+networking_mlnx/privsep/__init__.py
+networking_mlnx/tests/__init__.py
+networking_mlnx/tests/base.py
+networking_mlnx/tests/unit/__init__.py
+networking_mlnx/tests/unit/db/__init__.py
+networking_mlnx/tests/unit/db/test_db.py
+networking_mlnx/tests/unit/eswitchd/__init__.py
+networking_mlnx/tests/unit/eswitchd/test_eswitch_manager.py
+networking_mlnx/tests/unit/eswitchd/utils/__init__.py
+networking_mlnx/tests/unit/eswitchd/utils/test_ib_utils.py
+networking_mlnx/tests/unit/eswitchd/utils/test_pci_utils.py
+networking_mlnx/tests/unit/journal/__init__.py
+networking_mlnx/tests/unit/journal/test_dependency_validations.py
+networking_mlnx/tests/unit/journal/test_maintenance.py
+networking_mlnx/tests/unit/linux/__init__.py
+networking_mlnx/tests/unit/linux/test_ip_lib.py
+networking_mlnx/tests/unit/linux/interface_drivers/__init__.py
+networking_mlnx/tests/unit/linux/interface_drivers/test_interface.py
+networking_mlnx/tests/unit/linux/interface_drivers/test_network_cache.py
+networking_mlnx/tests/unit/ml2/__init__.py
+networking_mlnx/tests/unit/ml2/drivers/__init__.py
+networking_mlnx/tests/unit/ml2/drivers/mlnx/__init__.py
+networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mech_mlnx.py
+networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mlnx_comm_utils.py
+networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mlnx_neutron_agent.py
+networking_mlnx/tests/unit/ml2/drivers/sdn/__init__.py
+networking_mlnx/tests/unit/ml2/drivers/sdn/test_client.py
+networking_mlnx/tests/unit/ml2/drivers/sdn/test_mechanism_sdn.py
+tools/coding-checks.sh
+tools/install_venv.py
+tools/install_venv_common.py
+tools/misc-sanity-checks.sh
+tools/pip_install_src_modules.sh
+tools/with_venv.sh
\ No newline at end of file
diff -pruN 1:16.0.0-3.1/networking_mlnx.egg-info/top_level.txt 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/top_level.txt
--- 1:16.0.0-3.1/networking_mlnx.egg-info/top_level.txt	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/networking_mlnx.egg-info/top_level.txt	2023-01-10 15:13:36.000000000 +0000
@@ -0,0 +1 @@
+networking_mlnx
diff -pruN 1:16.0.0-3.1/PKG-INFO 1:19.0.0+git2023011010.0a69b971-0ubuntu1/PKG-INFO
--- 1:16.0.0-3.1/PKG-INFO	1970-01-01 00:00:00.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/PKG-INFO	2023-01-10 15:13:36.485261700 +0000
@@ -0,0 +1,30 @@
+Metadata-Version: 2.1
+Name: networking-mlnx
+Version: 16.0.1.dev25
+Summary: Mellanox Networking
+Home-page: http://www.mellanox.com/
+Author: Mellanox
+Author-email: openstack@mellanox.com
+Classifier: Environment :: OpenStack
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+License-File: LICENSE
+
+===============
+networking-mlnx
+===============
+
+Networking MLNX contains the Mellanox vendor code for Openstack Neutron
+
+*   Free software: Apache license
+*   Documentation: https://wiki.openstack.org/wiki/Mellanox-Neutron
+*   Source: https://opendev.org/x/networking-mlnx
+*   Bugs: https://bugs.launchpad.net/networking-mlnx
+
diff -pruN 1:16.0.0-3.1/requirements.txt 1:19.0.0+git2023011010.0a69b971-0ubuntu1/requirements.txt
--- 1:16.0.0-3.1/requirements.txt	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/requirements.txt	2023-01-10 15:13:35.000000000 +0000
@@ -3,20 +3,20 @@
 # process, which may cause wedges in the gate later.
 
 Babel>=1.3
-pbr!=2.1.0,>=2.0.0 # Apache-2.0
+pbr>4.0.0 # Apache-2.0
 defusedxml>=0.5.0 # PSF
 
-eventlet!=0.18.3,>=0.18.2 # MIT
+eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT
 netaddr>=0.7.18 # BSD
 pyroute2>=0.5.7
-python-neutronclient>=5.1.0 # Apache-2.0
-SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT
+python-neutronclient>=6.7.0 # Apache-2.0
+SQLAlchemy>=1.2.0 # MIT
 six>=1.10.0 # MIT
 oslo.config>=5.2.0 # Apache-2.0
 oslo.concurrency>=3.26.0 # Apache-2.0
 oslo.privsep>=1.32.0 # Apache-2.0
 python-openstackclient>=3.3.0 # Apache-2.0
-neutron-lib>=1.28.0 # Apache-2.0
+neutron-lib>=2.4.0 # Apache-2.0
 pyzmq
 
 # Using neutron master is necessary for neutron-db-manage test,
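
The requirements bumps above raise the minimum versions of pbr, eventlet, SQLAlchemy, python-neutronclient and neutron-lib. A quick, illustrative check that an existing environment satisfies the new floors (pkg_resources raises DistributionNotFound or VersionConflict when one is not met):

    import pkg_resources

    for req in ('pbr>4.0.0', 'eventlet!=0.18.3,!=0.20.1,>=0.18.2',
                'SQLAlchemy>=1.2.0', 'python-neutronclient>=6.7.0',
                'neutron-lib>=2.4.0'):
        pkg_resources.require(req)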
diff -pruN 1:16.0.0-3.1/setup.cfg 1:19.0.0+git2023011010.0a69b971-0ubuntu1/setup.cfg
--- 1:16.0.0-3.1/setup.cfg	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/setup.cfg	2023-01-10 15:13:36.485261700 +0000
@@ -1,29 +1,30 @@
 [metadata]
 name = networking-mlnx
 summary = Mellanox Networking
-description-file =
-    README.rst
+description-file = 
+	README.rst
 author = Mellanox
 author-email = openstack@mellanox.com
 home-page = http://www.mellanox.com/
-classifier =
-    Environment :: OpenStack
-    Intended Audience :: Information Technology
-    Intended Audience :: System Administrators
-    License :: OSI Approved :: Apache Software License
-    Operating System :: POSIX :: Linux
-    Programming Language :: Python
-    Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.6
-    Programming Language :: Python :: 3.7
+classifier = 
+	Environment :: OpenStack
+	Intended Audience :: Information Technology
+	Intended Audience :: System Administrators
+	License :: OSI Approved :: Apache Software License
+	Operating System :: POSIX :: Linux
+	Programming Language :: Python
+	Programming Language :: Python :: 3
+	Programming Language :: Python :: 3.6
+	Programming Language :: Python :: 3.7
+	Programming Language :: Python :: 3.8
 
 [files]
-packages =
-    networking_mlnx
+packages = 
+	networking_mlnx
 
 [global]
-setup-hooks =
-    pbr.hooks.setup_hook
+setup-hooks = 
+	pbr.hooks.setup_hook
 
 [build_sphinx]
 all_files = 1
@@ -48,16 +49,20 @@ input_file = networking-mlnx/locale/netw
 universal = 1
 
 [entry_points]
-console_scripts =
-    neutron-mlnx-agent = networking_mlnx.cmd.eventlet.agents.mlnx_agent:main
-    eswitchd = networking_mlnx.eswitchd.eswitch_daemon:main
-    ebrctl = networking_mlnx.eswitchd.cli.ebrctl:main
-neutron.ml2.mechanism_drivers =
-    mlnx_sdn_assist = networking_mlnx.plugins.ml2.drivers.sdn.sdn_mech_driver:SDNMechanismDriver
-    mlnx_infiniband = networking_mlnx.plugins.ml2.drivers.mlnx.mech_mlnx:MlnxMechanismDriver
-neutron.db.alembic_migrations =
-    networking-mlnx = networking_mlnx.db.migration:alembic_migrations
-neutron.interface_drivers =
-    ipoib = networking_mlnx.linux.interface_drivers.interface:IPoIBInterfaceDriver
-    multi = networking_mlnx.linux.interface_drivers.interface:MultiInterfaceDriver
+console_scripts = 
+	neutron-mlnx-agent = networking_mlnx.cmd.eventlet.agents.mlnx_agent:main
+	eswitchd = networking_mlnx.eswitchd.eswitch_daemon:main
+	ebrctl = networking_mlnx.eswitchd.cli.ebrctl:main
+neutron.ml2.mechanism_drivers = 
+	mlnx_sdn_assist = networking_mlnx.plugins.ml2.drivers.sdn.sdn_mech_driver:SDNMechanismDriver
+	mlnx_infiniband = networking_mlnx.plugins.ml2.drivers.mlnx.mech_mlnx:MlnxMechanismDriver
+neutron.db.alembic_migrations = 
+	networking-mlnx = networking_mlnx.db.migration:alembic_migrations
+neutron.interface_drivers = 
+	ipoib = networking_mlnx.linux.interface_drivers.interface:IPoIBInterfaceDriver
+	multi = networking_mlnx.linux.interface_drivers.interface:MultiInterfaceDriver
+
+[egg_info]
+tag_build = 
+tag_date = 0
 
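The setup.cfg rewrite above is largely mechanical (setuptools re-emits the file with tab-indented values and an [egg_info] section when the sdist is built); the substantive changes are the Python 3.8 classifier and the same entry-point table shown earlier. For context, pbr consumes all of this through a minimal setup.py of the documented form (generic pbr usage, not copied from this package):

    import setuptools

    setuptools.setup(
        setup_requires=['pbr>4.0.0'],
        pbr=True,
    )
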
diff -pruN 1:16.0.0-3.1/test-requirements.txt 1:19.0.0+git2023011010.0a69b971-0ubuntu1/test-requirements.txt
--- 1:16.0.0-3.1/test-requirements.txt	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/test-requirements.txt	2023-01-10 15:13:35.000000000 +0000
@@ -1,30 +1,17 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-hacking>=1.1.0 # Apache-2.0
-
-cliff>=1.15.0 # Apache-2.0
-coverage>=3.6
-discover
-fixtures>=1.3.1 # Apache-2.0/BSD
-mock>=1.2
-python-subunit>=0.0.18
-requests-mock>=0.7.0  # Apache-2.0
-sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
-oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
-testrepository>=0.0.18
-testtools>=1.4.0
-testscenarios>=0.4
-WebTest>=2.0
-oslotest>=1.5.1  # Apache-2.0
-testresources>=0.2.4 # Apache-2.0/BSD
-os-testr>=1.0.0 # Apache-2.0
+coverage!=4.4,>=4.0 # Apache-2.0
+fixtures>=3.0.0 # Apache-2.0/BSD
+python-subunit>=1.0.0 # Apache-2.0/BSD
+testtools>=2.2.0 # MIT
+testresources>=2.0.0 # Apache-2.0/BSD
+testscenarios>=0.4 # Apache-2.0/BSD
+WebTest>=2.0.27 # MIT
+oslotest>=3.2.0 # Apache-2.0
+stestr>=1.0.0 # Apache-2.0
 ddt>=1.0.1 # MIT
-reno>=0.1.1 # Apache2
 # Needed to run DB commands in virtualenvs
-flake8-import-order==0.12 # LGPLv3
-pylint==1.9.2;python_version<"3.0" # GPLv2
-pylint==2.3.0;python_version>="3.0" # GPLv2
-bashate>=0.5.1 # Apache-2.0
-flake8==2.6.2
-bandit!=1.6.0,>=1.1.0 # Apache-2.0
+PyMySQL>=0.7.6 # MIT License
+mock>=1.2
+
diff -pruN 1:16.0.0-3.1/tox.ini 1:19.0.0+git2023011010.0a69b971-0ubuntu1/tox.ini
--- 1:16.0.0-3.1/tox.ini	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/tox.ini	2023-01-10 15:13:35.000000000 +0000
@@ -1,7 +1,8 @@
 [tox]
-envlist = py37,py36,pep8
-minversion = 2.3.2
+envlist = py38,pep8
+minversion = 3.2.0
 skipsdist = True
+ignore_basepython_conflict = True
 
 [testenv]
 basepython = python3
@@ -18,6 +19,7 @@ deps =
   -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
   -r{toxinidir}/requirements.txt
   -r{toxinidir}/test-requirements.txt
+  hacking>=3.0.1,<3.1.0 # Apache-2.0
 whitelist_externals = sh
 commands =
   {toxinidir}/tools/pip_install_src_modules.sh "{toxinidir}"
@@ -32,6 +34,10 @@ commands = false
 envdir = {toxworkdir}/shared
 deps =
   {[testenv]deps}
+  {[testenv:bashate]deps}
+  {[testenv:bandit]deps}
+  flake8-import-order==0.18.1 # LGPLv3
+  pylint==2.5.3 # GPLv2
 commands=
   # If it is easier to add a check via a shell script, consider adding it in this file
   #{toxinidir}/tools/check_unit_test_structure.sh
@@ -74,11 +80,12 @@ commands = sphinx-build -W -b html doc/s
 # E126 continuation line over-indented for hanging indent
 # E128 continuation line under-indented for visual indent
 # H405 multi line docstring summary not separated with an empty line
+# I202 Additional newline in a group of imports
 # N530 direct neutron imports not allowed
 # TODO(amotoki) check the following new rules should be fixed or ignored
 # E731 do not assign a lambda expression, use a def
 # W504 line break after binary operator
-ignore = E125,E126,E128,E129,E731,H405,N530,W504
+ignore = E125,E126,E128,E129,E731,H405,N530,W504,I202
 # H106: Don't put vim configuration in source files
 # H203: Use assertIs(Not)None to check for None
 # H204: Use assert(Not)Equal to check for equality
@@ -95,6 +102,8 @@ local-check-factory = neutron_lib.hackin
 
 [testenv:bashate]
 envdir = {toxworkdir}/shared
+deps =
+  bashate>=0.5.1 # Apache-2.0
 commands = bash -c "find {toxinidir}             \
          -not \( -type d -name .tox\* -prune \)  \
          -not \( -type d -name .venv\* -prune \) \
@@ -112,7 +121,9 @@ envdir = {toxworkdir}/shared
 # B303: blacklist calls: md5, sha1
 # B311: Standard pseudo-random generators are not suitable for security/cryptographic purpose
 # B604: any_other_function_with_shell_equals_true
-deps = -r{toxinidir}/test-requirements.txt
+deps =
+  bandit!=1.6.0,>=1.1.0 # Apache-2.0
+  -r{toxinidir}/test-requirements.txt
 commands = bandit -r networking_mlnx -x tests -n5 -s B104,B303,B311,B604
 
 [testenv:genconfig]
@@ -135,6 +146,7 @@ deps =
   -c{toxinidir}/lower-constraints.txt
   -r{toxinidir}/test-requirements.txt
   -r{toxinidir}/requirements.txt
+  hacking>=3.0.1,<3.1.0 # Apache-2.0
 
 [testenv:dev]
 # run locally (not in the gate) using editable mode
diff -pruN 1:16.0.0-3.1/.zuul.yaml 1:19.0.0+git2023011010.0a69b971-0ubuntu1/.zuul.yaml
--- 1:16.0.0-3.1/.zuul.yaml	2020-04-12 21:59:32.000000000 +0000
+++ 1:19.0.0+git2023011010.0a69b971-0ubuntu1/.zuul.yaml	2023-01-10 15:13:35.000000000 +0000
@@ -1,3 +1,3 @@
 - project:
     templates:
-        - openstack-python3-ussuri-jobs-neutron
+        - openstack-python3-zed-jobs-neutron
