| author    | alk3pInjection <webmaster@raspii.tech> | 2023-07-15 01:27:24 +0800 |
| committer | alk3pInjection <webmaster@raspii.tech> | 2023-07-15 01:27:32 +0800 |
| commit    | 14ce8d83ab4c649fa95fb4df0b34c4e30d89a950 (patch) | |
| tree      | ff3a68dd9af93cfe4f7526fd01d075919eb6528f | |
| parent    | 3edd61c14ac3874eb0fc1af94d467b0320810a24 (diff) | |
data-ipa-cfg-mgr: Restore libipanat (uminekotachibana-mr1)
This reverts commits 4f92615 and 856dafa.
Change-Id: I3b4978aafb08429a69af567a02a060c7c43b1382
56 files changed, 15014 insertions, 10 deletions
diff --git a/Makefile.am b/Makefile.am index 2842f0f..fab2aff 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,3 +1,3 @@ ACLOCAL_AMFLAGS = -I m4 AUTOMAKE_OPTIONS = foreign -SUBDIRS = ipacm/src/ +SUBDIRS = ipanat/src ipacm/src/ diff --git a/configure.ac b/configure.ac index 4c83c0a..6662ef6 100644 --- a/configure.ac +++ b/configure.ac @@ -4,8 +4,8 @@ AC_PREREQ([2.65]) AC_INIT(data-ipa, 1.0.0) AM_INIT_AUTOMAKE(data-ipa, 1.0.0) -AC_OUTPUT(Makefile ipacm/src/Makefile) -AC_CONFIG_SRCDIR([ipacm/src/IPACM_Main.cpp]) +AC_OUTPUT(Makefile ipanat/src/Makefile ipacm/src/Makefile) +AC_CONFIG_SRCDIR([ipanat/src/ipa_nat_drv.c]) AC_CONFIG_HEADERS([config.h]) AC_CONFIG_MACRO_DIR([m4]) @@ -26,11 +26,6 @@ AC_ARG_WITH(sanitized-headers, [CPPFLAGS="$CPPFLAGS -idirafter $withval"]) AM_CONDITIONAL(KERNELMODULES, [test -n -eq 0]) -AC_ARG_WITH(ipanat-headers, - AS_HELP_STRING([--with-ipanat-headers=DIR], - [Specify the location of the ipanat headers]), - [CPPFLAGS="$CPPFLAGS -idirafter $withval"]) - AC_ARG_WITH([glib], AC_HELP_STRING([--with-glib], [enable glib, building HLOS systems which use glib])) diff --git a/ipacm/src/Makefile.am b/ipacm/src/Makefile.am index be370f4..92ea904 100644 --- a/ipacm/src/Makefile.am +++ b/ipacm/src/Makefile.am @@ -1,4 +1,5 @@ AM_CPPFLAGS = -I./../inc \ + -I$(top_srcdir)/ipanat/inc \ ${LIBXML_CFLAGS} AM_CPPFLAGS += -Wall -Wundef -Wno-trigraphs AM_CPPFLAGS += -DDEBUG -g -DFEATURE_ETH_BRIDGE_LE -DFEATURE_L2TP @@ -27,8 +28,8 @@ ipacm_SOURCES = IPACM_Main.cpp \ bin_PROGRAMS = ipacm -requiredlibs = ${LIBXML_LIB} -lxml2 -lpthread -lnetfilter_conntrack \ - -lnfnetlink -lipanat +requiredlibs = ${LIBXML_LIB} -lxml2 -lpthread -lnetfilter_conntrack -lnfnetlink\ + ../../ipanat/src/libipanat.la AM_CPPFLAGS += "-std=c++0x" diff --git a/ipanat/Android.bp b/ipanat/Android.bp new file mode 100644 index 0000000..1bf8ff9 --- /dev/null +++ b/ipanat/Android.bp @@ -0,0 +1,33 @@ + + +cc_library_shared { + name: "libipanat", + + header_libs: ["device_kernel_headers"]+["qti_kernel_headers"], + + srcs: [ + "src/ipa_nat_map.cpp", + "src/ipa_table.c", + "src/ipa_nat_statemach.c", + "src/ipa_nat_drvi.c", + "src/ipa_nat_drv.c", + "src/ipa_mem_descriptor.c", + "src/ipa_nat_utils.c", + ], + + shared_libs: + ["libcutils", + "libdl", + "libbase", + "libutils", + ], + export_include_dirs: ["inc"], + vendor: true, + cflags: [ + "-DDEBUG", + "-Wall", + "-Werror", + ] + ["-DFEATURE_IPA_ANDROID"], + + clang: true, +} diff --git a/ipanat/inc/ipa_ipv6ct.h b/ipanat/inc/ipa_ipv6ct.h new file mode 100644 index 0000000..d0c84c8 --- /dev/null +++ b/ipanat/inc/ipa_ipv6ct.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef IPA_IPV6CT_H +#define IPA_IPV6CT_H + +#include <stdint.h> + +/** + * enum ipa_ipv6_ct_direction_settings_type - direction filter settings + * + * IPA_IPV6CT_DIRECTION_DENY_ALL - deny inbound and outbound + * IPA_IPV6CT_DIRECTION_ALLOW_OUT - allow outbound and deny inbound + * IPA_IPV6CT_DIRECTION_ALLOW_IN - allow inbound and deny outbound + * IPA_IPV6CT_DIRECTION_ALLOW_ALL - allow inbound and outbound + */ +typedef enum +{ + IPA_IPV6CT_DIRECTION_DENY_ALL = 0, + IPA_IPV6CT_DIRECTION_ALLOW_OUT = 1, + IPA_IPV6CT_DIRECTION_ALLOW_IN = 2, + IPA_IPV6CT_DIRECTION_ALLOW_ALL = 3 +} ipa_ipv6_ct_direction_settings_type; + +/** + * struct ipa_ipv6ct_rule - To hold IPv6CT rule + * @src_ipv6_lsb: source IPv6 address LSB + * @src_ipv6_msb: source IPv6 address MSB + * @dest_ipv6_lsb: destination IPv6 address LSB + * @dest_ipv6_msb: destination IPv6 address MSB + * @direction_settings: direction filter settings (inbound/outbound) (see ipa_ipv6_ct_direction_settings_type) + * @src_port: source port + * @dest_port: destination port + * @protocol: protocol of rule (tcp/udp) + */ +typedef struct { + uint64_t src_ipv6_lsb; + uint64_t src_ipv6_msb; + uint64_t dest_ipv6_lsb; + uint64_t dest_ipv6_msb; + ipa_ipv6_ct_direction_settings_type direction_settings; + uint16_t src_port; + uint16_t dest_port; + uint8_t protocol; +} ipa_ipv6ct_rule; + +/** + * ipa_ipv6ct_add_tbl() - create IPv6CT table + * @number_of_entries: [in] number of IPv6CT entries + * @table_handle: [out] handle of new IPv6CT table + * + * To create new IPv6CT table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_ipv6ct_add_tbl(uint16_t number_of_entries, uint32_t* table_handle); + +/** + * ipa_ipv6ct_del_tbl() - delete IPv6CT table + * @table_handle: [in] Handle of IPv6CT table + * + * To delete given IPv6CT table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_ipv6ct_del_tbl(uint32_t table_handle); + +/** + * ipa_ipv6ct_add_rule() - to insert new IPv6CT rule + * @table_handle: [in] handle of IPv6CT table + * @user_rule: [in] Pointer to new rule + * @rule_handle: [out] Return the handle to rule + * + * To insert new rule into a IPv6CT table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_ipv6ct_add_rule(uint32_t table_handle, const ipa_ipv6ct_rule* user_rule, uint32_t* rule_handle); + +/** + * ipa_ipv6ct_del_rule() - to delete IPv6CT rule + * @table_handle: [in] handle of IPv6CT table + * @rule_handle: [in] IPv6CT rule handle + * + * To delete a rule from a IPv6CT table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_ipv6ct_del_rule(uint32_t table_handle, uint32_t rule_handle); + +/** + * ipa_ipv6ct_query_timestamp() - to query timestamp + * @table_handle: [in] handle of IPv6CT table + * @rule_handle: 
[in] IPv6CT rule handle + * @time_stamp: [out] time stamp of rule + * + * To retrieve the timestamp that lastly the IPv6CT rule was accessed + * + * Returns: 0 On Success, negative on failure + */ +int ipa_ipv6ct_query_timestamp(uint32_t table_handle, uint32_t rule_handle, uint32_t* time_stamp); + +/** + * ipa_ipv6ct_dump_table() - dumps IPv6CT table + * @table_handle: [in] handle of IPv6CT table + */ +void ipa_ipv6ct_dump_table(uint32_t tbl_hdl); + +#endif + diff --git a/ipanat/inc/ipa_ipv6cti.h b/ipanat/inc/ipa_ipv6cti.h new file mode 100644 index 0000000..4d4e160 --- /dev/null +++ b/ipanat/inc/ipa_ipv6cti.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
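The ipa_ipv6ct.h interface above follows a create-table / add-rule / query / delete lifecycle. A minimal usage sketch, assuming the documented 0-on-success return convention; the table size, addresses and ports are illustrative only:

/* Sketch of the ipa_ipv6ct.h call sequence; all values are examples. */
uint32_t tbl_hdl, rule_hdl, ts;
ipa_ipv6ct_rule rule = {
    .src_ipv6_msb       = 0x20010db800000000ULL,   /* example: 2001:db8::1 -> 2001:db8::2 */
    .src_ipv6_lsb       = 0x0000000000000001ULL,
    .dest_ipv6_msb      = 0x20010db800000000ULL,
    .dest_ipv6_lsb      = 0x0000000000000002ULL,
    .direction_settings = IPA_IPV6CT_DIRECTION_ALLOW_ALL,
    .src_port           = 5000,
    .dest_port          = 80,
    .protocol           = 6,                        /* TCP */
};

if (ipa_ipv6ct_add_tbl(128, &tbl_hdl) == 0 &&
    ipa_ipv6ct_add_rule(tbl_hdl, &rule, &rule_hdl) == 0) {
    ipa_ipv6ct_query_timestamp(tbl_hdl, rule_hdl, &ts);
    ipa_ipv6ct_del_rule(tbl_hdl, rule_hdl);
    ipa_ipv6ct_del_tbl(tbl_hdl);
}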
+ */ +#ifndef IPA_IPV6CTI_H +#define IPA_IPV6CTI_H + +#include "ipa_table.h" +#include "ipa_mem_descriptor.h" +#include "ipa_nat_utils.h" + +#define IPA_IPV6CT_MAX_TBLS 1 + +#define IPA_IPV6CT_RULE_FLAG_FIELD_OFFSET 34 +#define IPA_IPV6CT_RULE_NEXT_FIELD_OFFSET 40 +#define IPA_IPV6CT_RULE_PROTO_FIELD_OFFSET 38 + +#define IPA_IPV6CT_FLAG_ENABLE_BIT 1 + +#define IPA_IPV6CT_DIRECTION_ALLOW_BIT 1 +#define IPA_IPV6CT_DIRECTION_DISALLOW_BIT 0 + +#define IPA_IPV6CT_INVALID_PROTO_FIELD_VALUE 0xFF00 +#define IPA_IPV6CT_INVALID_PROTO_FIELD_CMP 0xFF + +typedef enum +{ + IPA_IPV6CT_TABLE_FLAGS, + IPA_IPV6CT_TABLE_NEXT_INDEX, + IPA_IPV6CT_TABLE_PROTOCOL, + IPA_IPV6CT_TABLE_DMA_CMD_MAX +} ipa_ipv6ct_table_dma_cmd_type; + +/*------------------------ IPV6CT Table Entry --------------------------------------------------- + + ------------------------------------------------------------------------------------------------- + | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + --------------------------------------------------------------------------------------------------- + | Outbound Src IPv6 Address (8 LSB Bytes) | + --------------------------------------------------------------------------------------------------- + | Outbound Src IPv6 Address (8 MSB Bytes) | + --------------------------------------------------------------------------------------------------- + | Outbound Dest IPv6 Address (8 LSB Bytes) | + --------------------------------------------------------------------------------------------------- + | Outbound Dest IPv6 Address (8 MSB Bytes) | + --------------------------------------------------------------------------------------------------- + | Protocol | TimeStamp (3B) | Flags (2B) | Reserved (2B) | + | (1B) | |Enable|Redirect|Resv | | + --------------------------------------------------------------------------------------------------- + |Reserved |Direction(1B)| Src Port (2B) | Dest Port (2B) | Next Index (2B) | + | (1B) |IN|OUT|Resv | | | | + --------------------------------------------------------------------------------------------------- + | SW Specific Parameters(4B) | Reserved (4B) | + | Prev Index (2B) | Reserved (2B) | | + --------------------------------------------------------------------------------------------------- + | Reserved (8B) | + --------------------------------------------------------------------------------------------------- + + Dont change below structure definition. 
+ It should be same as above(little endian order) + -------------------------------------------------------------------------------------------------*/ +typedef struct +{ + uint64_t src_ipv6_lsb : 64; + uint64_t src_ipv6_msb : 64; + uint64_t dest_ipv6_lsb : 64; + uint64_t dest_ipv6_msb : 64; + + uint64_t rsvd1 : 30; + uint64_t redirect : 1; + uint64_t enable : 1; + uint64_t time_stamp : 24; + uint64_t protocol : 8; + + uint64_t next_index : 16; + uint64_t dest_port : 16; + uint64_t src_port : 16; + uint64_t rsvd2 : 6; + uint64_t out_allowed : 1; + uint64_t in_allowed : 1; + uint64_t rsvd3 : 8; + + uint64_t rsvd4 : 48; + uint64_t prev_index : 16; + + uint64_t rsvd5 : 64; +} ipa_ipv6ct_hw_entry; + +/* + ---------------------- + | 1 | 0 | + ---------------------- + | Flags(2B) | + |Enable|Redirect|Resv| + ---------------------- +*/ +typedef struct +{ + uint32_t rsvd1 : 14; + uint32_t redirect : 1; + uint32_t enable : 1; +} ipa_ipv6ct_flags; + +typedef struct +{ + ipa_mem_descriptor mem_desc; + ipa_table table; + ipa_table_dma_cmd_helper table_dma_cmd_helpers[IPA_IPV6CT_TABLE_DMA_CMD_MAX]; +} ipa_ipv6ct_table; + +typedef struct +{ + ipa_descriptor* ipa_desc; + ipa_ipv6ct_table tables[IPA_IPV6CT_MAX_TBLS]; + uint8_t table_cnt; +} ipa_ipv6ct; + +#endif diff --git a/ipanat/inc/ipa_mem_descriptor.h b/ipanat/inc/ipa_mem_descriptor.h new file mode 100644 index 0000000..e33100c --- /dev/null +++ b/ipanat/inc/ipa_mem_descriptor.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef IPA_MEM_DESCRIPTOR_H +#define IPA_MEM_DESCRIPTOR_H + +#include <stdint.h> +#include <stdbool.h> +#include <linux/msm_ipa.h> + +typedef struct +{ + int orig_rqst_size; + int mmap_size; + void* base_addr; + void* mmap_addr; + uint32_t addr_offset; + unsigned long allocate_ioctl_num; + unsigned long delete_ioctl_num; + char name[IPA_RESOURCE_NAME_MAX]; + uint8_t table_index; + uint8_t valid; + bool consider_using_sram; + bool sram_available; + bool sram_to_be_used; + struct ipa_nat_in_sram_info nat_sram_info; +} ipa_mem_descriptor; + +void ipa_mem_descriptor_init( + ipa_mem_descriptor* desc, + const char* device_name, + int size, + uint8_t table_index, + unsigned long allocate_ioctl_num, + unsigned long delete_ioctl_num, + bool consider_using_sram ); + +int ipa_mem_descriptor_allocate_memory( + ipa_mem_descriptor* desc, + int ipa_fd); + +int ipa_mem_descriptor_delete( + ipa_mem_descriptor* desc, + int ipa_fd); + +#endif + diff --git a/ipanat/inc/ipa_nat_drv.h b/ipanat/inc/ipa_nat_drv.h new file mode 100644 index 0000000..739230a --- /dev/null +++ b/ipanat/inc/ipa_nat_drv.h @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef IPA_NAT_DRV_H +#define IPA_NAT_DRV_H + +#include "ipa_nat_utils.h" + +#include <stdint.h> /* uint32_t */ +#include <stdbool.h> + +/** + * ipa_nat_is_sram_supported() - Reports if sram is available for use + */ +bool ipa_nat_is_sram_supported(void); + +/** + * struct ipa_nat_ipv4_rule - To hold ipv4 nat rule + * @target_ip: destination ip address + * @private_ip: private ip address + * @target_port: destination port + * @private_port: private port + * @protocol: protocol of rule (tcp/udp) + * @pdn_index: PDN index in the PDN config table + * @redirect: used internally by various API calls + * @enable: used internally by various API calls + * @time_stamp: used internally by various API calls + */ +typedef struct { + uint32_t target_ip; + uint32_t private_ip; + uint16_t target_port; + uint16_t private_port; + uint16_t public_port; + uint8_t protocol; + uint8_t pdn_index; + uint8_t redirect; + uint8_t enable; + uint32_t time_stamp; +} ipa_nat_ipv4_rule; + +static inline char* prep_nat_ipv4_rule_4print( + const ipa_nat_ipv4_rule* rule_ptr, + char* buf_ptr, + uint32_t buf_sz ) +{ + if ( rule_ptr && buf_ptr && buf_sz ) + { + snprintf( + buf_ptr, buf_sz, + "IPV4 RULE: " + "protocol(0x%02X) " + "public_port(0x%04X) " + "target_ip(0x%08X) " + "target_port(0x%04X) " + "private_ip(0x%08X) " + "private_port(0x%04X) " + "pdn_index(0x%02X)", + rule_ptr->protocol, + rule_ptr->public_port, + rule_ptr->target_ip, + rule_ptr->target_port, + rule_ptr->private_ip, + rule_ptr->private_port, + rule_ptr->pdn_index); + } + + return buf_ptr; +} + +/** + * struct ipa_nat_pdn_entry - holds a PDN entry data + * @public_ip: PDN's public ip address + * @src_metadata: metadata to be used for source NAT metadata replacement + * @dst_metadata: metadata to be used for destination NAT metadata replacement + */ +typedef struct { + uint32_t public_ip; + uint32_t src_metadata; + uint32_t dst_metadata; +} ipa_nat_pdn_entry; + +/** + * ipa_nat_add_ipv4_tbl() - create ipv4 nat table + * @public_ip_addr: [in] public ipv4 address + * @mem_type_ptr: [in] type of memory table is to reside in + * @number_of_entries: [in] number of nat entries + * @table_handle: [out] Handle of new ipv4 nat table + * + * To create new ipv4 nat table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_add_ipv4_tbl( + uint32_t public_ip_addr, + const char *mem_type_ptr, + uint16_t number_of_entries, + uint32_t *table_handle); + +/** + * ipa_nat_del_ipv4_tbl() - delete ipv4 table + * @table_handle: [in] Handle of ipv4 nat table + * + * To delete given ipv4 nat table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_del_ipv4_tbl(uint32_t table_handle); + +/** + * ipa_nat_add_ipv4_rule() - to insert new ipv4 rule + * @table_handle: [in] handle of ipv4 nat table + * @rule: [in] Pointer to new rule + * @rule_handle: [out] Return the handle to rule + * + * To insert new ipv4 nat rule into ipv4 nat table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_add_ipv4_rule(uint32_t table_handle, + const ipa_nat_ipv4_rule * rule, + uint32_t *rule_handle); + +/** + * ipa_nat_del_ipv4_rule() - to delete ipv4 nat rule + * @table_handle: [in] handle of ipv4 nat table + * @rule_handle: [in] ipv4 nat rule handle + * + * To insert new ipv4 nat rule into ipv4 nat table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_del_ipv4_rule(uint32_t table_handle, + uint32_t rule_handle); + + +/** + * ipa_nat_query_timestamp() - to query timestamp + * @table_handle: [in] handle of ipv4 nat table + * 
@rule_handle: [in] ipv4 nat rule handle + * @time_stamp: [out] time stamp of rule + * + * To retrieve the timestamp that lastly the + * nat rule was accessed + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_query_timestamp(uint32_t table_handle, + uint32_t rule_handle, + uint32_t *time_stamp); + + +/** + * ipa_nat_modify_pdn() - modify single PDN entry in the PDN config table + * @table_handle: [in] handle of ipv4 nat table + * @pdn_index : [in] the index of the entry to be modified + * @pdn_info : [in] values for the PDN entry to be changed + * + * Modify a PDN entry + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_modify_pdn(uint32_t tbl_hdl, + uint8_t pdn_index, + ipa_nat_pdn_entry *pdn_info); + +/** +* ipa_nat_get_pdn_index() - get a PDN index for a public ip +* @public_ip : [in] IPv4 address of the PDN entry +* @pdn_index : [out] the index of the requested PDN entry +* +* Get a PDN entry +* +* Returns: 0 On Success, negative on failure +*/ +int ipa_nat_get_pdn_index(uint32_t public_ip, uint8_t *pdn_index); + +/** +* ipa_nat_alloc_pdn() - allocate a PDN for new WAN +* @pdn_info : [in] values for the PDN entry to be created +* @pdn_index : [out] the index of the requested PDN entry +* +* allocate a new PDN entry +* +* Returns: 0 On Success, negative on failure +*/ +int ipa_nat_alloc_pdn(ipa_nat_pdn_entry *pdn_info, + uint8_t *pdn_index); + +/** +* ipa_nat_get_pdn_count() - get the number of allocated PDNs +* @pdn_cnt : [out] the number of allocated PDNs +* +* get the number of allocated PDNs +* +* Returns: 0 On Success, negative on failure +*/ +int ipa_nat_get_pdn_count(uint8_t *pdn_cnt); + +/** +* ipa_nat_dealloc_pdn() - deallocate a PDN entry +* @pdn_index : [in] pdn index to be deallocated +* +* deallocate a PDN in specified index - zero the PDN entry +* +* Returns: 0 On Success, negative on failure +*/ +int ipa_nat_dealloc_pdn(uint8_t pdn_index); + + +/** + * ipa_nat_dump_ipv4_table() - dumps IPv4 NAT table + * @table_handle: [in] handle of IPv4 NAT table + */ +void ipa_nat_dump_ipv4_table(uint32_t tbl_hdl); + +/** + * ipa_nat_vote_clock() - used for voting clock + * @vote_type: [in] desired vote type + */ +int ipa_nat_vote_clock( + enum ipa_app_clock_vote_type vote_type ); + +/** + * ipa_nat_switch_to() - While in HYBRID mode only, used for switching + * from SRAM to DDR or the reverse. + * @nmi: memory type to switch to + * @hold_state: Will the new memory type get locked in (ie. no more + * oscilation between the memory types) + */ +int ipa_nat_switch_to( + enum ipa3_nat_mem_in nmi, + bool hold_state ); + +#endif + diff --git a/ipanat/inc/ipa_nat_drvi.h b/ipanat/inc/ipa_nat_drvi.h new file mode 100644 index 0000000..ae6d363 --- /dev/null +++ b/ipanat/inc/ipa_nat_drvi.h @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
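The public NAT API in ipa_nat_drv.h above mirrors the IPv6CT lifecycle. A minimal sketch, assuming the documented 0-on-success convention; the memory-type string, table size, addresses and ports are illustrative (per mem_type_str_to_ipa_nati_state() further down in this patch, any string other than "SRAM" or "HYBRID" falls back to DDR):

/* Sketch of the ipa_nat_drv.h call sequence; all values are examples. */
uint32_t tbl_hdl, rule_hdl, ts;
ipa_nat_ipv4_rule rule = {
    .target_ip    = 0xC0A80002,   /* 192.168.0.2 (example) */
    .private_ip   = 0x0A000002,   /* 10.0.0.2    (example) */
    .target_port  = 80,
    .private_port = 5000,
    .public_port  = 6000,
    .protocol     = 6,            /* TCP */
    .pdn_index    = 0,
};

if (ipa_nat_add_ipv4_tbl(0xC0A80001 /* public IP, example */, "DDR", 128, &tbl_hdl) == 0 &&
    ipa_nat_add_ipv4_rule(tbl_hdl, &rule, &rule_hdl) == 0) {
    ipa_nat_query_timestamp(tbl_hdl, rule_hdl, &ts);
    ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
    ipa_nat_del_ipv4_tbl(tbl_hdl);
}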
+ * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef IPA_NAT_DRVI_H +#define IPA_NAT_DRVI_H + +#include "ipa_table.h" +#include "ipa_mem_descriptor.h" +#include "ipa_nat_utils.h" + +#undef MAKE_TBL_HDL +#define MAKE_TBL_HDL(hdl, mt) \ + ((mt) << 31 | (hdl)) + +#undef BREAK_TBL_HDL +#define BREAK_TBL_HDL(hdl_in, mt, hdl_out) \ + do { \ + mt = (hdl_in) >> 31 & 0x0000000001; \ + hdl_out = (hdl_in) & 0x00000000FF; \ + } while ( 0 ) + +#undef VALID_TBL_HDL +#define VALID_TBL_HDL(h) \ + (((h) & 0x00000000FF) == IPA_NAT_MAX_IP4_TBLS) + +/*======= IMPLEMENTATION related data structures and functions ======= */ + +#define IPA_NAT_MAX_IP4_TBLS 1 + +#define IPA_NAT_RULE_FLAG_FIELD_OFFSET 18 +#define IPA_NAT_RULE_NEXT_FIELD_OFFSET 8 +#define IPA_NAT_RULE_PROTO_FIELD_OFFSET 22 + +#define IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET 2 +#define IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET 0 + +#define IPA_NAT_FLAG_ENABLE_BIT 1 + +#define IPA_NAT_INVALID_PROTO_FIELD_VALUE 0xFF00 +/* + * IPA_NAT_INVALID_PROTO_FIELD_VALUE above is what's passed to the IPA + * in a DMA command. It is written into the NAT's rule, by the + * IPA. After being written, It minifests in the rule in the form + * below, hence it will be used when perusing the "struct + * ipa_nat_rule". + */ +#define IPA_NAT_INVALID_PROTO_FIELD_VALUE_IN_RULE 0xFF + +typedef enum { + IPA_NAT_TABLE_FLAGS, + IPA_NAT_TABLE_NEXT_INDEX, + IPA_NAT_TABLE_PROTOCOL, + IPA_NAT_INDEX_TABLE_ENTRY, + IPA_NAT_INDEX_TABLE_NEXT_INDEX, + IPA_NAT_TABLE_DMA_CMD_MAX +} ipa_nat_table_dma_cmd_type; + +/* + * ------------------------ NAT Table Entry ----------------------------------------- + * + * ------------------------------------------------------------------------------------ + * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * ------------------------------------------------------------------------------------ + * | Target IP(4B) | Private IP(4B) | + * ------------------------------------------------------------------------------------ + * |Target Port(2B) | Private Port(2B) | Public Port(2B) | Next Index(2B) | + * ------------------------------------------------------------------------------------ + * | Proto | TimeStamp(3B) | Flags(2B) | IP check sum Diff(2B)| + * | (1B) | |EN|Redirect|Resv | | + * ------------------------------------------------------------------------------------ + * | TCP/UDP checksum |PDN info|Reserved | SW Specific Parameters(4B) | + * | diff (2B) | (1B) | (1B) | | + * ------------------------------------------------------------------------------------ + * + * Dont change below structure definition. 
+ * + * It should be same as above(little endian order) + * + * ------------------------------------------------------------------------------- + */ +struct ipa_nat_rule { + uint64_t private_ip:32; + uint64_t target_ip:32; + + uint64_t next_index:16; + uint64_t public_port:16; + uint64_t private_port:16; + uint64_t target_port:16; + + uint64_t ip_chksum:16; + uint64_t rsvd1:14; + uint64_t redirect:1; + uint64_t enable:1; + uint64_t time_stamp:24; + uint64_t protocol:8; + + /*-------------------------------------------------- + 32 bit sw_spec_params is interpreted as follows + ------------------------------------ + | 16 bits | 16 bits | + ------------------------------------ + | index table | prev index | + | entry | | + ------------------------------------ + --------------------------------------------------*/ + uint64_t prev_index:16; + uint64_t indx_tbl_entry:16; + uint64_t rsvd2:8; + /*----------------------------------------- + 8 bit PDN info is interpreted as following + ------------------------------------ + | 4 bits | 4 bits | + ------------------------------------ + | PDN index | reserved | + | | | + ------------------------------------ + -------------------------------------------*/ + uint64_t rsvd3:4; + uint64_t pdn_index:4; + uint64_t tcp_udp_chksum:16; +}; + +static inline char* prep_nat_rule_4print( + struct ipa_nat_rule* rule_ptr, + char* buf_ptr, + uint32_t buf_sz ) +{ + if ( rule_ptr && buf_ptr && buf_sz ) + { + snprintf( + buf_ptr, buf_sz, + "NAT RULE: " + "protocol(0x%02X) " + "public_port(0x%04X) " + "target_ip(0x%08X) " + "target_port(0x%04X) " + "private_ip(0x%08X) " + "private_port(0x%04X) " + "pdn_index(0x%02X) " + "ip_chksum(0x%04X) " + "tcp_udp_chksum(0x%04X) " + "redirect(0x%02X) " + "enable(0x%02X) " + "time_stamp(0x%08X) " + "indx_tbl_entry(0x%04X) " + "prev_index(0x%04X) " + "next_index(0x%04X)", + rule_ptr->protocol, + rule_ptr->public_port, + rule_ptr->target_ip, + rule_ptr->target_port, + rule_ptr->private_ip, + rule_ptr->private_port, + rule_ptr->pdn_index, + rule_ptr->ip_chksum, + rule_ptr->tcp_udp_chksum, + rule_ptr->redirect, + rule_ptr->enable, + rule_ptr->time_stamp, + rule_ptr->indx_tbl_entry, + rule_ptr->prev_index, + rule_ptr->next_index); + } + + return buf_ptr; +} + +static inline const char *ipa3_nat_mem_in_as_str( + enum ipa3_nat_mem_in nmi) +{ + switch (nmi) { + case IPA_NAT_MEM_IN_DDR: + return "IPA_NAT_MEM_IN_DDR"; + case IPA_NAT_MEM_IN_SRAM: + return "IPA_NAT_MEM_IN_SRAM"; + default: + break; + } + return "???"; +} + +static inline char *ipa_ioc_v4_nat_init_as_str( + struct ipa_ioc_v4_nat_init *ptr, + char *buf, + uint32_t buf_sz) +{ + if (ptr && buf && buf_sz) { + snprintf( + buf, buf_sz, + "V4 NAT INIT: tbl_index(0x%02X) ipv4_rules_offset(0x%08X) expn_rules_offset(0x%08X) index_offset(0x%08X) index_expn_offset(0x%08X) table_entries(0x%04X) expn_table_entries(0x%04X) ip_addr(0x%08X)", + ptr->tbl_index, + ptr->ipv4_rules_offset, + ptr->expn_rules_offset, + ptr->index_offset, + ptr->index_expn_offset, + ptr->table_entries, + ptr->expn_table_entries, + ptr->ip_addr); + } + return buf; +} + +/* + --------------------------------------- + | 1 | 0 | + --------------------------------------- + | Flags(2B) | + |Enable|Redirect|Resv | + --------------------------------------- +*/ +typedef struct { + uint32_t rsvd1:14; + uint32_t redirect:1; + uint32_t enable:1; +} ipa_nat_flags; + +struct ipa_nat_indx_tbl_rule { + uint16_t tbl_entry; + uint16_t next_index; +}; + +struct ipa_nat_indx_tbl_meta_info { + uint16_t prev_index; +}; + +struct 
ipa_nat_ip4_table_cache { + uint32_t public_addr; + ipa_mem_descriptor mem_desc; + ipa_table table; + ipa_table index_table; + struct ipa_nat_indx_tbl_meta_info *index_expn_table_meta; + ipa_table_dma_cmd_helper table_dma_cmd_helpers[IPA_NAT_TABLE_DMA_CMD_MAX]; +}; + +struct ipa_nat_cache { + ipa_descriptor* ipa_desc; + struct ipa_nat_ip4_table_cache ip4_tbl[IPA_NAT_MAX_IP4_TBLS]; + uint8_t table_cnt; + enum ipa3_nat_mem_in nmi; +}; + +int ipa_nati_add_ipv4_tbl( + uint32_t public_ip_addr, + const char *mem_type_ptr, + uint16_t number_of_entries, + uint32_t *table_hanle); + +int ipa_nati_del_ipv4_table(uint32_t tbl_hdl); + +int ipa_nati_query_timestamp(uint32_t tbl_hdl, + uint32_t rule_hdl, + uint32_t *time_stamp); + +int ipa_nati_modify_pdn(struct ipa_ioc_nat_pdn_entry *entry); + +int ipa_nati_get_pdn_index(uint32_t public_ip, uint8_t *pdn_index); + +int ipa_nati_alloc_pdn(ipa_nat_pdn_entry *pdn_info, uint8_t *pdn_index); + +int ipa_nati_get_pdn_cnt(void); + +int ipa_nati_dealloc_pdn(uint8_t pdn_index); + +int ipa_nati_add_ipv4_rule(uint32_t tbl_hdl, + const ipa_nat_ipv4_rule *clnt_rule, + uint32_t *rule_hdl); + +int ipa_nati_del_ipv4_rule(uint32_t tbl_hdl, + uint32_t rule_hdl); + +int ipa_nati_get_sram_size( + uint32_t* size_ptr); + +int ipa_nati_clear_ipv4_tbl( + uint32_t tbl_hdl ); + +int ipa_nati_copy_ipv4_tbl( + uint32_t src_tbl_hdl, + uint32_t dst_tbl_hdl, + ipa_table_walk_cb copy_cb ); + +typedef enum +{ + USE_NAT_TABLE = 0, + USE_INDEX_TABLE = 1, + + USE_MAX +} WhichTbl2Use; + +#define VALID_WHICHTBL2USE(w) \ + ( (w) >= USE_NAT_TABLE && (w) < USE_MAX ) + +int ipa_nati_walk_ipv4_tbl( + uint32_t tbl_hdl, + WhichTbl2Use which, + ipa_table_walk_cb walk_cb, + void* arb_data_ptr ); + +/* + * The following used for retrieving table stats. + */ +typedef struct +{ + enum ipa3_nat_mem_in nmi; + uint32_t tot_ents; + uint32_t tot_base_ents; + uint32_t tot_base_ents_filled; + uint32_t tot_expn_ents; + uint32_t tot_expn_ents_filled; + uint32_t tot_chains; + uint32_t min_chain_len; + uint32_t max_chain_len; + float avg_chain_len; +} ipa_nati_tbl_stats; + +int ipa_nati_ipv4_tbl_stats( + uint32_t tbl_hdl, + ipa_nati_tbl_stats* nat_stats_ptr, + ipa_nati_tbl_stats* idx_stats_ptr ); + +int ipa_nati_vote_clock( + enum ipa_app_clock_vote_type vote_type ); + +int ipa_NATI_add_ipv4_tbl( + enum ipa3_nat_mem_in nmi, + uint32_t public_ip_addr, + uint16_t number_of_entries, + uint32_t* tbl_hdl); + +int ipa_NATI_del_ipv4_table( + uint32_t tbl_hdl); + +int ipa_NATI_clear_ipv4_tbl( + uint32_t tbl_hdl ); + +int ipa_NATI_walk_ipv4_tbl( + uint32_t tbl_hdl, + WhichTbl2Use which, + ipa_table_walk_cb walk_cb, + void* arb_data_ptr ); + +int ipa_NATI_ipv4_tbl_stats( + uint32_t tbl_hdl, + ipa_nati_tbl_stats* nat_stats_ptr, + ipa_nati_tbl_stats* idx_stats_ptr ); + +int ipa_NATI_query_timestamp( + uint32_t tbl_hdl, + uint32_t rule_hdl, + uint32_t* time_stamp); + +int ipa_NATI_add_ipv4_rule( + uint32_t tbl_hdl, + const ipa_nat_ipv4_rule* clnt_rule, + uint32_t* rule_hdl); + +int ipa_NATI_del_ipv4_rule( + uint32_t tbl_hdl, + uint32_t rule_hdl); + +int ipa_NATI_post_ipv4_init_cmd( + uint32_t tbl_hdl ); + +#endif /* #ifndef IPA_NAT_DRVI_H */ diff --git a/ipanat/inc/ipa_nat_map.h b/ipanat/inc/ipa_nat_map.h new file mode 100644 index 0000000..d81061b --- /dev/null +++ b/ipanat/inc/ipa_nat_map.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. 
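The MAKE_TBL_HDL/BREAK_TBL_HDL macros in ipa_nat_drvi.h above pack the memory type into bit 31 of a table handle and keep the raw handle in the low byte. A small worked example; the handle value is illustrative, and whether flag value 1 corresponds to IPA_NAT_MEM_IN_SRAM depends on the kernel's enum ipa3_nat_mem_in, which is not part of this patch:

/* Worked example of the table-handle packing macros; values are illustrative. */
uint32_t mt, raw;
uint32_t hdl = MAKE_TBL_HDL(1u, 1u);   /* raw handle 1 with the memory-type bit set */

BREAK_TBL_HDL(hdl, mt, raw);
/* hdl == 0x80000001, mt == 1 (bit 31), raw == 0x01 (low byte) */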
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#if !defined(_IPA_NATI_MAP_H_) +# define _IPA_NATI_MAP_H_ + +#include <stdint.h> + +# ifdef __cplusplus +extern "C" +{ +# endif /* __cplusplus */ + +/* Used below */ +#define MAKE_AS_STR_CASE(v) case v: return #v + +/* + * The following is used to describe which map to use. + * + * PLEASE KEEP THE FOLLOWING IN SYNC WITH ipa_which_map_as_str() + * BELOW. + */ +typedef enum +{ + MAP_NUM_00 = 0, + MAP_NUM_01 = 1, + MAP_NUM_02 = 2, + MAP_NUM_03 = 3, + + MAP_NUM_99 = 4, + + MAP_NUM_MAX +} ipa_which_map; + +#define VALID_IPA_USE_MAP(w) \ + ( (w) >= MAP_NUM_00 || (w) < MAP_NUM_MAX ) + +/* KEEP THE FOLLOWING IN SYNC WITH ABOVE. */ +static inline const char* ipa_which_map_as_str( + ipa_which_map w ) +{ + switch ( w ) + { + MAKE_AS_STR_CASE(MAP_NUM_00); + MAKE_AS_STR_CASE(MAP_NUM_01); + MAKE_AS_STR_CASE(MAP_NUM_02); + MAKE_AS_STR_CASE(MAP_NUM_03); + + MAKE_AS_STR_CASE(MAP_NUM_99); + default: + break; + } + + return "???"; +} + +int ipa_nat_map_add( + ipa_which_map which, + uint32_t key, + uint32_t val ); + +int ipa_nat_map_find( + ipa_which_map which, + uint32_t key, + uint32_t* val_ptr ); + +int ipa_nat_map_del( + ipa_which_map which, + uint32_t key, + uint32_t* val_ptr ); + +int ipa_nat_map_clear( + ipa_which_map which ); + +int ipa_nat_map_dump( + ipa_which_map which ); + +# ifdef __cplusplus +} +# endif /* __cplusplus */ + +#endif /* #if !defined(_IPA_NATI_MAP_H_) */ diff --git a/ipanat/inc/ipa_nat_statemach.h b/ipanat/inc/ipa_nat_statemach.h new file mode 100644 index 0000000..94b86ed --- /dev/null +++ b/ipanat/inc/ipa_nat_statemach.h @@ -0,0 +1,349 @@ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
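ipa_nat_map.h above exposes a small keyed-value store that, per the comments in ipa_nat_statemach.h below, maps rule handles between their original and remapped values. A minimal sketch, assuming the library-wide 0-on-success convention since return codes are not documented in this header; the handle values are examples:

/* Sketch of the ipa_nat_map.h API; key/value pairs are example rule handles. */
uint32_t orig_hdl = 0x10, new_hdl = 0x20, found;

if (ipa_nat_map_add(MAP_NUM_00, orig_hdl, new_hdl) == 0 &&
    ipa_nat_map_find(MAP_NUM_00, orig_hdl, &found) == 0) {
    /* found == new_hdl */
    ipa_nat_map_del(MAP_NUM_00, orig_hdl, &found);
}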
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#if !defined(_IPA_NAT_STATEMACH_H_) +# define _IPA_NAT_STATEMACH_H_ + +#define MAKE_AS_STR_CASE(v) case v: return #v + +/******************************************************************************/ +/** + * The following enum represents the states that a nati object can be + * in. + */ +typedef enum { + NATI_STATE_NULL = 0, + NATI_STATE_DDR_ONLY = 1, /* NAT in DDR only (traditional) */ + NATI_STATE_SRAM_ONLY = 2, /* NAT in SRAM only (new) */ + NATI_STATE_HYBRID = 3, /* NAT simultaneously in both SRAM/DDR */ + NATI_STATE_HYBRID_DDR = 4, /* NAT transitioned from SRAM to DDR */ + + NATI_STATE_LAST +} ipa_nati_state; + +/* KEEP THE FOLLOWING IN SYNC WITH ABOVE. */ +static inline const char* ipa_nati_state_as_str( + ipa_nati_state s ) +{ + switch ( s ) + { + MAKE_AS_STR_CASE(NATI_STATE_NULL); + MAKE_AS_STR_CASE(NATI_STATE_DDR_ONLY); + MAKE_AS_STR_CASE(NATI_STATE_SRAM_ONLY); + MAKE_AS_STR_CASE(NATI_STATE_HYBRID); + MAKE_AS_STR_CASE(NATI_STATE_HYBRID_DDR); + MAKE_AS_STR_CASE(NATI_STATE_LAST); + + default: + break; + } + + return "???"; +} + +# undef strcasesame +# define strcasesame(a, b) (!strcasecmp(a, b)) + +static inline ipa_nati_state mem_type_str_to_ipa_nati_state( + const char* str ) +{ + if ( str ) { + if (strcasesame(str, "HYBRID" )) + return NATI_STATE_HYBRID; + if (strcasesame(str, "SRAM" )) + return NATI_STATE_SRAM_ONLY; + } + return NATI_STATE_DDR_ONLY; +} + +/******************************************************************************/ +/** + * The following enum represents the API triggers that may or may not + * cause a nati object to transition through its various allowable + * states defined in ipa_nati_state above. + */ +typedef enum { + NATI_TRIG_NULL = 0, + NATI_TRIG_ADD_TABLE = 1, + NATI_TRIG_DEL_TABLE = 2, + NATI_TRIG_CLR_TABLE = 3, + NATI_TRIG_WLK_TABLE = 4, + NATI_TRIG_TBL_STATS = 5, + NATI_TRIG_ADD_RULE = 6, + NATI_TRIG_DEL_RULE = 7, + NATI_TRIG_TBL_SWITCH = 8, + NATI_TRIG_GOTO_DDR = 9, + NATI_TRIG_GOTO_SRAM = 10, + NATI_TRIG_GET_TSTAMP = 11, + + NATI_TRIG_LAST +} ipa_nati_trigger; + +/******************************************************************************/ +/** + * The following structure used to keep switch stats. 
+ */ +typedef struct +{ + uint32_t pass; + uint32_t fail; +} nati_switch_stats; + +/******************************************************************************/ +/** + * The following structure used to direct map usage. + * + * Maps are needed to map rule handles..orig to new and new to orig. + * See comments in ipa_nat_statemach.c on this topic... + */ +typedef struct +{ + uint32_t orig2new_map; + uint32_t new2orig_map; +} nati_map_pair; + +/******************************************************************************/ +/** + * The following is a nati object that will maintain state relative to + * various API calls. + */ +typedef struct +{ + ipa_nati_state prev_state; + ipa_nati_state curr_state; + bool hold_state; + ipa_nati_state state_to_hold; + uint32_t ddr_tbl_hdl; + uint32_t sram_tbl_hdl; + uint32_t tot_slots_in_sram; + uint32_t back_to_sram_thresh; + /* + * tot_rules_in_table[0] for ddr, and + * tot_rules_in_table[1] for sram + */ + uint32_t tot_rules_in_table[2]; + /* + * map_pairs[0] for ddr, and + * map_pairs[1] for sram + */ + nati_map_pair map_pairs[2]; + /* + * sw_stats[0] for ddr, and + * sw_stats[1] for sram + */ + nati_switch_stats sw_stats[2]; +} ipa_nati_obj; + +/* + * For use with the arrays above..in ipa_nati_obj... + */ +#undef DDR_SUB +#undef SRAM_SUB + +#define DDR_SUB 0 +#define SRAM_SUB 1 + +#undef BACK2_UNSTARTED_STATE +#define BACK2_UNSTARTED_STATE() \ + nati_obj.prev_state = nati_obj.curr_state = NATI_STATE_NULL; + +#undef IN_UNSTARTED_STATE +#define IN_UNSTARTED_STATE() \ + ( nati_obj.prev_state == NATI_STATE_NULL ) + +#undef IN_HYBRID_STATE +#define IN_HYBRID_STATE() \ + ( nati_obj.curr_state == NATI_STATE_HYBRID || \ + nati_obj.curr_state == NATI_STATE_HYBRID_DDR ) + +#undef COMPATIBLE_NMI_4SWITCH +#define COMPATIBLE_NMI_4SWITCH(n) \ + ( (n) == IPA_NAT_MEM_IN_SRAM && nati_obj.curr_state == NATI_STATE_HYBRID_DDR ) || \ + ( (n) == IPA_NAT_MEM_IN_DDR && nati_obj.curr_state == NATI_STATE_HYBRID ) || \ + ( (n) == IPA_NAT_MEM_IN_DDR && nati_obj.curr_state == NATI_STATE_DDR_ONLY ) || \ + ( (n) == IPA_NAT_MEM_IN_SRAM && nati_obj.curr_state == NATI_STATE_SRAM_ONLY ) + +#undef GEN_HOLD_STATE +#define GEN_HOLD_STATE() \ + ( ! IN_HYBRID_STATE() ) ? nati_obj.curr_state : \ + (nati_obj.curr_state == NATI_STATE_HYBRID) ? NATI_STATE_SRAM_ONLY : \ + NATI_STATE_DDR_ONLY + +#undef SRAM_CURRENTLY_ACTIVE +#define SRAM_CURRENTLY_ACTIVE() \ + ( nati_obj.curr_state == NATI_STATE_SRAM_ONLY || \ + nati_obj.curr_state == NATI_STATE_HYBRID ) + +#define SRAM_TO_BE_ACCESSED(t) \ + ( SRAM_CURRENTLY_ACTIVE() || \ + (t) == NATI_TRIG_GOTO_SRAM || \ + (t) == NATI_TRIG_TBL_SWITCH ) + +/* + * NOTE: The exclusion of timestamp retrieval and table creation + * below. + * + * Why? + * + * In re timestamp: + * + * Because timestamp retrieval institutes too many repetitive + * accesses, hence would lead to too many successive votes. Instead, + * it will be handled differently and in the app layer above. + * + * In re table creation: + * + * Because it can't be known, apriori, whether or not sram is + * really available for use. Instead, we'll move table creation + * voting to a place where we know sram is available. + */ +#undef VOTE_REQUIRED +#define VOTE_REQUIRED(t) \ + ( SRAM_TO_BE_ACCESSED(t) && \ + (t) != NATI_TRIG_GET_TSTAMP && \ + (t) != NATI_TRIG_ADD_TABLE ) + +/******************************************************************************/ +/** + * A helper macro for changing a nati object's state... 
+ */ +# undef SET_NATIOBJ_STATE +# define SET_NATIOBJ_STATE(x, s) { \ + (x)->prev_state = (x)->curr_state; \ + (x)->curr_state = s; \ + } + +/******************************************************************************/ +/** + * A function signature for a state/trigger callback function... + */ +typedef int (*nati_statemach_cb)( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ); + +/******************************************************************************/ +/** + * A structure for relating state to trigger callbacks. + */ +typedef struct +{ + ipa_nati_state state; + ipa_nati_trigger trigger; + nati_statemach_cb sm_cb; + const char* state_as_str; + const char* trigger_as_str; + const char* sm_cb_as_str; +} nati_statemach_tuple; + +#undef SM_ROW +#define SM_ROW(s, t, f) \ + { s, t, f, #s, #t, #f } + +/******************************************************************************/ +/** + * FUNCTION: ipa_nati_statemach + * + * PARAMS: + * + * @nati_obj_ptr (IN) A pointer to an initialized nati object + * + * @trigger (IN) The trigger to run through the state machine + * + * @arb_data_ptr (IN) Anything you like. Will be passed, untouched, + * to the state/trigger callback function. + * + * DESCRIPTION: + * + * This function allows a nati object and a trigger to be run + * through the state machine. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +int ipa_nati_statemach( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ); + +/* + * To follow are a number of structures, designed to hold function + * arguments, that are to be passed into the state machine... + */ +typedef struct +{ + uint32_t public_ip_addr; + uint16_t number_of_entries; + uint32_t* tbl_hdl; + const char* mem_type_ptr; +} table_add_args; + +typedef struct +{ + uint32_t tbl_hdl; +} table_del_args; + +typedef table_del_args table_clear_args; + +typedef struct +{ + uint32_t tbl_hdl; + WhichTbl2Use which; + ipa_table_walk_cb walk_cb; + void* arb_data_ptr; +} table_walk_args; + +typedef struct +{ + uint32_t tbl_hdl; + ipa_nati_tbl_stats* nat_stats_ptr; + ipa_nati_tbl_stats* idx_stats_ptr; +} table_stats_args; + +typedef struct +{ + uint32_t tbl_hdl; + const ipa_nat_ipv4_rule* clnt_rule; + uint32_t* rule_hdl; +} rule_add_args; + +typedef struct +{ + uint32_t tbl_hdl; + uint32_t rule_hdl; +} rule_del_args; + +typedef struct +{ + uint32_t tbl_hdl; + uint32_t rule_hdl; + uint32_t* time_stamp; +} timestap_query_args; + +#endif /* #if !defined(_IPA_NAT_STATEMACH_H_) */ diff --git a/ipanat/inc/ipa_nat_utils.h b/ipanat/inc/ipa_nat_utils.h new file mode 100644 index 0000000..b6545da --- /dev/null +++ b/ipanat/inc/ipa_nat_utils.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2013, 2018-2019 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef IPA_NAT_UTILS_H +#define IPA_NAT_UTILS_H + +#include <stdio.h> +#include <string.h> +#include <syslog.h> +#include <time.h> +#include <linux/msm_ipa.h> + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#if !defined(MSM_IPA_TESTS) && !defined(FEATURE_IPA_ANDROID) +#ifdef USE_GLIB +#include <glib.h> +#define strlcpy g_strlcpy +#else +size_t strlcpy(char* dst, const char* src, size_t size); +#endif +#endif + +#define IPAERR(fmt, ...) printf("ERR: %s:%d %s() " fmt, __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__); + +#define IPAINFO(fmt, ...) printf("INFO: %s:%d %s() " fmt, __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__); + +#define IPAWARN(fmt, ...) printf("WARN: %s:%d %s() " fmt, __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__); + +#undef UNUSED +#define UNUSED(v) (void)(v) + +#ifdef NAT_DEBUG +#define IPADBG(fmt, ...) printf("%s:%d %s() " fmt, __FILE__, __LINE__, __FUNCTION__, ##__VA_ARGS__); +#else +#define IPADBG(fmt, ...) +#endif + +typedef struct +{ + int fd; + enum ipa_hw_type ver; +} ipa_descriptor; + +ipa_descriptor* ipa_descriptor_open(void); + +void ipa_descriptor_close( + ipa_descriptor*); + +void ipa_read_debug_info( + const char* debug_file_path); + +static inline char* prep_ioc_nat_dma_cmd_4print( + struct ipa_ioc_nat_dma_cmd* cmd_ptr, + char* buf_ptr, + uint32_t buf_sz ) +{ + uint32_t i, len, buf_left; + + if ( cmd_ptr && buf_ptr && buf_sz ) + { + snprintf( + buf_ptr, + buf_sz, + "NAT_DMA_CMD: mem_type(%u) entries(%u) ", + cmd_ptr->mem_type, + cmd_ptr->entries); + + for ( i = 0; i < cmd_ptr->entries; i++ ) + { + len = strlen(buf_ptr); + + buf_left = buf_sz - len; + + if ( buf_left > 0 && buf_left < buf_sz ) + { + snprintf( + buf_ptr + len, + buf_left, + "[%u](table_index(0x%02X) base_addr(0x%02X) offset(0x%08X) data(0x%04X)) ", + i, + cmd_ptr->dma[i].table_index, + cmd_ptr->dma[i].base_addr, + cmd_ptr->dma[i].offset, + (uint32_t) cmd_ptr->dma[i].data); + } + } + } + + return buf_ptr; +} + +#undef NANOS_PER_SEC +#undef MICROS_PER_SEC +#undef MILLIS_PER_SEC + +#define NANOS_PER_SEC 1000000000 +#define MICROS_PER_SEC 1000000 +#define MILLIS_PER_SEC 1000 + +/** + * A macro for converting seconds to nanoseconds... + */ +#define SECS2NanSECS(x) ((x) * NANOS_PER_SEC) + +/** + * A macro for converting seconds to microseconds... + */ +#define SECS2MicSECS(x) ((x) * MICROS_PER_SEC) + +/** + * A macro for converting seconds to milliseconds... 
+ */ +#define SECS2MilSECS(x) ((x) * MILLIS_PER_SEC) + +/******************************************************************************/ + +typedef enum +{ + TimeAsNanSecs = 0, + TimeAsMicSecs = 1, + TimeAsMilSecs = 2 +} TimeAs_t; + +#undef VALID_TIMEAS +#define VALID_TIMEAS(ta) \ + ( (ta) == TimeAsNanSecs || \ + (ta) == TimeAsMicSecs || \ + (ta) == TimeAsMilSecs ) + +int currTimeAs( + TimeAs_t timeAs, + uint64_t* valPtr ); + +#endif /* IPA_NAT_UTILS_H */ diff --git a/ipanat/inc/ipa_table.h b/ipanat/inc/ipa_table.h new file mode 100644 index 0000000..aa2159b --- /dev/null +++ b/ipanat/inc/ipa_table.h @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef IPA_TABLE_H +#define IPA_TABLE_H + +#include <stdint.h> +#include <stdbool.h> +#include <linux/msm_ipa.h> + +#define IPA_TABLE_MAX_ENTRIES 5120 + +#define IPA_TABLE_INVALID_ENTRY 0x0 + +#undef VALID_INDEX +#define VALID_INDEX(idx) \ + ( (idx) != IPA_TABLE_INVALID_ENTRY ) + +#undef VALID_RULE_HDL +#define VALID_RULE_HDL(hdl) \ + ( (hdl) != IPA_TABLE_INVALID_ENTRY ) + +#undef GOTO_REC +#define GOTO_REC(tbl, rec_idx) \ + ( (tbl)->table_addr + ((rec_idx) * (tbl)->entry_size) ) + +typedef enum +{ + IPA_NAT_BASE_TBL = 0, + IPA_NAT_EXPN_TBL = 1, + IPA_NAT_INDX_TBL = 2, + IPA_NAT_INDEX_EXPN_TBL = 3, + IPA_IPV6CT_BASE_TBL = 4, + IPA_IPV6CT_EXPN_TBL = 5, +} ipa_table_dma_type; + +#define VALID_IPA_TABLE_DMA_TYPE(t) \ + ( (t) >= IPA_NAT_BASE_TBL && (t) <= IPA_IPV6CT_EXPN_TBL ) + +/* + * --------- NAT Rule Handle Entry ID structure --------- + * + * +-----------+-----------+------------------+----------------+ + * | 1 bit | 2 bits | 12 bits | 1 bit | + * +-----------+-----------+------------------+----------------+ + * | 0 - DDR | reserved | index into table | 0 - base | + * | 1 - SRAM | | | 1 - expansion | + * +-----------+-----------+------------------+----------------+ + */ +#define IPA_TABLE_TYPE_BITS 0x00000001 +#define IPA_TABLE_TYPE_MASK 0x00000001 +#define IPA_TABLE_INDX_MASK 0x00000FFF +#define IPA_TABLE_TYPE_MEM_SHIFT 15 + +#undef BREAK_RULE_HDL +#define BREAK_RULE_HDL(tbl, hdl, mt, iet, indx) \ + do { \ + mt = ((hdl) >> IPA_TABLE_TYPE_MEM_SHIFT) & IPA_TABLE_TYPE_MASK; \ + iet = (hdl) & IPA_TABLE_TYPE_MASK; \ + indx = ((hdl) >> IPA_TABLE_TYPE_BITS) & IPA_TABLE_INDX_MASK; \ + indx += (iet) ? tbl->table_entries : 0; \ + /*IPADBG("hdl(%u) -> mt(%u) iet(%u) indx(%u)\n", hdl, mt, iet, indx);*/ \ + } while ( 0 ) + +typedef int (*entry_validity_checker)( + void* entry); + +typedef uint16_t (*entry_next_index_getter)( + void* entry); + +typedef uint16_t (*entry_prev_index_getter)( + void* entry, + uint16_t entry_index, + void* meta, + uint16_t base_table_size); + +typedef void (*entry_prev_index_setter)( + void* entry, + uint16_t entry_index, + uint16_t prev_index, + void* meta, + uint16_t base_table_size); + +typedef int (*entry_head_inserter)( + void* entry, + void* user_data, + uint16_t* dma_command_data); + +typedef int (*entry_tail_inserter)( + void* entry, + void* user_data); + +typedef uint16_t (*entry_delete_head_dma_command_data_getter)( + void* head, + void* next_entry); + +typedef struct +{ + entry_validity_checker entry_is_valid; + entry_next_index_getter entry_get_next_index; + entry_prev_index_getter entry_get_prev_index; + entry_prev_index_setter entry_set_prev_index; + entry_head_inserter entry_head_insert; + entry_tail_inserter entry_tail_insert; + entry_delete_head_dma_command_data_getter + entry_get_delete_head_dma_command_data; +} ipa_table_entry_interface; + +typedef enum +{ + HELP_UPDATE_HEAD = 0, + HELP_UPDATE_ENTRY = 1, + HELP_DELETE_HEAD = 2, + + HELP_UPDATE_MAX, +} dma_help_type; + +#undef VALID_DMA_HELP_TYPE +#define VALID_DMA_HELP_TYPE(t) \ + ( (t) >= HELP_UPDATE_HEAD && (t) < HELP_UPDATE_MAX ) + +typedef struct +{ + uint32_t offset; + ipa_table_dma_type table_type; + ipa_table_dma_type expn_table_type; + uint8_t table_indx; +} ipa_table_dma_cmd_helper; + +typedef struct +{ + char name[IPA_RESOURCE_NAME_MAX]; + + enum ipa3_nat_mem_in nmi; + + int entry_size; + + uint16_t table_entries; + uint16_t expn_table_entries; + uint32_t tot_tbl_ents; + + uint8_t* table_addr; + uint8_t* expn_table_addr; + + uint16_t cur_tbl_cnt; + uint16_t cur_expn_tbl_cnt; + + 
ipa_table_entry_interface* entry_interface; + + ipa_table_dma_cmd_helper* dma_help[HELP_UPDATE_MAX]; + + void* meta; + int meta_entry_size; +} ipa_table; + +typedef struct +{ + uint16_t prev_index; + void* prev_entry; + + uint16_t curr_index; + void* curr_entry; + + uint16_t next_index; + void* next_entry; +} ipa_table_iterator; + + +void ipa_table_init( + ipa_table* table, + const char* table_name, + enum ipa3_nat_mem_in nmi, + int entry_size, + void* meta, + int meta_entry_size, + ipa_table_entry_interface* entry_interface); + +int ipa_table_calculate_entries_num( + ipa_table* table, + uint16_t number_of_entries, + enum ipa3_nat_mem_in nmi); + +int ipa_table_calculate_size( + ipa_table* table); + +uint8_t* ipa_table_calculate_addresses( + ipa_table* table, + uint8_t* base_addr); + +void ipa_table_reset( + ipa_table* table); + +int ipa_table_add_entry( + ipa_table* table, + void* user_data, + uint16_t* index, + uint32_t* rule_hdl, + struct ipa_ioc_nat_dma_cmd* cmd); + +void ipa_table_create_delete_command( + ipa_table* table, + struct ipa_ioc_nat_dma_cmd* cmd, + ipa_table_iterator* iterator); + +void ipa_table_delete_entry( + ipa_table* table, + ipa_table_iterator* iterator, + uint8_t is_prev_empty); + +void ipa_table_erase_entry( + ipa_table* table, + uint16_t index); + +int ipa_table_get_entry( + ipa_table* table, + uint32_t entry_handle, + void** entry, + uint16_t* entry_index); + +void* ipa_table_get_entry_by_index( + ipa_table* table, + uint16_t index); + +void ipa_table_dma_cmd_helper_init( + ipa_table_dma_cmd_helper* dma_cmd_helper, + uint8_t table_indx, + ipa_table_dma_type table_type, + ipa_table_dma_type expn_table_type, + uint32_t offset); + +void ipa_table_dma_cmd_generate( + ipa_table_dma_cmd_helper* dma_cmd_helper, + uint8_t is_expn, + uint32_t entry_offset, + uint16_t data, + struct ipa_ioc_nat_dma_cmd* cmd); + +int ipa_table_iterator_init( + ipa_table_iterator* iterator, + ipa_table* table, + void* curr_entry, + uint16_t curr_index); + +int ipa_table_iterator_next( + ipa_table_iterator* iterator, + ipa_table* table); + +int ipa_table_iterator_end( + ipa_table_iterator* iterator, + ipa_table* table, + uint16_t head_index, + void* head); + +int ipa_table_iterator_is_head_with_tail( + ipa_table_iterator* iterator); + +int ipa_calc_num_sram_table_entries( + uint32_t sram_size, + uint32_t table1_ent_size, + uint32_t table2_ent_size, + uint16_t* num_entries_ptr); + +typedef int (*ipa_table_walk_cb)( + ipa_table* table_ptr, + uint32_t rule_hdl, + void* record_ptr, + uint16_t record_index, + void* meta_record_ptr, + uint16_t meta_record_index, + void* arb_data_ptr ); + +typedef enum +{ + WHEN_SLOT_EMPTY = 0, + WHEN_SLOT_FILLED = 1, + + WHEN_SLOT_MAX +} When2Callback; + +#define VALID_WHEN2CALLBACK(w) \ + ( (w) >= WHEN_SLOT_EMPTY && (w) < WHEN_SLOT_MAX ) + +int ipa_table_walk( + ipa_table* table, + uint16_t start_index, + When2Callback when, + ipa_table_walk_cb walk_cb, + void* arb_data_ptr ); + +int ipa_table_add_dma_cmd( + ipa_table* tbl_ptr, + dma_help_type help_type, + void* rec_ptr, + uint16_t rec_index, + uint16_t data_for_entry, + struct ipa_ioc_nat_dma_cmd* cmd_ptr ); + +#endif diff --git a/ipanat/src/Makefile.am b/ipanat/src/Makefile.am new file mode 100644 index 0000000..abfc353 --- /dev/null +++ b/ipanat/src/Makefile.am @@ -0,0 +1,59 @@ +AM_CFLAGS = -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs -Wno-unused-variable + +if KERNELMODULES +AM_CFLAGS += -I./../inc $(KERNEL_DIR)/include +else +AM_CFLAGS += -I./../inc +endif + +#AM_CFLAGS += -g -DDEBUG -DNAT_DEBUG + 
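/*
 * Illustrative sketch only, referring back to the ipa_table_walk() API
 * declared in ipa_table.h above: a callback that counts occupied records.
 * The "return 0 to continue" convention is an assumption, as are the
 * example names; only the callback signature comes from the header.
 */
static int example_count_cb(
        ipa_table *table_ptr, uint32_t rule_hdl,
        void *record_ptr, uint16_t record_index,
        void *meta_record_ptr, uint16_t meta_record_index,
        void *arb_data_ptr)
{
        uint32_t *count = (uint32_t *) arb_data_ptr;

        (void) table_ptr; (void) rule_hdl; (void) record_ptr;
        (void) record_index; (void) meta_record_ptr; (void) meta_record_index;

        (*count)++;   /* invoked once per filled slot when WHEN_SLOT_FILLED is requested */

        return 0;
}

/* Hypothetical caller side, for a table pointer "tbl":
 *   uint32_t used = 0;
 *   ipa_table_walk(tbl, 0, WHEN_SLOT_FILLED, example_count_cb, &used);
 */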
+common_CFLAGS = -DUSE_GLIB @GLIB_CFLAGS@ + +if !KERNELMODULES +common_LDFLAGS = -lrt @GLIB_LIBS@ +endif + +if KERNELMODULES +library_includedir = ../inc $(KERNEL_DIR)/include $(pkgincludedir) +else +library_includedir = $(pkgincludedir) +endif + +cpp_sources = \ + ipa_nat_map.cpp + +c_sources = \ + ipa_table.c \ + ipa_nat_statemach.c \ + ipa_nat_drvi.c \ + ipa_nat_drv.c \ + ipa_mem_descriptor.c \ + ipa_nat_utils.c \ + ipa_ipv6ct.c + +library_include_HEADERS = \ + ../inc/ipa_mem_descriptor.h \ + ../inc/ipa_nat_drv.h \ + ../inc/ipa_nat_drvi.h \ + ../inc/ipa_nat_map.h \ + ../inc/ipa_nat_statemach.h \ + ../inc/ipa_nat_utils.h \ + ../inc/ipa_table.h + +if KERNELMODULES +noinst_LIBRARIES = libipanat.a +libipanat_a_C = @C@ +libipanat_a_CC = @CC@ +libipanat_a_SOURCES = $(c_sources) $(cpp_sources) +libipanat_a_CFLAGS = $(AM_CFLAGS) $(common_CFLAGS) +libipanat_a_CXXFLAGS = $(AM_CFLAGS) $(common_CPPFLAGS) +else +lib_LTLIBRARIES = libipanat.la +libipanat_la_C = @C@ +libipanat_la_CC = @CC@ +libipanat_la_SOURCES = $(c_sources) $(cpp_sources) +libipanat_la_CFLAGS = $(AM_CFLAGS) $(common_CFLAGS) +libipanat_la_CXXFLAGS = $(AM_CFLAGS) $(common_CPPFLAGS) +libipanat_la_LDFLAGS = -shared $(common_LDFLAGS) -version-info 1:0:0 +endif diff --git a/ipanat/src/ipa_ipv6ct.c b/ipanat/src/ipa_ipv6ct.c new file mode 100644 index 0000000..a5ffbb0 --- /dev/null +++ b/ipanat/src/ipa_ipv6ct.c @@ -0,0 +1,856 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "ipa_ipv6ct.h" +#include "ipa_ipv6cti.h" + +#include <sys/ioctl.h> +#include <stdlib.h> +#include <errno.h> +#include <pthread.h> +#include <unistd.h> + +#define IPA_IPV6CT_DEBUG_FILE_PATH "/sys/kernel/debug/ipa/ipv6ct" +#define IPA_IPV6CT_TABLE_NAME "IPA IPv6CT table" + +static int ipa_ipv6ct_create_table(ipa_ipv6ct_table* ipv6ct_table, uint16_t number_of_entries, uint8_t table_index); +static int ipa_ipv6ct_destroy_table(ipa_ipv6ct_table* ipv6ct_table); +static void ipa_ipv6ct_create_table_dma_cmd_helpers(ipa_ipv6ct_table* ipv6ct_table, uint8_t table_indx); +static int ipa_ipv6ct_post_init_cmd(ipa_ipv6ct_table* ipv6ct_table, uint8_t tbl_index); +static int ipa_ipv6ct_post_dma_cmd(struct ipa_ioc_nat_dma_cmd* cmd); +static uint16_t ipa_ipv6ct_hash(const ipa_ipv6ct_rule* rule, uint16_t size); +static uint16_t ipa_ipv6ct_xor_segments(uint64_t num); + +static int table_entry_is_valid(void* entry); +static uint16_t table_entry_get_next_index(void* entry); +static uint16_t table_entry_get_prev_index(void* entry, uint16_t entry_index, void* meta, uint16_t base_table_size); +static void table_entry_set_prev_index(void* entry, uint16_t entry_index, uint16_t prev_index, + void* meta, uint16_t base_table_size); +static int table_entry_head_insert(void* entry, void* user_data, uint16_t* dma_command_data); +static int table_entry_tail_insert(void* entry, void* user_data); +static uint16_t table_entry_get_delete_head_dma_command_data(void* head, void* next_entry); + +static ipa_ipv6ct ipv6ct; +static pthread_mutex_t ipv6ct_mutex = PTHREAD_MUTEX_INITIALIZER; + +static ipa_table_entry_interface entry_interface = +{ + table_entry_is_valid, + table_entry_get_next_index, + table_entry_get_prev_index, + table_entry_set_prev_index, + table_entry_head_insert, + table_entry_tail_insert, + table_entry_get_delete_head_dma_command_data +}; + +/** + * ipa_ipv6ct_add_tbl() - Adds a new IPv6CT table + * @number_of_entries: [in] number of IPv6CT entries + * @table_handle: [out] handle of new IPv6CT table + * + * This function creates new IPv6CT table and posts IPv6CT init command to HW + * + * Returns: 0 On Success, negative on failure + */ +int ipa_ipv6ct_add_tbl(uint16_t number_of_entries, uint32_t* table_handle) +{ + int ret; + ipa_ipv6ct_table* ipv6ct_table; + + IPADBG("\n"); + + if (table_handle == NULL || number_of_entries == 0) + { + IPAERR("Invalid parameters table_handle=%pK number_of_entries=%d\n", table_handle, number_of_entries); + return -EINVAL; + } + + *table_handle = 0; + + if (ipv6ct.table_cnt >= IPA_IPV6CT_MAX_TBLS) + { + IPAERR("Can't add addition IPv6 connection tracking table. 
Maximum %d tables allowed\n", IPA_IPV6CT_MAX_TBLS); + return -EINVAL; + } + + if (!ipv6ct.ipa_desc) + { + ipv6ct.ipa_desc = ipa_descriptor_open(); + if (ipv6ct.ipa_desc == NULL) + { + IPAERR("failed to open IPA driver file descriptor\n"); + return -EIO; + } + } + + if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0) + { + IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver); + ret = -EPERM; + goto bail_ipa_desc; + } + + ipv6ct_table = &ipv6ct.tables[ipv6ct.table_cnt]; + ret = ipa_ipv6ct_create_table(ipv6ct_table, number_of_entries, ipv6ct.table_cnt); + if (ret) + { + IPAERR("unable to create ipv6ct table Error: %d\n", ret); + goto bail_ipa_desc; + } + + /* Initialize the ipa hw with ipv6ct table dimensions */ + ret = ipa_ipv6ct_post_init_cmd(ipv6ct_table, ipv6ct.table_cnt); + if (ret) + { + IPAERR("unable to post ipv6ct_init command Error %d\n", ret); + goto bail_ipv6ct_table; + } + + /* Return table handle */ + ++ipv6ct.table_cnt; + *table_handle = ipv6ct.table_cnt; + + IPADBG("Returning table handle 0x%x\n", *table_handle); + return 0; + +bail_ipv6ct_table: + ipa_ipv6ct_destroy_table(ipv6ct_table); +bail_ipa_desc: + if (!ipv6ct.table_cnt) { + ipa_descriptor_close(ipv6ct.ipa_desc); + ipv6ct.ipa_desc = NULL; + } + return ret; +} + +int ipa_ipv6ct_del_tbl(uint32_t table_handle) +{ + ipa_ipv6ct_table* ipv6ct_table; + int ret; + + IPADBG("\n"); + + if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0) + { + IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver); + return -EINVAL; + } + + if (table_handle == IPA_TABLE_INVALID_ENTRY || table_handle > IPA_IPV6CT_MAX_TBLS) + { + IPAERR("invalid table handle %d passed\n", table_handle); + return -EINVAL; + } + IPADBG("Passed Table Handle: 0x%x\n", table_handle); + + if (pthread_mutex_lock(&ipv6ct_mutex)) + { + IPAERR("unable to lock the ipv6ct mutex\n"); + return -EINVAL; + } + + ipv6ct_table = &ipv6ct.tables[table_handle - 1]; + if (!ipv6ct_table->mem_desc.valid) + { + IPAERR("invalid table handle %d\n", table_handle); + ret = -EINVAL; + goto unlock; + } + + ret = ipa_ipv6ct_destroy_table(ipv6ct_table); + if (ret) + { + IPAERR("unable to delete IPV6CT table with handle %d\n", table_handle); + goto unlock; + } + + if (!--ipv6ct.table_cnt) { + ipa_descriptor_close(ipv6ct.ipa_desc); + ipv6ct.ipa_desc = NULL; + } + +unlock: + if (pthread_mutex_unlock(&ipv6ct_mutex)) + { + IPAERR("unable to unlock the ipv6ct mutex\n"); + return (ret) ? 
ret : -EPERM; + } + + IPADBG("return\n"); + return ret; +} + +int ipa_ipv6ct_add_rule(uint32_t table_handle, const ipa_ipv6ct_rule* user_rule, uint32_t* rule_handle) +{ + int ret; + ipa_ipv6ct_table* ipv6ct_table; + uint16_t new_entry_index; + uint32_t new_entry_handle; + uint32_t cmd_sz = + sizeof(struct ipa_ioc_nat_dma_cmd) + + (MAX_DMA_ENTRIES_FOR_ADD * sizeof(struct ipa_ioc_nat_dma_one)); + char cmd_buf[cmd_sz]; + struct ipa_ioc_nat_dma_cmd* cmd = + (struct ipa_ioc_nat_dma_cmd*) cmd_buf; + + IPADBG("In\n"); + + memset(cmd_buf, 0, sizeof(cmd_buf)); + + if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0) + { + IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver); + return -EINVAL; + } + + if (table_handle == IPA_TABLE_INVALID_ENTRY || table_handle > IPA_IPV6CT_MAX_TBLS || + rule_handle == NULL || user_rule == NULL) + { + IPAERR("Invalid parameters table_handle=%d rule_handle=%pK user_rule=%pK\n", + table_handle, rule_handle, user_rule); + return -EINVAL; + } + IPADBG("Passed Table handle: 0x%x\n", table_handle); + + if (user_rule->protocol == IPA_IPV6CT_INVALID_PROTO_FIELD_CMP) + { + IPAERR("invalid parameter protocol=%d\n", user_rule->protocol); + return -EINVAL; + } + + if (pthread_mutex_lock(&ipv6ct_mutex)) + { + IPAERR("unable to lock the ipv6ct mutex\n"); + return -EINVAL; + } + + ipv6ct_table = &ipv6ct.tables[table_handle - 1]; + if (!ipv6ct_table->mem_desc.valid) + { + IPAERR("invalid table handle %d\n", table_handle); + ret = -EINVAL; + goto unlock; + } + + new_entry_index = ipa_ipv6ct_hash(user_rule, ipv6ct_table->table.table_entries - 1); + + ret = ipa_table_add_entry(&ipv6ct_table->table, (void*)user_rule, &new_entry_index, &new_entry_handle, cmd); + if (ret) + { + IPAERR("failed to add a new IPV6CT entry\n"); + goto unlock; + } + + ret = ipa_ipv6ct_post_dma_cmd(cmd); + if (ret) + { + IPAERR("unable to post dma command\n"); + goto bail; + } + + if (pthread_mutex_unlock(&ipv6ct_mutex)) + { + IPAERR("unable to unlock the ipv6ct mutex\n"); + return -EPERM; + } + + *rule_handle = new_entry_handle; + + IPADBG("return\n"); + + return 0; + +bail: + ipa_table_erase_entry(&ipv6ct_table->table, new_entry_index); + +unlock: + if (pthread_mutex_unlock(&ipv6ct_mutex)) + IPAERR("unable to unlock the ipv6ct mutex\n"); + + IPADBG("return\n"); + + return ret; +} + +int ipa_ipv6ct_del_rule(uint32_t table_handle, uint32_t rule_handle) +{ + ipa_ipv6ct_table* ipv6ct_table; + ipa_table_iterator table_iterator; + ipa_ipv6ct_hw_entry* entry; + uint16_t index; + uint32_t cmd_sz = + sizeof(struct ipa_ioc_nat_dma_cmd) + + (MAX_DMA_ENTRIES_FOR_DEL * sizeof(struct ipa_ioc_nat_dma_one)); + char cmd_buf[cmd_sz]; + struct ipa_ioc_nat_dma_cmd* cmd = + (struct ipa_ioc_nat_dma_cmd*) cmd_buf; + int ret; + + IPADBG("In\n"); + + memset(cmd_buf, 0, sizeof(cmd_buf)); + + if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0) + { + IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver); + return -EINVAL; + } + + if (table_handle == IPA_TABLE_INVALID_ENTRY || table_handle > IPA_IPV6CT_MAX_TBLS || + rule_handle == IPA_TABLE_INVALID_ENTRY) + { + IPAERR("Invalid parameters table_handle=%d rule_handle=%d\n", table_handle, rule_handle); + return -EINVAL; + } + IPADBG("Passed Table: 0x%x and rule handle 0x%x\n", table_handle, rule_handle); + + if (pthread_mutex_lock(&ipv6ct_mutex)) + { + IPAERR("unable to lock the ipv6ct mutex\n"); + return -EINVAL; + } + + ipv6ct_table = &ipv6ct.tables[table_handle - 1]; + if (!ipv6ct_table->mem_desc.valid) + { + IPAERR("invalid table 
handle %d\n", table_handle); + ret = -EINVAL; + goto unlock; + } + + ret = ipa_table_get_entry(&ipv6ct_table->table, rule_handle, (void**)&entry, &index); + if (ret) + { + IPAERR("unable to retrive the entry with handle=%d in IPV6CT table with handle=%d\n", + rule_handle, table_handle); + goto unlock; + } + + ret = ipa_table_iterator_init(&table_iterator, &ipv6ct_table->table, entry, index); + if (ret) + { + IPAERR("unable to create iterator which points to the entry index=%d in IPV6CT table with handle=%d\n", + index, table_handle); + goto unlock; + } + + ipa_table_create_delete_command(&ipv6ct_table->table, cmd, &table_iterator); + + ret = ipa_ipv6ct_post_dma_cmd(cmd); + if (ret) + { + IPAERR("unable to post dma command\n"); + goto unlock; + } + + if (!ipa_table_iterator_is_head_with_tail(&table_iterator)) + { + /* The entry can be deleted */ + uint8_t is_prev_empty = (table_iterator.prev_entry != NULL && + ((ipa_ipv6ct_hw_entry*)table_iterator.prev_entry)->protocol == IPA_IPV6CT_INVALID_PROTO_FIELD_CMP); + ipa_table_delete_entry(&ipv6ct_table->table, &table_iterator, is_prev_empty); + } + +unlock: + if (pthread_mutex_unlock(&ipv6ct_mutex)) + { + IPAERR("unable to unlock the ipv6ct mutex\n"); + return (ret) ? ret : -EPERM; + } + + IPADBG("return\n"); + + return ret; +} + +int ipa_ipv6ct_query_timestamp(uint32_t table_handle, uint32_t rule_handle, uint32_t* time_stamp) +{ + int ret; + ipa_ipv6ct_table* ipv6ct_table; + ipa_ipv6ct_hw_entry *entry; + + IPADBG("\n"); + + if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0) + { + IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver); + return -EINVAL; + } + + if (table_handle == IPA_TABLE_INVALID_ENTRY || table_handle > IPA_IPV6CT_MAX_TBLS || + rule_handle == IPA_TABLE_INVALID_ENTRY || time_stamp == NULL) + { + IPAERR("invalid parameters passed table_handle=%d rule_handle=%d time_stamp=%pK\n", + table_handle, rule_handle, time_stamp); + return -EINVAL; + } + IPADBG("Passed Table: %d and rule handle %d\n", table_handle, rule_handle); + + if (pthread_mutex_lock(&ipv6ct_mutex)) + { + IPAERR("unable to lock the ipv6ct mutex\n"); + return -EINVAL; + } + + ipv6ct_table = &ipv6ct.tables[table_handle - 1]; + if (!ipv6ct_table->mem_desc.valid) + { + IPAERR("invalid table handle %d\n", table_handle); + ret = -EINVAL; + goto unlock; + } + + ret = ipa_table_get_entry(&ipv6ct_table->table, rule_handle, (void**)&entry, NULL); + if (ret) + { + IPAERR("unable to retrive the entry with handle=%d in IPV6CT table with handle=%d\n", + rule_handle, table_handle); + goto unlock; + } + + *time_stamp = entry->time_stamp; + +unlock: + if (pthread_mutex_unlock(&ipv6ct_mutex)) + { + IPAERR("unable to unlock the ipv6ct mutex\n"); + return (ret) ? ret : -EPERM; + } + + IPADBG("return\n"); + return ret; +} + +/** +* ipv6ct_hash() - Find the index into ipv6ct table +* @rule: [in] an IPv6CT rule +* @size: [in] size of the IPv6CT table +* +* This hash method is used to find the hash index of an entry into IPv6CT table. +* In case of result zero, N-1 will be returned, where N is size of IPv6CT table. 
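/*
 * Illustrative sketch only of the bucket selection described in this comment
 * (the real implementation, ipa_ipv6ct_hash(), follows below): XOR-fold the
 * rule fields to 16 bits, mask with (table_entries - 1), and remap a result
 * of zero to the last bucket, because index 0 is the reserved invalid entry.
 */
static uint16_t example_bucket(uint16_t folded_fields, uint16_t table_entries)
{
        uint16_t mask   = table_entries - 1;      /* table size is a power of two */
        uint16_t bucket = folded_fields & mask;

        return (bucket == 0) ? mask : bucket;     /* bucket 0 is never used */
}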
+* +* Returns: >0 index into IPv6CT table, negative on failure +*/ +static uint16_t ipa_ipv6ct_hash(const ipa_ipv6ct_rule* rule, uint16_t size) +{ + uint16_t hash = 0; + + IPADBG("src_ipv6_lsb 0x%llx\n", rule->src_ipv6_lsb); + IPADBG("src_ipv6_msb 0x%llx\n", rule->src_ipv6_msb); + IPADBG("dest_ipv6_lsb 0x%llx\n", rule->dest_ipv6_lsb); + IPADBG("dest_ipv6_msb 0x%llx\n", rule->dest_ipv6_msb); + IPADBG("src_port: 0x%x dest_port: 0x%x\n", rule->src_port, rule->dest_port); + IPADBG("protocol: 0x%x size: 0x%x\n", rule->protocol, size); + + hash ^= ipa_ipv6ct_xor_segments(rule->src_ipv6_lsb); + hash ^= ipa_ipv6ct_xor_segments(rule->src_ipv6_msb); + hash ^= ipa_ipv6ct_xor_segments(rule->dest_ipv6_lsb); + hash ^= ipa_ipv6ct_xor_segments(rule->dest_ipv6_msb); + + hash ^= rule->src_port; + hash ^= rule->dest_port; + hash ^= rule->protocol; + + /* + * The size passed to hash function expected be power^2-1, while the actual size is power^2, + * actual_size = size + 1 + */ + hash &= size; + + /* If the hash resulted to zero then set it to maximum value as zero is unused entry in ipv6ct table */ + if (hash == 0) + { + hash = size; + } + + IPADBG("ipa_ipv6ct_hash returning value: %d\n", hash); + return hash; +} + +static uint16_t ipa_ipv6ct_xor_segments(uint64_t num) +{ + const uint64_t mask = 0xffff; + const size_t bits_in_two_byte = 16; + uint16_t ret = 0; + + IPADBG("\n"); + + while (num) + { + ret ^= (uint16_t)(num & mask); + num >>= bits_in_two_byte; + } + + IPADBG("return\n"); + return ret; +} + +static int table_entry_is_valid(void* entry) +{ + ipa_ipv6ct_hw_entry* ipv6ct_entry = (ipa_ipv6ct_hw_entry*)entry; + + IPADBG("\n"); + + return ipv6ct_entry->enable; +} + +static uint16_t table_entry_get_next_index(void* entry) +{ + uint16_t result; + ipa_ipv6ct_hw_entry* ipv6ct_entry = (ipa_ipv6ct_hw_entry*)entry; + + IPADBG("\n"); + + result = ipv6ct_entry->next_index; + + IPADBG("Next entry of %pK is %d\n", entry, result); + return result; +} + +static uint16_t table_entry_get_prev_index(void* entry, uint16_t entry_index, void* meta, uint16_t base_table_size) +{ + uint16_t result; + ipa_ipv6ct_hw_entry* ipv6ct_entry = (ipa_ipv6ct_hw_entry*)entry; + + IPADBG("\n"); + + result = ipv6ct_entry->prev_index; + + IPADBG("Previous entry of %d is %d\n", entry_index, result); + return result; +} + +static void table_entry_set_prev_index(void* entry, uint16_t entry_index, uint16_t prev_index, + void* meta, uint16_t base_table_size) +{ + ipa_ipv6ct_hw_entry* ipv6ct_entry = (ipa_ipv6ct_hw_entry*)entry; + + IPADBG("Previous entry of %d is %d\n", entry_index, prev_index); + + ipv6ct_entry->prev_index = prev_index; + + IPADBG("return\n"); +} + +static int table_entry_copy_from_user(void* entry, void* user_data) +{ + ipa_ipv6ct_hw_entry* ipv6ct_entry = (ipa_ipv6ct_hw_entry*)entry; + const ipa_ipv6ct_rule* user_rule = (const ipa_ipv6ct_rule*)user_data; + + IPADBG("\n"); + + ipv6ct_entry->src_ipv6_lsb = user_rule->src_ipv6_lsb; + ipv6ct_entry->src_ipv6_msb = user_rule->src_ipv6_msb; + ipv6ct_entry->dest_ipv6_lsb = user_rule->dest_ipv6_lsb; + ipv6ct_entry->dest_ipv6_msb = user_rule->dest_ipv6_msb; + ipv6ct_entry->protocol = user_rule->protocol; + ipv6ct_entry->src_port = user_rule->src_port; + ipv6ct_entry->dest_port = user_rule->dest_port; + + switch (user_rule->direction_settings) + { + case IPA_IPV6CT_DIRECTION_DENY_ALL: + break; + case IPA_IPV6CT_DIRECTION_ALLOW_OUT: + ipv6ct_entry->out_allowed = IPA_IPV6CT_DIRECTION_ALLOW_BIT; + break; + case IPA_IPV6CT_DIRECTION_ALLOW_IN: + ipv6ct_entry->in_allowed = 
IPA_IPV6CT_DIRECTION_ALLOW_BIT; + break; + case IPA_IPV6CT_DIRECTION_ALLOW_ALL: + ipv6ct_entry->out_allowed = IPA_IPV6CT_DIRECTION_ALLOW_BIT; + ipv6ct_entry->in_allowed = IPA_IPV6CT_DIRECTION_ALLOW_BIT; + break; + default: + IPAERR("wrong value for IPv6CT direction setting parameter %d\n", user_rule->direction_settings); + return -EINVAL; + } + + IPADBG("return\n"); + return 0; +} + +static int table_entry_head_insert(void* entry, void* user_data, uint16_t* dma_command_data) +{ + int ret; + + IPADBG("\n"); + + ret = table_entry_copy_from_user(entry, user_data); + if (ret) + { + IPAERR("unable to copy from user a new entry\n"); + return ret; + } + + *dma_command_data = 0; + ((ipa_ipv6ct_flags*)dma_command_data)->enable = IPA_IPV6CT_FLAG_ENABLE_BIT; + + IPADBG("return\n"); + return 0; +} + +static int table_entry_tail_insert(void* entry, void* user_data) +{ + int ret; + + IPADBG("\n"); + + ret = table_entry_copy_from_user(entry, user_data); + if (ret) + { + IPAERR("unable to copy from user a new entry\n"); + return ret; + } + + ((ipa_ipv6ct_hw_entry*)entry)->enable = IPA_IPV6CT_FLAG_ENABLE_BIT; + + IPADBG("return\n"); + return 0; +} + +static uint16_t table_entry_get_delete_head_dma_command_data(void* head, void* next_entry) +{ + IPADBG("\n"); + return IPA_IPV6CT_INVALID_PROTO_FIELD_VALUE; +} + +/** + * ipa_ipv6ct_create_table() - Creates a new IPv6CT table + * @ipv6ct_table: [in] IPv6CT table + * @number_of_entries: [in] number of IPv6CT entries + * @table_index: [in] the index of the IPv6CT table + * + * This function creates new IPv6CT table: + * - Initializes table, memory descriptor and table_dma_cmd_helpers structures + * - Allocates, maps and clears the memory for table + * + * Returns: 0 On Success, negative on failure + */ +static int ipa_ipv6ct_create_table(ipa_ipv6ct_table* ipv6ct_table, uint16_t number_of_entries, uint8_t table_index) +{ + int ret, size; + + IPADBG("\n"); + + ipa_table_init( + &ipv6ct_table->table, IPA_IPV6CT_TABLE_NAME, IPA_NAT_MEM_IN_DDR, + sizeof(ipa_ipv6ct_hw_entry), NULL, 0, &entry_interface); + + ret = ipa_table_calculate_entries_num( + &ipv6ct_table->table, number_of_entries, IPA_NAT_MEM_IN_DDR); + + if (ret) + { + IPAERR("unable to calculate number of entries in ipv6ct table %d, while required by user %d\n", + table_index, number_of_entries); + return ret; + } + + size = ipa_table_calculate_size(&ipv6ct_table->table); + IPADBG("IPv6CT table size: %d\n", size); + + ipa_mem_descriptor_init( + &ipv6ct_table->mem_desc, + IPA_IPV6CT_DEV_NAME, + size, + table_index, + IPA_IOC_ALLOC_IPV6CT_TABLE, + IPA_IOC_DEL_IPV6CT_TABLE, + false); /* false here means don't consider using sram */ + + ret = ipa_mem_descriptor_allocate_memory( + &ipv6ct_table->mem_desc, + ipv6ct.ipa_desc->fd); + + if (ret) + { + IPAERR("unable to allocate ipv6ct memory descriptor Error: %d\n", ret); + goto bail; + } + + ipa_table_calculate_addresses(&ipv6ct_table->table, ipv6ct_table->mem_desc.base_addr); + + ipa_table_reset(&ipv6ct_table->table); + + ipa_ipv6ct_create_table_dma_cmd_helpers(ipv6ct_table, table_index); + + IPADBG("return\n"); + return 0; + +bail: + memset(ipv6ct_table, 0, sizeof(*ipv6ct_table)); + return ret; +} + +static int ipa_ipv6ct_destroy_table(ipa_ipv6ct_table* ipv6ct_table) +{ + int ret; + + IPADBG("\n"); + + ret = ipa_mem_descriptor_delete(&ipv6ct_table->mem_desc, ipv6ct.ipa_desc->fd); + if (ret) + IPAERR("unable to delete IPV6CT descriptor\n"); + + memset(ipv6ct_table, 0, sizeof(*ipv6ct_table)); + + IPADBG("return\n"); + return ret; +} + +/** + * 
ipa_ipv6ct_create_table_dma_cmd_helpers() - + * Creates dma_cmd_helpers for base table in the received IPv6CT table + * @ipv6ct_table: [in] IPv6CT table + * @table_indx: [in] The index of the IPv6CT table + * + * A DMA command helper helps to generate the DMA command for one + * specific field change. Each table has 3 different types of field + * change: update_head, update_entry and delete_head. This function + * creates the helpers and updates the base table correspondingly. + */ +static void ipa_ipv6ct_create_table_dma_cmd_helpers( + ipa_ipv6ct_table* ipv6ct_table, + uint8_t table_indx ) +{ + IPADBG("\n"); + + ipa_table_dma_cmd_helper_init( + &ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_FLAGS], + table_indx, + IPA_IPV6CT_BASE_TBL, + IPA_IPV6CT_EXPN_TBL, + ipv6ct_table->mem_desc.addr_offset + IPA_IPV6CT_RULE_FLAG_FIELD_OFFSET); + + ipa_table_dma_cmd_helper_init( + &ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_NEXT_INDEX], + table_indx, + IPA_IPV6CT_BASE_TBL, + IPA_IPV6CT_EXPN_TBL, + ipv6ct_table->mem_desc.addr_offset + IPA_IPV6CT_RULE_NEXT_FIELD_OFFSET); + + ipa_table_dma_cmd_helper_init( + &ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_PROTOCOL], + table_indx, + IPA_IPV6CT_BASE_TBL, + IPA_IPV6CT_EXPN_TBL, + ipv6ct_table->mem_desc.addr_offset + IPA_IPV6CT_RULE_PROTO_FIELD_OFFSET); + + ipv6ct_table->table.dma_help[HELP_UPDATE_HEAD] = + &ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_FLAGS]; + ipv6ct_table->table.dma_help[HELP_UPDATE_ENTRY] = + &ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_NEXT_INDEX]; + ipv6ct_table->table.dma_help[HELP_DELETE_HEAD] = + &ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_PROTOCOL]; + + IPADBG("return\n"); +} + +static int ipa_ipv6ct_post_init_cmd(ipa_ipv6ct_table* ipv6ct_table, uint8_t tbl_index) +{ + struct ipa_ioc_ipv6ct_init cmd; + int ret; + + IPADBG("\n"); + + cmd.tbl_index = tbl_index; + + cmd.base_table_offset = ipv6ct_table->mem_desc.addr_offset; + cmd.expn_table_offset = cmd.base_table_offset + (ipv6ct_table->table.table_entries * sizeof(ipa_ipv6ct_hw_entry)); + + /* Driverr/HW expected base table size to be power^2-1 due to H/W hash calculation */ + cmd.table_entries = ipv6ct_table->table.table_entries - 1; + cmd.expn_table_entries = ipv6ct_table->table.expn_table_entries; + + ret = ioctl(ipv6ct.ipa_desc->fd, IPA_IOC_INIT_IPV6CT_TABLE, &cmd); + if (ret) + { + IPAERR("unable to post init cmd Error: %d IPA fd %d\n", ret, ipv6ct.ipa_desc->fd); + return ret; + } + + IPADBG("Posted IPA_IOC_INIT_IPV6CT_TABLE to kernel successfully\n"); + return 0; +} + +static int ipa_ipv6ct_post_dma_cmd(struct ipa_ioc_nat_dma_cmd* cmd) +{ + IPADBG("\n"); + + cmd->mem_type = IPA_NAT_MEM_IN_DDR; + + if (ioctl(ipv6ct.ipa_desc->fd, IPA_IOC_TABLE_DMA_CMD, cmd)) + { + IPAERR("ioctl (IPA_IOC_TABLE_DMA_CMD) on fd %d has failed\n", + ipv6ct.ipa_desc->fd); + return -EIO; + } + IPADBG("posted IPA_IOC_TABLE_DMA_CMD to kernel successfully\n"); + return 0; +} + +void ipa_ipv6ct_dump_table(uint32_t table_handle) +{ + ipa_ipv6ct_table* ipv6ct_table; + + if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0) + { + IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver); + return; + } + + if (table_handle == IPA_TABLE_INVALID_ENTRY || table_handle > IPA_IPV6CT_MAX_TBLS) + { + IPAERR("invalid parameters passed %d\n", table_handle); + return; + } + + if (pthread_mutex_lock(&ipv6ct_mutex)) + { + IPAERR("unable to lock the ipv6ct mutex\n"); + return; + } + + ipv6ct_table = &ipv6ct.tables[table_handle - 1]; + if 
(!ipv6ct_table->mem_desc.valid) + { + IPAERR("invalid table handle %d\n", table_handle); + goto unlock; + } + + /* Prevents interleaving with later kernel printouts. Flush doesn't help. */ + sleep(1); + ipa_read_debug_info(IPA_IPV6CT_DEBUG_FILE_PATH); + sleep(1); + +unlock: + if (pthread_mutex_unlock(&ipv6ct_mutex)) + IPAERR("unable to unlock the ipv6ct mutex\n"); +} diff --git a/ipanat/src/ipa_mem_descriptor.c b/ipanat/src/ipa_mem_descriptor.c new file mode 100644 index 0000000..172564d --- /dev/null +++ b/ipanat/src/ipa_mem_descriptor.c @@ -0,0 +1,366 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "ipa_mem_descriptor.h" +#include "ipa_nat_utils.h" + +#include <sys/ioctl.h> +#include <fcntl.h> +#include <sys/mman.h> +#include <errno.h> +#include <unistd.h> + +#define IPA_DEV_DIR "/dev/" + +#ifdef IPA_ON_R3PC +#define IPA_DEVICE_MMAP_MEM_SIZE (2 * 1024UL * 1024UL - 1) +#endif + +static int AllocateMemory( + ipa_mem_descriptor* desc, + int ipa_fd) +{ + struct ipa_ioc_nat_ipv6ct_table_alloc cmd; + int ret = 0; + + IPADBG("In\n"); + +#ifndef IPA_ON_R3PC + /* + * If/when the number of NAT table entries requested yields a byte + * count that will fit in SRAM, SRAM will be used to hold the NAT + * table. When SRAM is used, some odd things can happen, relative + * to mmap'ing's virtual memory scheme, that require us to make + * some adjustments. + * + * To be more specific, the real physical SRAM location for the + * table and the table's size may not play well with Linux's + * mmap'ing virtual memory scheme....which likes everything to be + * PAGE_SIZE aligned and sized in multiples of PAGE_SIZE. + * + * Given the above, if the NAT table's (in SRAM) physical address + * in not on a PAGE_SIZE boundary, it will be offset into the + * mmap'd virtual memory, hence we need to know that offset in + * order to get to the table. 
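/*
 * Illustrative arithmetic only, for the SRAM/mmap alignment discussion in
 * this comment: given the table's offset into the mmap'd window and the
 * table size, compute how many whole pages the mapping must span. This only
 * mirrors the intent behind the kernel-reported best_nat_in_sram_size_rqst;
 * the helper name and rounding choice here are assumptions.
 */
#include <stddef.h>
#include <unistd.h>

static size_t example_mmap_span(size_t offset_into_mmap, size_t table_size)
{
        size_t page = (size_t) sysconf(_SC_PAGESIZE);
        size_t end  = offset_into_mmap + table_size;

        return ((end + page - 1) / page) * page;  /* round the span up to a page multiple */
}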
If said offset plus the table's + * size takes it across a PAGE_SIZE boundary, we need to allocate + * more space to ensure that the table is completely within the + * mmap'd virtual memory. + */ + desc->sram_available = desc->sram_to_be_used = false; + + memset(&desc->nat_sram_info, 0, sizeof(desc->nat_sram_info)); + + ret = ioctl( + ipa_fd, + IPA_IOC_GET_NAT_IN_SRAM_INFO, + &desc->nat_sram_info); + + if ( ret == 0 ) + { + IPADBG("sram_mem_available_for_nat(0x%08x) " + "nat_table_offset_into_mmap(0x%08x) " + "best_nat_in_sram_size_rqst(0x%08x)\n", + desc->nat_sram_info.sram_mem_available_for_nat, + desc->nat_sram_info.nat_table_offset_into_mmap, + desc->nat_sram_info.best_nat_in_sram_size_rqst); + + desc->sram_available = true; + + if ( desc->consider_using_sram ) + { + if (desc->orig_rqst_size <= + desc->nat_sram_info.sram_mem_available_for_nat) + { + desc->sram_to_be_used = true; + } + } + } +#endif + + /* + * Now do the actual allocation... + */ + memset(&cmd, 0, sizeof(cmd)); + + cmd.size = desc->orig_rqst_size; + + ret = ioctl(ipa_fd, desc->allocate_ioctl_num, &cmd); + + if (ret) + { + IPAERR("Unable to post %s allocate table command. Error %d IPA fd %d\n", + desc->name, ret, ipa_fd); + goto bail; + } + + desc->addr_offset = cmd.offset; + + IPADBG("The memory desc for %s allocated successfully\n", desc->name); + +bail: + IPADBG("Out\n"); + + return ret; +} + +static int MapMemory( + ipa_mem_descriptor* desc, + int ipa_fd) +{ + char device_full_path[IPA_RESOURCE_NAME_MAX]; + size_t ipa_dev_dir_path_len; + int device_fd; + int ret = 0; + + UNUSED(ipa_fd); + + IPADBG("In\n"); + + ipa_dev_dir_path_len = + strlcpy(device_full_path, IPA_DEV_DIR, IPA_RESOURCE_NAME_MAX); + + if (ipa_dev_dir_path_len >= IPA_RESOURCE_NAME_MAX) + { + IPAERR("Unable to copy a string with size %zu to buffer with size %d\n", + ipa_dev_dir_path_len, IPA_RESOURCE_NAME_MAX); + ret = -EINVAL; + goto bail; + } + + strlcpy(device_full_path + ipa_dev_dir_path_len, + desc->name, IPA_RESOURCE_NAME_MAX - ipa_dev_dir_path_len); + + device_fd = open(device_full_path, O_RDWR); + + if (device_fd < 0) + { + IPAERR("unable to open the desc %s in path %s. Error:%d\n", + desc->name, device_full_path, device_fd); + ret = -EIO; + goto bail; + } + +#ifndef IPA_ON_R3PC + /* + * If/when the number of NAT table entries requested yields a byte + * count that will fit in SRAM, SRAM will be used to hold the NAT + * table. When SRAM is used, some odd things can happen, relative + * to mmap'ing's virtual memory scheme, that require us to make + * some adjustments. + * + * To be more specific, the real physical SRAM location for the + * table and the table's size may not play well with Linux's + * mmap'ing virtual memory scheme....which likes everything to be + * PAGE_SIZE aligned and sized in multiples of PAGE_SIZE. + * + * Given the above, if the NAT table's (in SRAM) physical address + * in not on a PAGE_SIZE boundary, it will be offset into the + * mmap'd virtual memory, hence we need to know that offset in + * order to get to the table. If said offset plus the table's + * size takes it across a PAGE_SIZE boundary, we need to allocate + * more space to ensure that the table is completely within the + * mmap'd virtual memory. + */ + desc->mmap_size = + ( desc->sram_to_be_used ) ? 
+ desc->nat_sram_info.best_nat_in_sram_size_rqst : + desc->orig_rqst_size; + + desc->mmap_addr = desc->base_addr = + (void* )mmap( + NULL, + desc->mmap_size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + device_fd, + 0); +#else + IPADBG("user space r3pc\n"); + desc->mmap_addr = desc->base_addr = + (void *) mmap( + (caddr_t)0, + IPA_DEVICE_MMAP_MEM_SIZE, + PROT_READ | PROT_WRITE, + MAP_SHARED, + device_fd, + 0); +#endif + + if (desc->base_addr == MAP_FAILED) + { + IPAERR("Unable to mmap the memory for %s\n", desc->name); + ret = -EINVAL; + goto close; + } + + if ( desc->sram_to_be_used ) + { + desc->base_addr = + (uint8_t*) (desc->base_addr) + + desc->nat_sram_info.nat_table_offset_into_mmap; + } + + IPADBG("mmap for %s return value 0x%lx -> 0x%lx\n", + desc->name, + (long unsigned int) desc->mmap_addr, + (long unsigned int) desc->base_addr); + +close: + if (close(device_fd)) + { + IPAERR("unable to close the file descriptor for %s\n", desc->name); + ret = -EINVAL; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +static int DeallocateMemory( + ipa_mem_descriptor* desc, + int ipa_fd) +{ + struct ipa_ioc_nat_ipv6ct_table_del cmd; + int ret = 0; + + IPADBG("In\n"); + + memset(&cmd, 0, sizeof(cmd)); + + cmd.table_index = desc->table_index; + + cmd.mem_type = + ( desc->sram_to_be_used ) ? + IPA_NAT_MEM_IN_SRAM : + IPA_NAT_MEM_IN_DDR; + + ret = ioctl(ipa_fd, desc->delete_ioctl_num, &cmd); + + if (ret) + { + IPAERR("unable to post table delete command for %s Error: %d IPA fd %d\n", + desc->name, ret, ipa_fd); + goto bail; + } + + IPADBG("posted delete command for %s to kernel successfully\n", desc->name); + +bail: + IPADBG("Out\n"); + + return ret; +} + +void ipa_mem_descriptor_init( + ipa_mem_descriptor* desc, + const char* device_name, + int size, + uint8_t table_index, + unsigned long allocate_ioctl_num, + unsigned long delete_ioctl_num, + bool consider_using_sram ) +{ + IPADBG("In\n"); + + strlcpy(desc->name, device_name, IPA_RESOURCE_NAME_MAX); + + desc->orig_rqst_size = desc->mmap_size = size; + desc->table_index = table_index; + desc->allocate_ioctl_num = allocate_ioctl_num; + desc->delete_ioctl_num = delete_ioctl_num; + desc->consider_using_sram = consider_using_sram; + + IPADBG("Out\n"); +} + +int ipa_mem_descriptor_allocate_memory( + ipa_mem_descriptor* desc, + int ipa_fd) +{ + int ret; + + IPADBG("In\n"); + + ret = AllocateMemory(desc, ipa_fd); + + if (ret) + { + IPAERR("unable to allocate %s\n", desc->name); + goto bail; + } + + ret = MapMemory(desc, ipa_fd); + + if (ret) + { + IPAERR("unable to map %s\n", desc->name); + DeallocateMemory(desc, ipa_fd); + goto bail; + } + + desc->valid = TRUE; + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_mem_descriptor_delete( + ipa_mem_descriptor* desc, + int ipa_fd) +{ + int ret = 0; + + IPADBG("In\n"); + + if (! desc->valid) + { + IPAERR("invalid desc handle passed\n"); + ret = -EINVAL; + goto bail; + } + + desc->valid = FALSE; + +#ifndef IPA_ON_R3PC + munmap(desc->mmap_addr, desc->mmap_size); +#else + munmap(desc->mmap_addr, IPA_DEVICE_MMAP_MEM_SIZE); +#endif + + ret = DeallocateMemory(desc, ipa_fd); + +bail: + IPADBG("Out\n"); + + return ret; +} + diff --git a/ipanat/src/ipa_nat_drv.c b/ipanat/src/ipa_nat_drv.c new file mode 100644 index 0000000..0fbf473 --- /dev/null +++ b/ipanat/src/ipa_nat_drv.c @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. 
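/*
 * Illustrative sketch only of the ipa_mem_descriptor life cycle implemented
 * above: init with a device name, size and ioctl numbers, allocate (which
 * also mmaps the table), use desc.base_addr, then delete. The size value is
 * a placeholder; the device name and ioctl constants mirror the IPv6CT
 * caller shown earlier in this patch.
 */
static int example_mem_desc_lifecycle(int ipa_fd)
{
        ipa_mem_descriptor desc;
        int ret;

        ipa_mem_descriptor_init(&desc, IPA_IPV6CT_DEV_NAME,
                                4096 /* placeholder size in bytes */,
                                0 /* table index */,
                                IPA_IOC_ALLOC_IPV6CT_TABLE,
                                IPA_IOC_DEL_IPV6CT_TABLE,
                                false /* do not consider SRAM */);

        ret = ipa_mem_descriptor_allocate_memory(&desc, ipa_fd);
        if (ret)
                return ret;   /* allocate ioctl or mmap failed */

        /* ... desc.base_addr now points at the start of the table ... */

        return ipa_mem_descriptor_delete(&desc, ipa_fd);  /* munmap + delete ioctl */
}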
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ipa_nat_drv.h" +#include "ipa_nat_drvi.h" + +#include <errno.h> + +/** + * ipa_nat_add_ipv4_tbl() - create ipv4 nat table + * @public_ip_addr: [in] public ipv4 address + * @mem_type_ptr: [in] type of memory table is to reside in + * @number_of_entries: [in] number of nat entries + * @table_handle: [out] Handle of new ipv4 nat table + * + * To create new ipv4 nat table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_add_ipv4_tbl( + uint32_t public_ip_addr, + const char *mem_type_ptr, + uint16_t number_of_entries, + uint32_t *tbl_hdl) +{ + int ret; + + if (tbl_hdl == NULL || mem_type_ptr == NULL || number_of_entries == 0) { + IPAERR( + "Invalid parameters tbl_hdl=%pK mem_type_ptr=%p number_of_entries=%d\n", + tbl_hdl, + mem_type_ptr, + number_of_entries); + return -EINVAL; + } + + *tbl_hdl = 0; + + ret = ipa_nati_add_ipv4_tbl( + public_ip_addr, mem_type_ptr, number_of_entries, tbl_hdl); + + if (ret) { + IPAERR("unable to add NAT table\n"); + return ret; + } + + IPADBG("Returning table handle 0x%x\n", *tbl_hdl); + + return ret; +} /* __ipa_nat_add_ipv4_tbl() */ + +/** + * ipa_nat_del_ipv4_tbl() - delete ipv4 table + * @table_handle: [in] Handle of ipv4 nat table + * + * To delete given ipv4 nat table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_del_ipv4_tbl( + uint32_t tbl_hdl) +{ + if ( ! VALID_TBL_HDL(tbl_hdl) ) { + IPAERR("Invalid table handle passed 0x%08X\n", tbl_hdl); + return -EINVAL; + } + + IPADBG("Passed Table Handle: 0x%08X\n", tbl_hdl); + + return ipa_nati_del_ipv4_table(tbl_hdl); +} + +/** + * ipa_nat_add_ipv4_rule() - to insert new ipv4 rule + * @table_handle: [in] handle of ipv4 nat table + * @rule: [in] Pointer to new rule + * @rule_handle: [out] Return the handle to rule + * + * To insert new ipv4 nat rule into ipv4 nat table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_add_ipv4_rule( + uint32_t tbl_hdl, + const ipa_nat_ipv4_rule *clnt_rule, + uint32_t *rule_hdl) +{ + int result = -EINVAL; + + if ( ! 
VALID_TBL_HDL(tbl_hdl) || + rule_hdl == NULL || + clnt_rule == NULL ) { + IPAERR( + "Invalid parameters tbl_hdl=%d clnt_rule=%pK rule_hdl=%pK\n", + tbl_hdl, clnt_rule, rule_hdl); + return result; + } + + IPADBG("Passed Table handle: 0x%x\n", tbl_hdl); + + if (ipa_nati_add_ipv4_rule(tbl_hdl, clnt_rule, rule_hdl)) { + return result; + } + + IPADBG("Returning rule handle %u\n", *rule_hdl); + + return 0; +} + +/** + * ipa_nat_del_ipv4_rule() - to delete ipv4 nat rule + * @table_handle: [in] handle of ipv4 nat table + * @rule_handle: [in] ipv4 nat rule handle + * + * To insert new ipv4 nat rule into ipv4 nat table + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_del_ipv4_rule( + uint32_t tbl_hdl, + uint32_t rule_hdl) +{ + int result = -EINVAL; + + if ( ! VALID_TBL_HDL(tbl_hdl) || ! VALID_RULE_HDL(rule_hdl) ) + { + IPAERR("Invalid parameters tbl_hdl=0x%08X rule_hdl=0x%08X\n", + tbl_hdl, rule_hdl); + return result; + } + + IPADBG("Passed Table: 0x%08X and rule handle 0x%08X\n", tbl_hdl, rule_hdl); + + result = ipa_nati_del_ipv4_rule(tbl_hdl, rule_hdl); + if (result) { + IPAERR( + "Unable to delete rule with handle 0x%08X " + "from hw for NAT table with handle 0x%08X\n", + rule_hdl, tbl_hdl); + return result; + } + + return 0; +} + +/** + * ipa_nat_query_timestamp() - to query timestamp + * @table_handle: [in] handle of ipv4 nat table + * @rule_handle: [in] ipv4 nat rule handle + * @time_stamp: [out] time stamp of rule + * + * To retrieve the timestamp that lastly the + * nat rule was accessed + * + * Returns: 0 On Success, negative on failure + */ +int ipa_nat_query_timestamp( + uint32_t tbl_hdl, + uint32_t rule_hdl, + uint32_t *time_stamp) +{ + if ( ! VALID_TBL_HDL(tbl_hdl) || + ! VALID_RULE_HDL(rule_hdl) || + time_stamp == NULL ) + { + IPAERR("Invalid parameters passed tbl_hdl=0x%x rule_hdl=%u time_stamp=%pK\n", + tbl_hdl, rule_hdl, time_stamp); + return -EINVAL; + } + + IPADBG("Passed Table 0x%x and rule handle %u\n", tbl_hdl, rule_hdl); + + return ipa_nati_query_timestamp(tbl_hdl, rule_hdl, time_stamp); +} + +/** +* ipa_nat_modify_pdn() - modify single PDN entry in the PDN config table +* @table_handle: [in] handle of ipv4 nat table +* @pdn_index : [in] the index of the entry to be modified +* @pdn_info : [in] values for the PDN entry to be changed +* +* Modify a PDN entry +* +* Returns: 0 On Success, negative on failure +*/ +int ipa_nat_modify_pdn( + uint32_t tbl_hdl, + uint8_t pdn_index, + ipa_nat_pdn_entry *pdn_info) +{ + struct ipa_ioc_nat_pdn_entry pdn_data; + + if ( ! 
VALID_TBL_HDL(tbl_hdl) || + pdn_info == NULL) { + IPAERR( + "invalid parameters passed tbl_hdl=%d pdn_info=%pK\n", + tbl_hdl, pdn_info); + return -EINVAL; + } + + if (pdn_index > IPA_MAX_PDN_NUM) { + IPAERR( + "PDN index %d is out of range maximum %d", + pdn_index, IPA_MAX_PDN_NUM); + return -EINVAL; + } + + pdn_data.pdn_index = pdn_index; + pdn_data.public_ip = pdn_info->public_ip; + pdn_data.src_metadata = pdn_info->src_metadata; + pdn_data.dst_metadata = pdn_info->dst_metadata; + + return ipa_nati_modify_pdn(&pdn_data); +} + +/** +* ipa_nat_get_pdn_index() - get a PDN index for a public ip +* @public_ip : [in] IPv4 address of the PDN entry +* @pdn_index : [out] the index of the requested PDN entry +* +* Get a PDN entry +* +* Returns: 0 On Success, negative on failure +*/ +int ipa_nat_get_pdn_index( + uint32_t public_ip, + uint8_t *pdn_index) +{ + if(!pdn_index) + { + IPAERR("NULL PDN index\n"); + return -EINVAL; + } + + return ipa_nati_get_pdn_index(public_ip, pdn_index); +} + +/** +* ipa_nat_alloc_pdn() - allocate a PDN for new WAN +* @pdn_info : [in] values for the PDN entry to be created +* @pdn_index : [out] the index of the requested PDN entry +* +* allocate a new PDN entry +* +* Returns: 0 On Success, negative on failure +*/ +int ipa_nat_alloc_pdn( + ipa_nat_pdn_entry *pdn_info, + uint8_t *pdn_index) +{ + if(!pdn_info) + { + IPAERR("NULL PDN info\n"); + return -EINVAL; + } + + if(!pdn_index) + { + IPAERR("NULL PDN index\n"); + return -EINVAL; + } + + return ipa_nati_alloc_pdn(pdn_info, pdn_index); +} + +/** +* ipa_nat_get_pdn_count() - get the number of allocated PDNs +* @pdn_cnt : [out] the number of allocated PDNs +* +* get the number of allocated PDNs +* +* Returns: 0 On Success, negative on failure +*/ +int ipa_nat_get_pdn_count( + uint8_t *pdn_cnt) +{ + if(!pdn_cnt) + { + IPAERR("NULL PDN count\n"); + return -EINVAL; + } + + *pdn_cnt = ipa_nati_get_pdn_cnt(); + + return 0; +} + +/** +* ipa_nat_dealloc_pdn() - deallocate a PDN entry +* @pdn_index : [in] pdn index to be deallocated +* +* deallocate a PDN in specified index - zero the PDN entry +* +* Returns: 0 On Success, negative on failure +*/ +int ipa_nat_dealloc_pdn( + uint8_t pdn_index) +{ + if(pdn_index > IPA_MAX_PDN_NUM) { + IPAERR("PDN index is out of range %d", pdn_index); + return -EINVAL; + } + + return ipa_nati_dealloc_pdn(pdn_index); +} + +/** + * ipa_nat_vote_clock() - used for voting clock + * @vote_type: [in] desired vote type + */ +int ipa_nat_vote_clock( + enum ipa_app_clock_vote_type vote_type ) +{ + if ( ! (vote_type >= IPA_APP_CLK_DEVOTE && + vote_type <= IPA_APP_CLK_RESET_VOTE) ) + { + IPAERR("Bad vote_type(%u) parameter\n", vote_type); + return -EINVAL; + } + + return ipa_nati_vote_clock(vote_type); +} diff --git a/ipanat/src/ipa_nat_drvi.c b/ipanat/src/ipa_nat_drvi.c new file mode 100644 index 0000000..b8d64f6 --- /dev/null +++ b/ipanat/src/ipa_nat_drvi.c @@ -0,0 +1,2689 @@ +/* + * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
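/*
 * Illustrative caller-side sketch only, tying together the public NAT API
 * above: create a table, add one rule, read its timestamp, then tear
 * everything down. The addresses, ports and entry count are made-up
 * examples, and the "DDR" memory-type string is a placeholder; the accepted
 * strings are defined by ipa_nati_add_ipv4_tbl() in the internal layer.
 */
static int example_nat_api_flow(void)
{
        uint32_t tbl_hdl = 0, rule_hdl = 0, ts = 0;
        ipa_nat_ipv4_rule rule = { 0 };
        int ret;

        ret = ipa_nat_add_ipv4_tbl(0xC0A80101 /* 192.168.1.1 */, "DDR", 100, &tbl_hdl);
        if (ret)
                return ret;

        rule.private_ip   = 0xC0A80002;  /* 192.168.0.2 */
        rule.private_port = 5000;
        rule.public_port  = 6000;
        rule.protocol     = 6;           /* TCP */

        ret = ipa_nat_add_ipv4_rule(tbl_hdl, &rule, &rule_hdl);
        if (!ret) {
                ipa_nat_query_timestamp(tbl_hdl, rule_hdl, &ts);
                ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
        }

        return ipa_nat_del_ipv4_tbl(tbl_hdl);
}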
+ * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ipa_nat_drv.h" +#include "ipa_nat_drvi.h" + +#include <stdio.h> +#include <stdint.h> +#include <stdbool.h> +#include <sys/ioctl.h> +#include <stdlib.h> +#include <netinet/in.h> +#include <errno.h> +#include <pthread.h> +#include <unistd.h> +#include <linux/msm_ipa.h> + +#define MAX_DMA_ENTRIES_FOR_ADD 4 +#define MAX_DMA_ENTRIES_FOR_DEL 3 + +#define IPA_NAT_DEBUG_FILE_PATH "/sys/kernel/debug/ipa/ip4_nat" +#define IPA_NAT_TABLE_NAME "IPA NAT table" +#define IPA_NAT_INDEX_TABLE_NAME "IPA NAT index table" + +#undef min +#define min(a, b) ((a) < (b)) ? (a) : (b) + +#undef max +#define max(a, b) ((a) > (b)) ? (a) : (b) + +static struct ipa_nat_cache ipv4_nat_cache[IPA_NAT_MEM_IN_MAX]; + +static struct ipa_nat_cache *active_nat_cache_ptr = NULL; + +#undef DDR_IS_ACTIVE +#define DDR_IS_ACTIVE() \ + (active_nat_cache_ptr) ? \ + (active_nat_cache_ptr->nmi == IPA_NAT_MEM_IN_DDR) : \ + false + +#undef SRAM_IS_ACTIVE +#define SRAM_IS_ACTIVE() \ + (active_nat_cache_ptr) ? 
\ + (active_nat_cache_ptr->nmi == IPA_NAT_MEM_IN_SRAM) : \ + false + +extern pthread_mutex_t nat_mutex; + +static ipa_nat_pdn_entry pdns[IPA_MAX_PDN_NUM]; +static int num_pdns = 0; + +/* + * ---------------------------------------------------------------------------- + * Private helpers for manipulating regular tables + * ---------------------------------------------------------------------------- + */ +static int table_entry_is_valid( + void* entry) +{ + struct ipa_nat_rule* rule = (struct ipa_nat_rule*) entry; + + IPADBG("In\n"); + + IPADBG("enable(%u)\n", rule->enable); + + IPADBG("Out\n"); + + return rule->enable; +} + +static uint16_t table_entry_get_next_index( + void* entry) +{ + uint16_t result; + struct ipa_nat_rule* rule = (struct ipa_nat_rule*)entry; + + IPADBG("In\n"); + + result = rule->next_index; + + IPADBG("Next entry of %pK is %u\n", entry, result); + + IPADBG("Out\n"); + + return result; +} + +static uint16_t table_entry_get_prev_index( + void* entry, + uint16_t entry_index, + void* meta, + uint16_t base_table_size) +{ + uint16_t result; + struct ipa_nat_rule* rule = (struct ipa_nat_rule*)entry; + + UNUSED(entry_index); + UNUSED(meta); + UNUSED(base_table_size); + + IPADBG("In\n"); + + result = rule->prev_index; + + IPADBG("Previous entry of %u is %u\n", entry_index, result); + + IPADBG("Out\n"); + + return result; +} + +static void table_entry_set_prev_index( + void* entry, + uint16_t entry_index, + uint16_t prev_index, + void* meta, + uint16_t base_table_size) +{ + struct ipa_nat_rule* rule = (struct ipa_nat_rule*) entry; + + UNUSED(entry_index); + UNUSED(meta); + UNUSED(base_table_size); + + IPADBG("In\n"); + + IPADBG("Previous entry of %u is %u\n", entry_index, prev_index); + + rule->prev_index = prev_index; + + IPADBG("Out\n"); +} + +/** + * ipa_nati_calc_ip_cksum() - Calculate the source nat IP checksum diff + * @pub_ip_addr: [in] public ip address + * @priv_ip_addr: [in] Private ip address + * + * source nat ip checksum different is calculated as + * public_ip_addr - private_ip_addr + * Here we are using 1's complement to represent -ve number. + * So take 1's complement of private ip addr and add it + * to public ip addr. 
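/*
 * Illustrative sketch only of the one's-complement arithmetic described in
 * this comment (the real ipa_nati_calc_ip_cksum() follows below): fold a
 * 32-bit running sum into 16 bits with end-around carry, so that
 * "public - private" is computed as "public + ~private" in one's complement.
 */
static uint16_t example_fold16(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xFFFF) + (sum >> 16);   /* end-around carry */
        return (uint16_t) sum;
}

static uint16_t example_ip_cksum_diff(uint32_t pub_ip, uint32_t priv_ip)
{
        uint32_t npriv = ~priv_ip;                    /* one's complement of the private address */
        uint32_t sum   = 0;

        sum += (pub_ip & 0xFFFF) + (pub_ip >> 16);    /* add the public address, 16 bits at a time */
        sum += (npriv  & 0xFFFF) + (npriv  >> 16);    /* "subtract" the private address */

        return example_fold16(sum);                   /* fold carries back in and truncate */
}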
+ * + * Returns: >0 ip checksum diff + */ +static uint16_t ipa_nati_calc_ip_cksum( + uint32_t pub_ip_addr, + uint32_t priv_ip_addr) +{ + uint16_t ret; + uint32_t cksum = 0; + + IPADBG("In\n"); + + /* Add LSB(2 bytes) of public ip address to cksum */ + cksum += (pub_ip_addr & 0xFFFF); + + /* Add MSB(2 bytes) of public ip address to cksum + and check for carry forward(CF), if any add it + */ + cksum += (pub_ip_addr>>16); + if (cksum >> 16) { + cksum = (cksum & 0x0000FFFF); + cksum += 1; + } + + /* Calculate the 1's complement of private ip address */ + priv_ip_addr = (~priv_ip_addr); + + /* Add LSB(2 bytes) of private ip address to cksum + and check for carry forward(CF), if any add it + */ + cksum += (priv_ip_addr & 0xFFFF); + if (cksum >> 16) { + cksum = (cksum & 0x0000FFFF); + cksum += 1; + } + + /* Add MSB(2 bytes) of private ip address to cksum + and check for carry forward(CF), if any add it + */ + cksum += (priv_ip_addr>>16); + if (cksum >> 16) { + cksum = (cksum & 0x0000FFFF); + cksum += 1; + } + + /* Return the LSB(2 bytes) of checksum */ + ret = (uint16_t)cksum; + + IPADBG("Out\n"); + + return ret; +} + +/** + * ipa_nati_calc_tcp_udp_cksum() - Calculate the source nat TCP/UDP checksum diff + * @pub_ip_addr: [in] public ip address + * @pub_port: [in] public tcp/udp port + * @priv_ip_addr: [in] Private ip address + * @priv_port: [in] Private tcp/udp prot + * + * source nat tcp/udp checksum is calculated as + * (pub_ip_addr + pub_port) - (priv_ip_addr + priv_port) + * Here we are using 1's complement to represent -ve number. + * So take 1's complement of prviate ip addr &private port + * and add it public ip addr & public port. + * + * Returns: >0 tcp/udp checksum diff + */ +static uint16_t ipa_nati_calc_tcp_udp_cksum( + uint32_t pub_ip_addr, + uint16_t pub_port, + uint32_t priv_ip_addr, + uint16_t priv_port) +{ + uint16_t ret = 0; + uint32_t cksum = 0; + + IPADBG("In\n"); + + /* Add LSB(2 bytes) of public ip address to cksum */ + cksum += (pub_ip_addr & 0xFFFF); + + /* Add MSB(2 bytes) of public ip address to cksum + and check for carry forward(CF), if any add it + */ + cksum += (pub_ip_addr>>16); + if (cksum >> 16) { + cksum = (cksum & 0x0000FFFF); + cksum += 1; + } + + /* Add public port to cksum and + check for carry forward(CF), if any add it */ + cksum += pub_port; + if (cksum >> 16) { + cksum = (cksum & 0x0000FFFF); + cksum += 1; + } + + /* Calculate the 1's complement of private ip address */ + priv_ip_addr = (~priv_ip_addr); + + /* Add LSB(2 bytes) of private ip address to cksum + and check for carry forward(CF), if any add it + */ + cksum += (priv_ip_addr & 0xFFFF); + if (cksum >> 16) { + cksum = (cksum & 0x0000FFFF); + cksum += 1; + } + + /* Add MSB(2 bytes) of private ip address to cksum + and check for carry forward(CF), if any add + */ + cksum += (priv_ip_addr>>16); + if (cksum >> 16) { + cksum = (cksum & 0x0000FFFF); + cksum += 1; + } + + /* Calculate the 1's complement of private port */ + priv_port = (~priv_port); + + /* Add public port to cksum and + check for carry forward(CF), if any add it */ + cksum += priv_port; + if (cksum >> 16) { + cksum = (cksum & 0x0000FFFF); + cksum += 1; + } + + /* return the LSB(2 bytes) of checksum */ + ret = (uint16_t)cksum; + + IPADBG("Out\n"); + + return ret; +} + +static int table_entry_copy_from_user( + void* entry, + void* user_data) +{ + uint32_t pub_ip_addr; + + struct ipa_nat_rule* nat_entry = (struct ipa_nat_rule*) entry; + const ipa_nat_ipv4_rule* user_rule = (const ipa_nat_ipv4_rule*) user_data; + + IPADBG("In\n"); + + 
pub_ip_addr = pdns[user_rule->pdn_index].public_ip; + + nat_entry->private_ip = user_rule->private_ip; + nat_entry->private_port = user_rule->private_port; + nat_entry->protocol = user_rule->protocol; + nat_entry->public_port = user_rule->public_port; + nat_entry->target_ip = user_rule->target_ip; + nat_entry->target_port = user_rule->target_port; + nat_entry->pdn_index = user_rule->pdn_index; + + nat_entry->ip_chksum = + ipa_nati_calc_ip_cksum(pub_ip_addr, user_rule->private_ip); + + if (IPPROTO_TCP == nat_entry->protocol || + IPPROTO_UDP == nat_entry->protocol) { + nat_entry->tcp_udp_chksum = ipa_nati_calc_tcp_udp_cksum( + pub_ip_addr, + user_rule->public_port, + user_rule->private_ip, + user_rule->private_port); + } + + IPADBG("Out\n"); + + return 0; +} + +static int table_entry_head_insert( + void* entry, + void* user_data, + uint16_t* dma_command_data) +{ + int ret; + + IPADBG("In\n"); + + IPADBG("entry(%p) user_data(%p) dma_command_data(%p)\n", + entry, + user_data, + dma_command_data); + + ret = table_entry_copy_from_user(entry, user_data); + + if (ret) { + IPAERR("unable to copy from user a new entry\n"); + goto bail; + } + + *dma_command_data = 0; + + ((ipa_nat_flags*)dma_command_data)->enable = IPA_NAT_FLAG_ENABLE_BIT; + +bail: + IPADBG("Out\n"); + + return ret; +} + +static int table_entry_tail_insert( + void* entry, + void* user_data) +{ + struct ipa_nat_rule* nat_entry = (struct ipa_nat_rule*) entry; + + int ret; + + IPADBG("In\n"); + + IPADBG("entry(%p) user_data(%p)\n", + entry, + user_data); + + ret = table_entry_copy_from_user(entry, user_data); + + if (ret) { + IPAERR("unable to copy from user a new entry\n"); + goto bail; + } + + nat_entry->enable = IPA_NAT_FLAG_ENABLE_BIT; + +bail: + IPADBG("Out\n"); + + return ret; +} + +static uint16_t table_entry_get_delete_head_dma_command_data( + void* head, + void* next_entry) +{ + UNUSED(head); + UNUSED(next_entry); + + IPADBG("In\n"); + + IPADBG("Out\n"); + + return IPA_NAT_INVALID_PROTO_FIELD_VALUE; +} + +/* + * ---------------------------------------------------------------------------- + * Private helpers for manipulating index tables + * ---------------------------------------------------------------------------- + */ +static int index_table_entry_is_valid( + void* entry) +{ + struct ipa_nat_indx_tbl_rule* rule = + (struct ipa_nat_indx_tbl_rule*) entry; + + int ret; + + IPADBG("In\n"); + + ret = (rule->tbl_entry) ? 
1 : 0; + + IPADBG("enable(%d)\n", ret); + + IPADBG("Out\n"); + + return ret; +} + +static uint16_t index_table_entry_get_next_index( + void* entry) +{ + uint16_t result; + struct ipa_nat_indx_tbl_rule* rule = (struct ipa_nat_indx_tbl_rule*)entry; + + IPADBG("In\n"); + + result = rule->next_index; + + IPADBG("Next entry of %pK is %d\n", entry, result); + + IPADBG("Out\n"); + + return result; +} + +static uint16_t index_table_entry_get_prev_index( + void* entry, + uint16_t entry_index, + void* meta, + uint16_t base_table_size) +{ + uint16_t result = 0; + struct ipa_nat_indx_tbl_meta_info* index_expn_table_meta = + (struct ipa_nat_indx_tbl_meta_info*)meta; + + UNUSED(entry); + + IPADBG("In\n"); + + if (entry_index >= base_table_size) + result = index_expn_table_meta[entry_index - base_table_size].prev_index; + + IPADBG("Previous entry of %d is %d\n", entry_index, result); + + IPADBG("Out\n"); + + return result; +} + +static void index_table_entry_set_prev_index( + void* entry, + uint16_t entry_index, + uint16_t prev_index, + void* meta, + uint16_t base_table_size) +{ + struct ipa_nat_indx_tbl_meta_info* index_expn_table_meta = + (struct ipa_nat_indx_tbl_meta_info*) meta; + + UNUSED(entry); + + IPADBG("In\n"); + + IPADBG("Previous entry of %u is %u\n", entry_index, prev_index); + + if ( entry_index >= base_table_size ) + { + index_expn_table_meta[entry_index - base_table_size].prev_index = prev_index; + } + else if ( VALID_INDEX(prev_index) ) + { + IPAERR("Base table entry %u can't has prev entry %u, but only %u", + entry_index, prev_index, IPA_TABLE_INVALID_ENTRY); + } + + IPADBG("Out\n"); +} + +static int index_table_entry_head_insert( + void* entry, + void* user_data, + uint16_t* dma_command_data) +{ + IPADBG("In\n"); + + UNUSED(entry); + + IPADBG("entry(%p) user_data(%p) dma_command_data(%p)\n", + entry, + user_data, + dma_command_data); + + *dma_command_data = *((uint16_t*)user_data); + + IPADBG("Out\n"); + + return 0; +} + +static int index_table_entry_tail_insert( + void* entry, + void* user_data) +{ + struct ipa_nat_indx_tbl_rule* rule_ptr = + (struct ipa_nat_indx_tbl_rule*) entry; + + IPADBG("In\n"); + + IPADBG("entry(%p) user_data(%p)\n", + entry, + user_data); + + rule_ptr->tbl_entry = *((uint16_t*)user_data); + + IPADBG("Out\n"); + + return 0; +} + +static uint16_t index_table_entry_get_delete_head_dma_command_data( + void* head, + void* next_entry) +{ + uint16_t result; + struct ipa_nat_indx_tbl_rule* rule = + (struct ipa_nat_indx_tbl_rule*)next_entry; + + UNUSED(head); + + IPADBG("In\n"); + + result = rule->tbl_entry; + + IPADBG("Out\n"); + + return result; +} + +/* + * ---------------------------------------------------------------------------- + * Private data and functions used by this file's API + * ---------------------------------------------------------------------------- + */ +static ipa_table_entry_interface entry_interface = { + table_entry_is_valid, + table_entry_get_next_index, + table_entry_get_prev_index, + table_entry_set_prev_index, + table_entry_head_insert, + table_entry_tail_insert, + table_entry_get_delete_head_dma_command_data +}; + +static ipa_table_entry_interface index_entry_interface = { + index_table_entry_is_valid, + index_table_entry_get_next_index, + index_table_entry_get_prev_index, + index_table_entry_set_prev_index, + index_table_entry_head_insert, + index_table_entry_tail_insert, + index_table_entry_get_delete_head_dma_command_data +}; + +/** + * ipa_nati_create_table_dma_cmd_helpers() + * + * Creates dma_cmd_helpers for base and index tables in 
the received + * NAT table + * + * @nat_table: [in] NAT table + * @table_indx: [in] The index of the NAT table + * + * A DMA command helper helps to generate the DMA command for one + * specific field change. Each table has 3 different types of field + * change: update_head, update_entry and delete_head. This function + * creates the helpers for base and index tables and updates the + * tables correspondingly. + */ +static void ipa_nati_create_table_dma_cmd_helpers( + struct ipa_nat_ip4_table_cache* nat_table, + uint8_t table_indx) +{ + IPADBG("In\n"); + + /* + * Create helpers for base table + */ + ipa_table_dma_cmd_helper_init( + &nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_FLAGS], + table_indx, + IPA_NAT_BASE_TBL, + IPA_NAT_EXPN_TBL, + nat_table->mem_desc.addr_offset + IPA_NAT_RULE_FLAG_FIELD_OFFSET); + + ipa_table_dma_cmd_helper_init( + &nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_NEXT_INDEX], + table_indx, + IPA_NAT_BASE_TBL, + IPA_NAT_EXPN_TBL, + nat_table->mem_desc.addr_offset + IPA_NAT_RULE_NEXT_FIELD_OFFSET); + + ipa_table_dma_cmd_helper_init( + &nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_PROTOCOL], + table_indx, + IPA_NAT_BASE_TBL, + IPA_NAT_EXPN_TBL, + nat_table->mem_desc.addr_offset + IPA_NAT_RULE_PROTO_FIELD_OFFSET); + + /* + * Create helpers for index table + */ + ipa_table_dma_cmd_helper_init( + &nat_table->table_dma_cmd_helpers[IPA_NAT_INDEX_TABLE_ENTRY], + table_indx, + IPA_NAT_INDX_TBL, + IPA_NAT_INDEX_EXPN_TBL, + nat_table->mem_desc.addr_offset + IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET); + + ipa_table_dma_cmd_helper_init( + &nat_table->table_dma_cmd_helpers[IPA_NAT_INDEX_TABLE_NEXT_INDEX], + table_indx, + IPA_NAT_INDX_TBL, + IPA_NAT_INDEX_EXPN_TBL, + nat_table->mem_desc.addr_offset + IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET); + + /* + * Init helpers for base table + */ + nat_table->table.dma_help[HELP_UPDATE_HEAD] = + &nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_FLAGS]; + + nat_table->table.dma_help[HELP_UPDATE_ENTRY] = + &nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_NEXT_INDEX]; + + nat_table->table.dma_help[HELP_DELETE_HEAD] = + &nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_PROTOCOL]; + + /* + * Init helpers for index table + */ + nat_table->index_table.dma_help[HELP_UPDATE_HEAD] = + &nat_table->table_dma_cmd_helpers[IPA_NAT_INDEX_TABLE_ENTRY]; + + nat_table->index_table.dma_help[HELP_UPDATE_ENTRY] = + &nat_table->table_dma_cmd_helpers[IPA_NAT_INDEX_TABLE_NEXT_INDEX]; + + nat_table->index_table.dma_help[HELP_DELETE_HEAD] = + &nat_table->table_dma_cmd_helpers[IPA_NAT_INDEX_TABLE_ENTRY]; + + IPADBG("Out\n"); +} + +/** + * ipa_nati_create_table() - Creates a new IPv4 NAT table + * @nat_table: [in] IPv4 NAT table + * @public_ip_addr: [in] public IPv4 address + * @number_of_entries: [in] number of NAT entries + * @table_index: [in] the index of the IPv4 NAT table + * + * This function creates new IPv4 NAT table: + * - Initializes table, index table, memory descriptor and + * table_dma_cmd_helpers structures + * - Allocates the index expansion table meta data + * - Allocates, maps and clears the memory for table and index table + * + * Returns: 0 On Success, negative on failure + */ +static int ipa_nati_create_table( + struct ipa_nat_cache* nat_cache_ptr, + struct ipa_nat_ip4_table_cache* nat_table, + uint32_t public_ip_addr, + uint16_t number_of_entries, + uint8_t table_index) +{ + int ret, size; + void* base_addr; + +#ifdef IPA_ON_R3PC + uint32_t nat_mem_offset = 0; +#endif + + IPADBG("In\n"); + + nat_table->public_addr = public_ip_addr; + + ipa_table_init( 
+ &nat_table->table, + IPA_NAT_TABLE_NAME, + nat_cache_ptr->nmi, + sizeof(struct ipa_nat_rule), + NULL, + 0, + &entry_interface); + + ret = ipa_table_calculate_entries_num( + &nat_table->table, + number_of_entries, + nat_cache_ptr->nmi); + + if (ret) { + IPAERR( + "unable to calculate number of entries in " + "nat table %d, while required by user %d\n", + table_index, number_of_entries); + goto done; + } + + /* + * Allocate memory for NAT index expansion table meta data + */ + nat_table->index_expn_table_meta = (struct ipa_nat_indx_tbl_meta_info*) + calloc(nat_table->table.expn_table_entries, + sizeof(struct ipa_nat_indx_tbl_meta_info)); + + if (nat_table->index_expn_table_meta == NULL) { + size = nat_table->table.expn_table_entries * + sizeof(struct ipa_nat_indx_tbl_meta_info); + IPAERR( + "Fail to allocate ipv4 index expansion table meta with size %d\n", + size); + ret = -ENOMEM; + goto done; + } + + ipa_table_init( + &nat_table->index_table, + IPA_NAT_INDEX_TABLE_NAME, + nat_cache_ptr->nmi, + sizeof(struct ipa_nat_indx_tbl_rule), + nat_table->index_expn_table_meta, + sizeof(struct ipa_nat_indx_tbl_meta_info), + &index_entry_interface); + + nat_table->index_table.table_entries = + nat_table->table.table_entries; + + nat_table->index_table.expn_table_entries = + nat_table->table.expn_table_entries; + + nat_table->index_table.tot_tbl_ents = + nat_table->table.tot_tbl_ents; + + size = ipa_table_calculate_size(&nat_table->table); + size += ipa_table_calculate_size(&nat_table->index_table); + + IPADBG("Nat Base and Index Table size: %d\n", size); + + ipa_mem_descriptor_init( + &nat_table->mem_desc, + IPA_NAT_DEV_NAME, + size, + table_index, + IPA_IOC_ALLOC_NAT_TABLE, + IPA_IOC_DEL_NAT_TABLE, + true); /* true here means do consider using sram */ + + ret = ipa_mem_descriptor_allocate_memory( + &nat_table->mem_desc, + nat_cache_ptr->ipa_desc->fd); + + if (ret) { + IPAERR("unable to allocate nat memory descriptor Error: %d\n", ret); + goto bail_meta; + } + + base_addr = nat_table->mem_desc.base_addr; + +#ifdef IPA_ON_R3PC + ret = ioctl(nat_cache_ptr->ipa_desc->fd, + IPA_IOC_GET_NAT_OFFSET, + &nat_mem_offset); + if (ret) { + IPAERR("unable to post ant offset cmd Error: %d IPA fd %d\n", + ret, nat_cache_ptr->ipa_desc->fd); + goto bail_mem_desc; + } + base_addr += nat_mem_offset; +#endif + + base_addr = + ipa_table_calculate_addresses(&nat_table->table, base_addr); + ipa_table_calculate_addresses(&nat_table->index_table, base_addr); + + ipa_table_reset(&nat_table->table); + ipa_table_reset(&nat_table->index_table); + + ipa_nati_create_table_dma_cmd_helpers(nat_table, table_index); + + goto done; + +#ifdef IPA_ON_R3PC +bail_mem_desc: + ipa_mem_descriptor_delete(&nat_table->mem_desc, nat_cache_ptr->ipa_desc->fd); +#endif + +bail_meta: + free(nat_table->index_expn_table_meta); + memset(nat_table, 0, sizeof(*nat_table)); + +done: + IPADBG("Out\n"); + + return ret; +} + +static int ipa_nati_destroy_table( + struct ipa_nat_cache* nat_cache_ptr, + struct ipa_nat_ip4_table_cache* nat_table) +{ + int ret; + + IPADBG("In\n"); + + ret = ipa_mem_descriptor_delete( + &nat_table->mem_desc, nat_cache_ptr->ipa_desc->fd); + + if (ret) + IPAERR("unable to delete NAT descriptor\n"); + + free(nat_table->index_expn_table_meta); + + memset(nat_table, 0, sizeof(*nat_table)); + + IPADBG("Out\n"); + + return ret; +} + +static int ipa_nati_post_ipv4_init_cmd( + struct ipa_nat_cache* nat_cache_ptr, + struct ipa_nat_ip4_table_cache* nat_table, + uint8_t tbl_index, + bool focus_change ) +{ + struct ipa_ioc_v4_nat_init cmd; + + 
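+ /*
+  * The offsets filled in below describe one contiguous allocation,
+  * as laid out by ipa_nati_create_table():
+  *
+  *   ipv4_rules_offset  -> base rules      (table_entries      * sizeof(struct ipa_nat_rule))
+  *   expn_rules_offset  -> expansion rules (expn_table_entries * sizeof(struct ipa_nat_rule))
+  *   index_offset       -> index rules     (table_entries      * sizeof(struct ipa_nat_indx_tbl_rule))
+  *   index_expn_offset  -> index expansion rules
+  */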
char buf[1024]; + int ret; + + IPADBG("In\n"); + + IPADBG("nat_cache_ptr(%p) nat_table(%p) tbl_index(%u) focus_change(%u)\n", + nat_cache_ptr, nat_table, tbl_index, focus_change); + + memset(&cmd, 0, sizeof(cmd)); + + cmd.tbl_index = tbl_index; + cmd.focus_change = focus_change; + + cmd.mem_type = nat_cache_ptr->nmi; + + cmd.ipv4_rules_offset = + nat_table->mem_desc.addr_offset; + + cmd.expn_rules_offset = + cmd.ipv4_rules_offset + + (nat_table->table.table_entries * sizeof(struct ipa_nat_rule)); + + cmd.index_offset = + cmd.expn_rules_offset + + (nat_table->table.expn_table_entries * sizeof(struct ipa_nat_rule)); + + cmd.index_expn_offset = + cmd.index_offset + + (nat_table->index_table.table_entries * sizeof(struct ipa_nat_indx_tbl_rule)); + + /* + * Driverr/HW expected base table size to be power^2-1 due to H/W + * hash calculation + */ + cmd.table_entries = + nat_table->table.table_entries - 1; + cmd.expn_table_entries = + nat_table->table.expn_table_entries; + + cmd.ip_addr = nat_table->public_addr; + + *buf = '\0'; + IPADBG("%s\n", ipa_ioc_v4_nat_init_as_str(&cmd, buf, sizeof(buf))); + + ret = ioctl(nat_cache_ptr->ipa_desc->fd, IPA_IOC_V4_INIT_NAT, &cmd); + + if (ret) { + IPAERR("unable to post init cmd Error: %d IPA fd %d\n", + ret, nat_cache_ptr->ipa_desc->fd); + goto bail; + } + + IPADBG("Posted IPA_IOC_V4_INIT_NAT to kernel successfully\n"); + +bail: + IPADBG("Out\n"); + + return ret; +} + +static void ipa_nati_copy_second_index_entry_to_head( + struct ipa_nat_ip4_table_cache* nat_table, + ipa_table_iterator* index_table_iterator, + struct ipa_ioc_nat_dma_cmd* cmd) +{ + uint16_t index; + struct ipa_nat_rule* table; + struct ipa_nat_indx_tbl_rule* index_table_rule = + (struct ipa_nat_indx_tbl_rule*)index_table_iterator->next_entry; + + IPADBG("In\n"); + + /* + * The DMA command for field tbl_entry already added by the + * index_table.ipa_table_create_delete_command() + */ + ipa_table_add_dma_cmd( + &nat_table->index_table, + HELP_UPDATE_ENTRY, + index_table_iterator->curr_entry, + index_table_iterator->curr_index, + index_table_rule->next_index, + cmd); + + /* Change the indx_tbl_entry field in the related table rule */ + if (index_table_rule->tbl_entry < nat_table->table.table_entries) { + index = index_table_rule->tbl_entry; + table = (struct ipa_nat_rule*)nat_table->table.table_addr; + } else { + index = index_table_rule->tbl_entry - nat_table->table.table_entries; + table = (struct ipa_nat_rule*)nat_table->table.expn_table_addr; + } + + table[index].indx_tbl_entry = index_table_iterator->curr_index; + + IPADBG("Out\n"); +} + +/** + * dst_hash() - Find the index into ipv4 base table + * @public_ip: [in] public_ip + * @trgt_ip: [in] Target IP address + * @trgt_port: [in] Target port + * @public_port: [in] Public port + * @proto: [in] Protocol (TCP/IP) + * @size: [in] size of the ipv4 base Table + * + * This hash method is used to find the hash index of new nat + * entry into ipv4 base table. 
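+ * For example (illustrative values; on HW older than IPA v4.0 the
+ * public ip is not folded in): trgt_ip=0xC0A80001, trgt_port=0x1F90,
+ * public_port=0x0050, proto=6, size=0x03FF gives
+ * 0x0001 ^ 0xC0A8 ^ 0x1F90 ^ 0x0050 ^ 0x0006 = 0xDF6F, and
+ * 0xDF6F & 0x03FF = 0x036F as the base table index.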
In case of zero index, the + * new entry will be stored into N-1 index where N is size of + * ipv4 base table + * + * Returns: >0 index into ipv4 base table, negative on failure + */ +static uint16_t dst_hash( + struct ipa_nat_cache* nat_cache_ptr, + uint32_t public_ip, + uint32_t trgt_ip, + uint16_t trgt_port, + uint16_t public_port, + uint8_t proto, + uint16_t size) +{ + uint16_t hash = + ((uint16_t)(trgt_ip)) ^ + ((uint16_t)(trgt_ip >> 16)) ^ + (trgt_port) ^ + (public_port) ^ + (proto); + + IPADBG("In\n"); + + IPADBG("public_ip: 0x%08X public_port: 0x%04X\n", public_ip, public_port); + IPADBG("target_ip: 0x%08X target_port: 0x%04X\n", trgt_ip, trgt_port); + IPADBG("proto: 0x%02X size: 0x%04X\n", proto, size); + + if (nat_cache_ptr->ipa_desc->ver >= IPA_HW_v4_0) + hash ^= + ((uint16_t)(public_ip)) ^ + ((uint16_t)(public_ip >> 16)); + + /* + * The size passed to hash function expected be power^2-1, while + * the actual size is power^2, actual_size = size + 1 + */ + hash = (hash & size); + + /* + * If the hash resulted to zero then set it to maximum value as + * zero is unused entry in nat tables + */ + if (hash == 0) { + hash = size; + } + + IPADBG("dst_hash returning value: %d\n", hash); + + IPADBG("Out\n"); + + return hash; +} + +/** + * src_hash() - Find the index into ipv4 index base table + * @priv_ip: [in] Private IP address + * @priv_port: [in] Private port + * @trgt_ip: [in] Target IP address + * @trgt_port: [in] Target Port + * @proto: [in] Protocol (TCP/IP) + * @size: [in] size of the ipv4 index base Table + * + * This hash method is used to find the hash index of new nat + * entry into ipv4 index base table. In case of zero index, the + * new entry will be stored into N-1 index where N is size of + * ipv4 index base table + * + * Returns: >0 index into ipv4 index base table, negative on failure + */ +static uint16_t src_hash( + uint32_t priv_ip, + uint16_t priv_port, + uint32_t trgt_ip, + uint16_t trgt_port, + uint8_t proto, + uint16_t size) +{ + uint16_t hash = + ((uint16_t)(priv_ip)) ^ + ((uint16_t)(priv_ip >> 16)) ^ + (priv_port) ^ + ((uint16_t)(trgt_ip)) ^ + ((uint16_t)(trgt_ip >> 16)) ^ + (trgt_port) ^ + (proto); + + IPADBG("In\n"); + + IPADBG("private_ip: 0x%08X private_port: 0x%04X\n", priv_ip, priv_port); + IPADBG(" target_ip: 0x%08X target_port: 0x%04X\n", trgt_ip, trgt_port); + IPADBG("proto: 0x%02X size: 0x%04X\n", proto, size); + + /* + * The size passed to hash function expected be power^2-1, while + * the actual size is power^2, actual_size = size + 1 + */ + hash = (hash & size); + + /* + * If the hash resulted to zero then set it to maximum value as + * zero is unused entry in nat tables + */ + if (hash == 0) { + hash = size; + } + + IPADBG("src_hash returning value: %d\n", hash); + + IPADBG("Out\n"); + + return hash; +} + +static int ipa_nati_post_ipv4_dma_cmd( + struct ipa_nat_cache* nat_cache_ptr, + struct ipa_ioc_nat_dma_cmd* cmd) +{ + char buf[4096]; + int ret = 0; + + IPADBG("In\n"); + + cmd->mem_type = nat_cache_ptr->nmi; + + *buf = '\0'; + IPADBG("%s\n", prep_ioc_nat_dma_cmd_4print(cmd, buf, sizeof(buf))); + + if (ioctl(nat_cache_ptr->ipa_desc->fd, IPA_IOC_TABLE_DMA_CMD, cmd)) { + IPAERR("ioctl (IPA_IOC_TABLE_DMA_CMD) on fd %d has failed\n", + nat_cache_ptr->ipa_desc->fd); + ret = -EIO; + goto bail; + } + + IPADBG("Posted IPA_IOC_TABLE_DMA_CMD to kernel successfully\n"); + +bail: + IPADBG("Out\n"); + + return ret; +} + +/* + * ---------------------------------------------------------------------------- + * API functions exposed to the upper layers + * 
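+ *
+ * The PDN helpers below keep a cached copy of the HW PDN table in
+ * pdns[]: ipa_nati_alloc_pdn() finds (or reuses) a slot and programs
+ * it through ipa_nati_modify_pdn(), while ipa_nati_dealloc_pdn()
+ * clears it again.  A minimal usage sketch (illustrative only):
+ *
+ *   ipa_nat_pdn_entry info = { .public_ip = pub_ip };
+ *   uint8_t pdn_index;
+ *   if (!ipa_nati_alloc_pdn(&info, &pdn_index))
+ *       rule.pdn_index = pdn_index;   (then add the NAT rule)
+ *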
---------------------------------------------------------------------------- + */ +int ipa_nati_modify_pdn( + struct ipa_ioc_nat_pdn_entry *entry) +{ + struct ipa_nat_cache* nat_cache_ptr; + int ret = 0; + + IPADBG("In\n"); + + nat_cache_ptr = + (ipv4_nat_cache[IPA_NAT_MEM_IN_DDR].ipa_desc) ? + &ipv4_nat_cache[IPA_NAT_MEM_IN_DDR] : + &ipv4_nat_cache[IPA_NAT_MEM_IN_SRAM]; + + if ( nat_cache_ptr->ipa_desc == NULL ) + { + IPAERR("Uninitialized cache file descriptor\n"); + ret = -EIO; + goto done; + } + + if (entry->public_ip == 0) + IPADBG("PDN %d public ip will be set to 0\n", entry->pdn_index); + + ret = ioctl(nat_cache_ptr->ipa_desc->fd, IPA_IOC_NAT_MODIFY_PDN, entry); + + if ( ret ) { + IPAERR("unable to call modify pdn icotl\nindex %d, ip 0x%X, src_metdata 0x%X, dst_metadata 0x%X IPA fd %d\n", + entry->pdn_index, + entry->public_ip, + entry->src_metadata, + entry->dst_metadata, + nat_cache_ptr->ipa_desc->fd); + goto done; + } + + pdns[entry->pdn_index].public_ip = entry->public_ip; + pdns[entry->pdn_index].dst_metadata = entry->dst_metadata; + pdns[entry->pdn_index].src_metadata = entry->src_metadata; + + IPADBG("posted IPA_IOC_NAT_MODIFY_PDN to kernel successfully and stored in cache\n index %d, ip 0x%X, src_metdata 0x%X, dst_metadata 0x%X\n", + entry->pdn_index, + entry->public_ip, + entry->src_metadata, + entry->dst_metadata); +done: + IPADBG("Out\n"); + + return ret; +} + +int ipa_nati_get_pdn_index( + uint32_t public_ip, + uint8_t *pdn_index) +{ + int i = 0; + + for(i = 0; i < (IPA_MAX_PDN_NUM - 1); i++) { + if(pdns[i].public_ip == public_ip) { + IPADBG("ip 0x%X matches PDN index %d\n", public_ip, i); + *pdn_index = i; + return 0; + } + } + + IPAERR("ip 0x%X does not match any PDN\n", public_ip); + + return -EIO; +} + +int ipa_nati_alloc_pdn( + ipa_nat_pdn_entry *pdn_info, + uint8_t *pdn_index) +{ + ipa_nat_pdn_entry zero_test; + struct ipa_ioc_nat_pdn_entry pdn_data; + int i, ret; + + IPADBG("alloc PDN for ip 0x%x\n", pdn_info->public_ip); + + memset(&zero_test, 0, sizeof(zero_test)); + + if(num_pdns >= (IPA_MAX_PDN_NUM - 1)) { + IPAERR("exceeded max num of PDNs, num_pdns %d\n", num_pdns); + return -EIO; + } + + for(i = 0; i < (IPA_MAX_PDN_NUM - 1); i++) { + if(pdns[i].public_ip == pdn_info->public_ip) + { + IPADBG("found the same pdn in index %d\n", i); + *pdn_index = i; + if((pdns[i].src_metadata != pdn_info->src_metadata) || + (pdns[i].dst_metadata != pdn_info->dst_metadata)) + { + IPAERR("WARNING: metadata values don't match! 
[%d, %d], [%d, %d]\n\n", + pdns[i].src_metadata, pdn_info->src_metadata, + pdns[i].dst_metadata, pdn_info->dst_metadata); + } + return 0; + } + + if(!memcmp((pdns + i), &zero_test, sizeof(ipa_nat_pdn_entry))) + { + IPADBG("found an empty pdn in index %d\n", i); + break; + } + } + + if(i >= (IPA_MAX_PDN_NUM - 1)) + { + IPAERR("couldn't find an empty entry while num is %d\n", + num_pdns); + return -EIO; + } + + pdn_data.pdn_index = i; + pdn_data.public_ip = pdn_info->public_ip; + pdn_data.src_metadata = pdn_info->src_metadata; + pdn_data.dst_metadata = pdn_info->dst_metadata; + + ret = ipa_nati_modify_pdn(&pdn_data); + if(!ret) + { + num_pdns++; + *pdn_index = i; + IPADBG("modify num_pdns (%d)\n", num_pdns); + } + + return ret; +} + +int ipa_nati_get_pdn_cnt(void) +{ + return num_pdns; +} + +int ipa_nati_dealloc_pdn( + uint8_t pdn_index) +{ + ipa_nat_pdn_entry zero_test; + struct ipa_ioc_nat_pdn_entry pdn_data; + int ret; + + IPADBG(" trying to deallocate PDN index %d\n", pdn_index); + + if(!num_pdns) + { + IPAERR("pdn table is already empty\n"); + return -EIO; + } + + memset(&zero_test, 0, sizeof(zero_test)); + + if(!memcmp((pdns + pdn_index), &zero_test, sizeof(ipa_nat_pdn_entry))) + { + IPAERR("pdn entry is a zero entry\n"); + return -EIO; + } + + IPADBG("PDN in index %d has ip 0x%X\n", pdn_index, pdns[pdn_index].public_ip); + + pdn_data.pdn_index = pdn_index; + pdn_data.src_metadata = 0; + pdn_data.dst_metadata = 0; + pdn_data.public_ip = 0; + + ret = ipa_nati_modify_pdn(&pdn_data); + if(ret) + { + IPAERR("failed modifying PDN\n"); + return -EIO; + } + + memset((pdns + pdn_index), 0, sizeof(ipa_nat_pdn_entry)); + + num_pdns--; + + IPADBG("successfully removed pdn from index %d num_pdns %d\n", pdn_index, num_pdns); + + return 0; +} + +/* + * ---------------------------------------------------------------------------- + * Previously public API functions, but have been hijacked (in + * ipa_nat_statemach.c). The new definitions that replaced these, now + * call the functions below. + * ---------------------------------------------------------------------------- + */ +int ipa_NATI_post_ipv4_init_cmd( + uint32_t tbl_hdl ) +{ + enum ipa3_nat_mem_in nmi; + struct ipa_nat_cache* nat_cache_ptr; + struct ipa_nat_ip4_table_cache* nat_table; + int ret; + + IPADBG("In\n"); + + BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl); + + if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) { + IPAERR("Bad cache type argument passed\n"); + ret = -EINVAL; + goto bail; + } + + IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi)); + + nat_cache_ptr = &ipv4_nat_cache[nmi]; + + if (pthread_mutex_lock(&nat_mutex)) { + IPAERR("unable to lock the nat mutex\n"); + ret = -EINVAL; + goto bail; + } + + if ( ! nat_cache_ptr->table_cnt ) { + IPAERR("No initialized table in NAT cache\n"); + ret = -EINVAL; + goto unlock; + } + + nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1]; + + ret = ipa_nati_post_ipv4_init_cmd( + nat_cache_ptr, + nat_table, + tbl_hdl - 1, + true); + + if (ret) { + IPAERR("unable to post nat_init command Error %d\n", ret); + goto unlock; + } + + active_nat_cache_ptr = nat_cache_ptr; + +unlock: + if (pthread_mutex_unlock(&nat_mutex)) { + IPAERR("unable to unlock the nat mutex\n"); + ret = (ret) ? 
ret : -EPERM; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +/** + * ipa_NATI_add_ipv4_tbl() - Adds a new IPv4 NAT table + * @ct: [in] the desired cache type to use + * @public_ip_addr: [in] public IPv4 address + * @number_of_entries: [in] number of NAT entries + * @table_handle: [out] handle of new IPv4 NAT table + * + * This function creates new IPv4 NAT table and posts IPv4 NAT init command to HW + * + * Returns: 0 On Success, negative on failure + */ +int ipa_NATI_add_ipv4_tbl( + enum ipa3_nat_mem_in nmi, + uint32_t public_ip_addr, + uint16_t number_of_entries, + uint32_t* tbl_hdl ) +{ + struct ipa_nat_cache* nat_cache_ptr; + struct ipa_nat_ip4_table_cache* nat_table; + int ret = 0; + + IPADBG("In\n"); + + *tbl_hdl = 0; + + if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) { + IPAERR("Bad cache type argument passed\n"); + ret = -EINVAL; + goto bail; + } + + IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi)); + + nat_cache_ptr = &ipv4_nat_cache[nmi]; + + if (pthread_mutex_lock(&nat_mutex)) { + IPAERR("unable to lock the nat mutex\n"); + ret = -EINVAL; + goto bail; + } + + nat_cache_ptr->nmi = nmi; + + if (nat_cache_ptr->table_cnt >= IPA_NAT_MAX_IP4_TBLS) { + IPAERR( + "Can't add addition NAT table. Maximum %d tables allowed\n", + IPA_NAT_MAX_IP4_TBLS); + ret = -EINVAL; + goto unlock; + } + + if ( ! nat_cache_ptr->ipa_desc ) { + nat_cache_ptr->ipa_desc = ipa_descriptor_open(); + if ( nat_cache_ptr->ipa_desc == NULL ) { + IPAERR("failed to open IPA driver file descriptor\n"); + ret = -EIO; + goto unlock; + } + } + + nat_table = &nat_cache_ptr->ip4_tbl[nat_cache_ptr->table_cnt]; + + ret = ipa_nati_create_table( + nat_cache_ptr, + nat_table, + public_ip_addr, + number_of_entries, + nat_cache_ptr->table_cnt); + + if (ret) { + IPAERR("unable to create nat table Error: %d\n", ret); + goto failed_create_table; + } + + /* + * Initialize the ipa hw with nat table dimensions + */ + ret = ipa_nati_post_ipv4_init_cmd( + nat_cache_ptr, + nat_table, + nat_cache_ptr->table_cnt, + false); + + if (ret) { + IPAERR("unable to post nat_init command Error %d\n", ret); + goto failed_post_init_cmd; + } + + active_nat_cache_ptr = nat_cache_ptr; + + /* + * Store the initial public ip address in the cached pdn table + * this is backward compatible for pre IPAv4 versions, we will + * always use this ip as the single PDN address + */ + pdns[0].public_ip = public_ip_addr; + num_pdns = 1; + + nat_cache_ptr->table_cnt++; + + /* + * Return table handle + */ + *tbl_hdl = MAKE_TBL_HDL(nat_cache_ptr->table_cnt, nmi); + + IPADBG("tbl_hdl value(0x%08X) num_pdns (%d)\n", *tbl_hdl, num_pdns); + + goto unlock; + +failed_post_init_cmd: + ipa_nati_destroy_table(nat_cache_ptr, nat_table); + +failed_create_table: + if (!nat_cache_ptr->table_cnt) { + ipa_descriptor_close(nat_cache_ptr->ipa_desc); + nat_cache_ptr->ipa_desc = NULL; + } + +unlock: + if (pthread_mutex_unlock(&nat_mutex)) { + IPAERR("unable to unlock the nat mutex\n"); + ret = -EPERM; + goto bail; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_NATI_del_ipv4_table( + uint32_t tbl_hdl ) +{ + enum ipa3_nat_mem_in nmi; + struct ipa_nat_cache* nat_cache_ptr; + struct ipa_nat_ip4_table_cache* nat_table; + + int ret; + + IPADBG("In\n"); + + BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl); + + if ( ! 
IPA_VALID_NAT_MEM_IN(nmi) ) { + IPAERR("Bad cache type argument passed\n"); + ret = -EINVAL; + goto bail; + } + + IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi)); + + nat_cache_ptr = &ipv4_nat_cache[nmi]; + + nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1]; + + if (pthread_mutex_lock(&nat_mutex)) { + IPAERR("unable to lock the nat mutex\n"); + ret = -EINVAL; + goto bail; + } + + if (! nat_table->mem_desc.valid) { + IPAERR("invalid table handle %d\n", tbl_hdl); + ret = -EINVAL; + goto unlock; + } + + ret = ipa_nati_destroy_table(nat_cache_ptr, nat_table); + if (ret) { + IPAERR("unable to delete NAT table with handle %d\n", tbl_hdl); + goto unlock; + } + + if (! --nat_cache_ptr->table_cnt) { + ipa_descriptor_close(nat_cache_ptr->ipa_desc); + nat_cache_ptr->ipa_desc = NULL; + } + +unlock: + if (pthread_mutex_unlock(&nat_mutex)) { + IPAERR("unable to unlock the nat mutex\n"); + ret = (ret) ? ret : -EPERM; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_NATI_query_timestamp( + uint32_t tbl_hdl, + uint32_t rule_hdl, + uint32_t* time_stamp ) +{ + enum ipa3_nat_mem_in nmi; + struct ipa_nat_cache* nat_cache_ptr; + struct ipa_nat_ip4_table_cache* nat_table; + struct ipa_nat_rule* rule_ptr; + + char buf[1024]; + int ret; + + IPADBG("In\n"); + + BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl); + + if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) { + IPAERR("Bad cache type argument passed\n"); + ret = -EINVAL; + goto bail; + } + + IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi)); + + nat_cache_ptr = &ipv4_nat_cache[nmi]; + + nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1]; + + if (pthread_mutex_lock(&nat_mutex)) { + IPAERR("unable to lock the nat mutex\n"); + ret = -EINVAL; + goto bail; + } + + if ( ! nat_table->mem_desc.valid ) { + IPAERR("invalid table handle %d\n", tbl_hdl); + ret = -EINVAL; + goto unlock; + } + + ret = ipa_table_get_entry( + &nat_table->table, + rule_hdl, + (void**) &rule_ptr, + NULL); + + if (ret) { + IPAERR("Unable to retrive the entry with " + "handle=%u in NAT table with handle=0x%08X\n", + rule_hdl, tbl_hdl); + goto unlock; + } + + *buf = '\0'; + IPADBG("rule_hdl(0x%08X) -> %s\n", + rule_hdl, + prep_nat_rule_4print(rule_ptr, buf, sizeof(buf))); + + *time_stamp = rule_ptr->time_stamp; + +unlock: + if (pthread_mutex_unlock(&nat_mutex)) { + IPAERR("unable to unlock the nat mutex\n"); + ret = (ret) ? ret : -EPERM; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_NATI_add_ipv4_rule( + uint32_t tbl_hdl, + const ipa_nat_ipv4_rule* clnt_rule, + uint32_t* rule_hdl) +{ + uint32_t cmd_sz = + sizeof(struct ipa_ioc_nat_dma_cmd) + + (MAX_DMA_ENTRIES_FOR_ADD * sizeof(struct ipa_ioc_nat_dma_one)); + char cmd_buf[cmd_sz]; + struct ipa_ioc_nat_dma_cmd* cmd = + (struct ipa_ioc_nat_dma_cmd*) cmd_buf; + + enum ipa3_nat_mem_in nmi; + struct ipa_nat_cache* nat_cache_ptr; + struct ipa_nat_ip4_table_cache* nat_table; + struct ipa_nat_rule* rule; + + uint16_t new_entry_index; + uint16_t new_index_tbl_entry_index; + uint32_t new_entry_handle; + char buf[1024]; + + int ret = 0; + + IPADBG("In\n"); + + memset(cmd_buf, 0, sizeof(cmd_buf)); + + if ( ! VALID_TBL_HDL(tbl_hdl) || + ! clnt_rule || + ! rule_hdl ) + { + IPAERR("Bad arg: tbl_hdl(0x%08X) and/or clnt_rule(%p) and/or rule_hdl(%p)\n", + tbl_hdl, clnt_rule, rule_hdl); + ret = -EINVAL; + goto done; + } + + *rule_hdl = 0; + + IPADBG("tbl_hdl(0x%08X)\n", tbl_hdl); + + BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl); + + if ( ! 
IPA_VALID_NAT_MEM_IN(nmi) ) { + IPAERR("Bad cache type argument passed\n"); + ret = -EINVAL; + goto done; + } + + *buf = '\0'; + IPADBG("tbl_hdl(0x%08X) nmi(%s) %s\n", + tbl_hdl, + ipa3_nat_mem_in_as_str(nmi), + prep_nat_ipv4_rule_4print(clnt_rule, buf, sizeof(buf))); + + nat_cache_ptr = &ipv4_nat_cache[nmi]; + + nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1]; + + if (clnt_rule->protocol == IPAHAL_NAT_INVALID_PROTOCOL) { + IPAERR("invalid parameter protocol=%d\n", clnt_rule->protocol); + ret = -EINVAL; + goto done; + } + + /* + * Verify that the rule's PDN is valid + */ + if (clnt_rule->pdn_index >= IPA_MAX_PDN_NUM || + pdns[clnt_rule->pdn_index].public_ip == 0) { + IPAERR("invalid parameters, pdn index %d, public ip = 0x%X\n", + clnt_rule->pdn_index, pdns[clnt_rule->pdn_index].public_ip); + ret = -EINVAL; + goto done; + } + + if (pthread_mutex_lock(&nat_mutex)) { + IPAERR("unable to lock the nat mutex\n"); + ret = -EINVAL; + goto done; + } + + if (! nat_table->mem_desc.valid) { + IPAERR("invalid table handle %d\n", tbl_hdl); + ret = -EINVAL; + goto unlock; + } + + new_entry_index = dst_hash( + nat_cache_ptr, + pdns[clnt_rule->pdn_index].public_ip, + clnt_rule->target_ip, + clnt_rule->target_port, + clnt_rule->public_port, + clnt_rule->protocol, + nat_table->table.table_entries - 1); + + ret = ipa_table_add_entry( + &nat_table->table, + (void*) clnt_rule, + &new_entry_index, + &new_entry_handle, + cmd); + + if (ret) { + IPAERR("Failed to add a new NAT entry\n"); + goto unlock; + } + + new_index_tbl_entry_index = + src_hash(clnt_rule->private_ip, + clnt_rule->private_port, + clnt_rule->target_ip, + clnt_rule->target_port, + clnt_rule->protocol, + nat_table->table.table_entries - 1); + + ret = ipa_table_add_entry( + &nat_table->index_table, + (void*) &new_entry_index, + &new_index_tbl_entry_index, + NULL, + cmd); + + if (ret) { + IPAERR("failed to add a new NAT index entry\n"); + goto fail_add_index_entry; + } + + rule = ipa_table_get_entry_by_index( + &nat_table->table, + new_entry_index); + + if (rule == NULL) { + IPAERR("Failed to retrieve the entry in index %d for NAT table with handle=%d\n", + new_entry_index, tbl_hdl); + ret = -EPERM; + goto bail; + } + + rule->indx_tbl_entry = new_index_tbl_entry_index; + + rule->redirect = clnt_rule->redirect; + rule->enable = clnt_rule->enable; + rule->time_stamp = clnt_rule->time_stamp; + + IPADBG("new entry:%d, new index entry: %d\n", + new_entry_index, new_index_tbl_entry_index); + + IPADBG("rule_hdl(0x%08X) -> %s\n", + new_entry_handle, + prep_nat_rule_4print(rule, buf, sizeof(buf))); + + ret = ipa_nati_post_ipv4_dma_cmd(nat_cache_ptr, cmd); + + if (ret) { + IPAERR("unable to post dma command\n"); + goto bail; + } + + if (pthread_mutex_unlock(&nat_mutex)) { + IPAERR("unable to unlock the nat mutex\n"); + ret = -EPERM; + goto done; + } + + *rule_hdl = new_entry_handle; + + IPADBG("rule_hdl value(%u)\n", *rule_hdl); + + goto done; + +bail: + ipa_table_erase_entry(&nat_table->index_table, new_index_tbl_entry_index); + +fail_add_index_entry: + ipa_table_erase_entry(&nat_table->table, new_entry_index); + +unlock: + if (pthread_mutex_unlock(&nat_mutex)) + IPAERR("unable to unlock the nat mutex\n"); +done: + IPADBG("Out\n"); + + return ret; +} + +int ipa_NATI_del_ipv4_rule( + uint32_t tbl_hdl, + uint32_t rule_hdl ) +{ + uint32_t cmd_sz = + sizeof(struct ipa_ioc_nat_dma_cmd) + + (MAX_DMA_ENTRIES_FOR_DEL * sizeof(struct ipa_ioc_nat_dma_one)); + char cmd_buf[cmd_sz]; + struct ipa_ioc_nat_dma_cmd* cmd = + (struct ipa_ioc_nat_dma_cmd*) cmd_buf; + + enum 
ipa3_nat_mem_in nmi; + struct ipa_nat_cache* nat_cache_ptr; + struct ipa_nat_ip4_table_cache* nat_table; + struct ipa_nat_rule* table_rule; + struct ipa_nat_indx_tbl_rule* index_table_rule; + + ipa_table_iterator table_iterator; + ipa_table_iterator index_table_iterator; + + uint16_t index; + char buf[1024]; + int ret = 0; + + IPADBG("In\n"); + + memset(cmd_buf, 0, sizeof(cmd_buf)); + + IPADBG("tbl_hdl(0x%08X) rule_hdl(%u)\n", tbl_hdl, rule_hdl); + + BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl); + + if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) { + IPAERR("Bad cache type argument passed\n"); + ret = -EINVAL; + goto done; + } + + IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi)); + + nat_cache_ptr = &ipv4_nat_cache[nmi]; + + nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1]; + + if (pthread_mutex_lock(&nat_mutex)) { + IPAERR("Unable to lock the nat mutex\n"); + ret = -EINVAL; + goto done; + } + + if (! nat_table->mem_desc.valid) { + IPAERR("Invalid table handle 0x%08X\n", tbl_hdl); + ret = -EINVAL; + goto unlock; + } + + ret = ipa_table_get_entry( + &nat_table->table, + rule_hdl, + (void**) &table_rule, + &index); + + if (ret) { + IPAERR("Unable to retrive the entry with rule_hdl=%u\n", rule_hdl); + goto unlock; + } + + *buf = '\0'; + IPADBG("rule_hdl(0x%08X) -> %s\n", + rule_hdl, + prep_nat_rule_4print(table_rule, buf, sizeof(buf))); + + ret = ipa_table_iterator_init( + &table_iterator, + &nat_table->table, + table_rule, + index); + + if (ret) { + IPAERR("Unable to create iterator which points to the " + "entry %u in NAT table with handle=0x%08X\n", + index, tbl_hdl); + goto unlock; + } + + index = table_rule->indx_tbl_entry; + + index_table_rule = (struct ipa_nat_indx_tbl_rule*) + ipa_table_get_entry_by_index(&nat_table->index_table, index); + + if (index_table_rule == NULL) { + IPAERR("Unable to retrieve the entry in index %u " + "in NAT index table with handle=0x%08X\n", + index, tbl_hdl); + ret = -EPERM; + goto unlock; + } + + ret = ipa_table_iterator_init( + &index_table_iterator, + &nat_table->index_table, + index_table_rule, + index); + + if (ret) { + IPAERR("Unable to create iterator which points to the " + "entry %u in NAT index table with handle=0x%08X\n", + index, tbl_hdl); + goto unlock; + } + + ipa_table_create_delete_command( + &nat_table->index_table, + cmd, + &index_table_iterator); + + if (ipa_table_iterator_is_head_with_tail(&index_table_iterator)) { + + ipa_nati_copy_second_index_entry_to_head( + nat_table, &index_table_iterator, cmd); + /* + * Iterate to the next entry which should be deleted + */ + ret = ipa_table_iterator_next( + &index_table_iterator, &nat_table->index_table); + + if (ret) { + IPAERR("Unable to move the iterator to the next entry " + "(points to the entry %u in NAT index table)\n", + index); + goto unlock; + } + } + + ipa_table_create_delete_command( + &nat_table->table, + cmd, + &table_iterator); + + ret = ipa_nati_post_ipv4_dma_cmd(nat_cache_ptr, cmd); + + if (ret) { + IPAERR("Unable to post dma command\n"); + goto unlock; + } + + if (! 
ipa_table_iterator_is_head_with_tail(&table_iterator)) { + /* The entry can be deleted */ + uint8_t is_prev_empty = + (table_iterator.prev_entry != NULL && + ((struct ipa_nat_rule*)table_iterator.prev_entry)->protocol == + IPAHAL_NAT_INVALID_PROTOCOL); + + ipa_table_delete_entry( + &nat_table->table, &table_iterator, is_prev_empty); + } + + ipa_table_delete_entry( + &nat_table->index_table, + &index_table_iterator, + FALSE); + + if (index_table_iterator.curr_index >= nat_table->index_table.table_entries) + nat_table->index_expn_table_meta[ + index_table_iterator.curr_index - nat_table->index_table.table_entries]. + prev_index = IPA_TABLE_INVALID_ENTRY; + +unlock: + if (pthread_mutex_unlock(&nat_mutex)) { + IPAERR("Unable to unlock the nat mutex\n"); + ret = (ret) ? ret : -EPERM; + } + +done: + IPADBG("Out\n"); + + return ret; +} + +/* + * ---------------------------------------------------------------------------- + * New function to get sram size. + * ---------------------------------------------------------------------------- + */ +int ipa_nati_get_sram_size( + uint32_t* size_ptr) +{ + struct ipa_nat_cache* nat_cache_ptr = + &ipv4_nat_cache[IPA_NAT_MEM_IN_SRAM]; + struct ipa_nat_in_sram_info nat_sram_info; + int ret; + + IPADBG("In\n"); + + if (pthread_mutex_lock(&nat_mutex)) { + IPAERR("unable to lock the nat mutex\n"); + ret = -EINVAL; + goto bail; + } + + if ( ! nat_cache_ptr->ipa_desc ) { + nat_cache_ptr->ipa_desc = ipa_descriptor_open(); + if ( nat_cache_ptr->ipa_desc == NULL ) { + IPAERR("failed to open IPA driver file descriptor\n"); + ret = -EIO; + goto unlock; + } + } + + memset(&nat_sram_info, 0, sizeof(nat_sram_info)); + + ret = ioctl(nat_cache_ptr->ipa_desc->fd, + IPA_IOC_GET_NAT_IN_SRAM_INFO, + &nat_sram_info); + + if (ret) { + IPAERR("NAT_IN_SRAM_INFO ioctl failure %d on IPA fd %d\n", + ret, nat_cache_ptr->ipa_desc->fd); + goto unlock; + } + + if ( (*size_ptr = nat_sram_info.sram_mem_available_for_nat) == 0 ) + { + IPAERR("sram_mem_available_for_nat is zero\n"); + ret = -EINVAL; + goto unlock; + } + +unlock: + if (pthread_mutex_unlock(&nat_mutex)) { + IPAERR("unable to unlock the nat mutex\n"); + ret = (ret) ? ret : -EPERM; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +/* + * ---------------------------------------------------------------------------- + * Utility functions + * ---------------------------------------------------------------------------- + */ +static int print_nat_rule( + ipa_table* table_ptr, + uint32_t rule_hdl, + void* record_ptr, + uint16_t record_index, + void* meta_record_ptr, + uint16_t meta_record_index, + void* arb_data_ptr ) +{ + enum ipa3_nat_mem_in nmi; + uint8_t is_expn_tbl; + uint16_t rule_index; + + char buf[1024]; + + struct ipa_nat_rule* rule_ptr = + (struct ipa_nat_rule*) record_ptr; + + UNUSED(meta_record_ptr); + UNUSED(meta_record_index); + + if ( rule_ptr->protocol == IPA_NAT_INVALID_PROTO_FIELD_VALUE_IN_RULE ) + { + goto bail; + } + + BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index); + + nmi++; /* stop compiler usage warning */ + + printf(" %s %s (0x%04X) (0x%08X) -> %s\n", + (table_ptr->nmi == IPA_NAT_MEM_IN_DDR) ? "DDR" : "SRAM", + (is_expn_tbl) ? 
"EXP " : "BASE", + record_index, + rule_hdl, + prep_nat_rule_4print(rule_ptr, buf, sizeof(buf))); + + fflush(stdout); + + *((bool*) arb_data_ptr) = false; + +bail: + return 0; +} + +static int print_meta_data( + ipa_table* table_ptr, + uint32_t rule_hdl, + void* record_ptr, + uint16_t record_index, + void* meta_record_ptr, + uint16_t meta_record_index, + void* arb_data_ptr ) +{ + struct ipa_nat_indx_tbl_rule* index_entry = + (struct ipa_nat_indx_tbl_rule *) record_ptr; + + struct ipa_nat_indx_tbl_meta_info* mi_ptr = + (struct ipa_nat_indx_tbl_meta_info*) meta_record_ptr; + + enum ipa3_nat_mem_in nmi; + uint8_t is_expn_tbl; + uint16_t rule_index; + + UNUSED(meta_record_index); + + BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index); + + nmi++; /* stop compiler usage warning */ + + if ( mi_ptr ) + { + printf(" %s %s Entry_Index=0x%04X Table_Entry=0x%04X -> " + "Prev_Index=0x%04X Next_Index=0x%04X\n", + (table_ptr->nmi == IPA_NAT_MEM_IN_DDR) ? "DDR" : "SRAM", + (is_expn_tbl) ? "EXP " : "BASE", + record_index, + index_entry->tbl_entry, + mi_ptr->prev_index, + index_entry->next_index); + } + else + { + printf(" %s %s Entry_Index=0x%04X Table_Entry=0x%04X -> " + "Prev_Index=0xXXXX Next_Index=0x%04X\n", + (table_ptr->nmi == IPA_NAT_MEM_IN_DDR) ? "DDR" : "SRAM", + (is_expn_tbl) ? "EXP " : "BASE", + record_index, + index_entry->tbl_entry, + index_entry->next_index); + } + + fflush(stdout); + + *((bool*) arb_data_ptr) = false; + + return 0; +} + +void ipa_nat_dump_ipv4_table( + uint32_t tbl_hdl ) +{ + bool empty; + + if (pthread_mutex_lock(&nat_mutex)) { + IPAERR("unable to lock the nat mutex\n"); + return; + } + + printf("\nIPv4 active rules:\n"); + + empty = true; + + ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_NAT_TABLE, print_nat_rule, &empty); + + if ( empty ) + { + printf(" Empty\n"); + } + + printf("\nExpansion Index Table Meta Data:\n"); + + empty = true; + + ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_INDEX_TABLE, print_meta_data, &empty); + + if ( empty ) + { + printf(" Empty\n"); + } + + printf("\n"); + + if (pthread_mutex_unlock(&nat_mutex)) { + IPAERR("unable to unlock the nat mutex\n"); + } +} + +int ipa_NATI_clear_ipv4_tbl( + uint32_t tbl_hdl ) +{ + enum ipa3_nat_mem_in nmi; + struct ipa_nat_cache* nat_cache_ptr; + struct ipa_nat_ip4_table_cache* nat_table; + int ret = 0; + + IPADBG("In\n"); + + BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl); + + if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) { + IPAERR("Bad cache type argument passed\n"); + ret = -EINVAL; + goto bail; + } + + IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi)); + + nat_cache_ptr = &ipv4_nat_cache[nmi]; + + if (pthread_mutex_lock(&nat_mutex)) { + IPAERR("unable to lock the nat mutex\n"); + ret = -EINVAL; + goto bail; + } + + if ( ! nat_cache_ptr->table_cnt ) { + IPAERR("No initialized table in NAT cache\n"); + ret = -EINVAL; + goto unlock; + } + + nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1]; + + ipa_table_reset(&nat_table->table); + nat_table->table.cur_tbl_cnt = + nat_table->table.cur_expn_tbl_cnt = 0; + + ipa_table_reset(&nat_table->index_table); + nat_table->index_table.cur_tbl_cnt = + nat_table->index_table.cur_expn_tbl_cnt = 0; + +unlock: + if (pthread_mutex_unlock(&nat_mutex)) { + IPAERR("unable to unlock the nat mutex\n"); + ret = (ret) ? ret : -EPERM; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_nati_copy_ipv4_tbl( + uint32_t src_tbl_hdl, + uint32_t dst_tbl_hdl, + ipa_table_walk_cb copy_cb ) +{ + int ret = 0; + + IPADBG("In\n"); + + if ( ! 
copy_cb ) + { + IPAERR("copy_cb is null\n"); + ret = -EINVAL; + goto bail; + } + + if (pthread_mutex_lock(&nat_mutex)) + { + IPAERR("unable to lock the nat mutex\n"); + ret = -EINVAL; + goto bail; + } + + /* + * Clear the destination table... + */ + ret = ipa_NATI_clear_ipv4_tbl(dst_tbl_hdl); + + if ( ret == 0 ) + { + uintptr_t dth = dst_tbl_hdl; + /* + * Now walk the source table and pass the valid records to the + * user's copy callback... + */ + ret = ipa_NATI_walk_ipv4_tbl( + src_tbl_hdl, USE_NAT_TABLE, copy_cb, (void*) dth); + + if ( ret != 0 ) + { + IPAERR("ipa_table_walk returned non-zero (%d)\n", ret); + goto unlock; + } + } + +unlock: + if (pthread_mutex_unlock(&nat_mutex)) + { + IPAERR("unable to unlock the nat mutex\n"); + ret = (ret) ? ret : -EPERM; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_NATI_walk_ipv4_tbl( + uint32_t tbl_hdl, + WhichTbl2Use which, + ipa_table_walk_cb walk_cb, + void* arb_data_ptr ) +{ + enum ipa3_nat_mem_in nmi; + uint32_t broken_tbl_hdl; + struct ipa_nat_cache* nat_cache_ptr; + struct ipa_nat_ip4_table_cache* nat_table; + ipa_table* ipa_tbl_ptr; + + int ret = 0; + + IPADBG("In\n"); + + if ( ! VALID_TBL_HDL(tbl_hdl) || + ! VALID_WHICHTBL2USE(which) || + ! walk_cb ) + { + IPAERR("Bad arg: tbl_hdl(0x%08X) and/or WhichTbl2Use(%u) and/or walk_cb(%p)\n", + tbl_hdl, which, walk_cb); + ret = -EINVAL; + goto bail; + } + + if ( pthread_mutex_lock(&nat_mutex) ) + { + IPAERR("unable to lock the nat mutex\n"); + ret = -EINVAL; + goto bail; + } + + /* + * Now walk the table and pass the valid records to the user's + * walk callback... + */ + BREAK_TBL_HDL(tbl_hdl, nmi, broken_tbl_hdl); + + if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) + { + IPAERR("Bad cache type argument passed\n"); + ret = -EINVAL; + goto unlock; + } + + nat_cache_ptr = &ipv4_nat_cache[nmi]; + + if ( ! nat_cache_ptr->table_cnt ) + { + IPAERR("No initialized table in NAT cache\n"); + ret = -EINVAL; + goto unlock; + } + + nat_table = &nat_cache_ptr->ip4_tbl[broken_tbl_hdl - 1]; + + ipa_tbl_ptr = + (which == USE_NAT_TABLE) ? + &nat_table->table : + &nat_table->index_table; + + ret = ipa_table_walk(ipa_tbl_ptr, 0, WHEN_SLOT_FILLED, walk_cb, arb_data_ptr); + + if ( ret != 0 ) + { + IPAERR("ipa_table_walk returned non-zero (%d)\n", ret); + goto unlock; + } + +unlock: + if ( pthread_mutex_unlock(&nat_mutex) ) + { + IPAERR("unable to unlock the nat mutex\n"); + ret = (ret) ? 
ret : -EPERM; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +typedef struct +{ + WhichTbl2Use which; + uint32_t tot_for_avg; + ipa_nati_tbl_stats* stats_ptr; +} chain_stat_help; + +static int gen_chain_stats( + ipa_table* table_ptr, + uint32_t rule_hdl, + void* record_ptr, + uint16_t record_index, + void* meta_record_ptr, + uint16_t meta_record_index, + void* arb_data_ptr ) +{ + chain_stat_help* csh_ptr = (chain_stat_help*) arb_data_ptr; + + enum ipa3_nat_mem_in nmi; + uint8_t is_expn_tbl; + uint16_t rule_index; + + uint32_t chain_len = 0; + + UNUSED(record_index); + UNUSED(meta_record_ptr); + UNUSED(meta_record_index); + + BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index); + + if ( is_expn_tbl ) + { + nmi++; /* stop compiler usage warning */ + return 1; + } + + if ( csh_ptr->which == USE_NAT_TABLE ) + { + struct ipa_nat_rule* list_elem_ptr = + (struct ipa_nat_rule*) record_ptr; + + if ( list_elem_ptr->next_index ) + { + chain_len = 1; + + while ( list_elem_ptr->next_index ) + { + chain_len++; + + list_elem_ptr = (struct ipa_nat_rule*) + GOTO_REC(table_ptr, list_elem_ptr->next_index); + } + } + } + else + { + struct ipa_nat_indx_tbl_rule* list_elem_ptr = + (struct ipa_nat_indx_tbl_rule*) record_ptr; + + if ( list_elem_ptr->next_index ) + { + chain_len = 1; + + while ( list_elem_ptr->next_index ) + { + chain_len++; + + list_elem_ptr = (struct ipa_nat_indx_tbl_rule*) + GOTO_REC(table_ptr, list_elem_ptr->next_index); + } + } + } + + if ( chain_len ) + { + csh_ptr->stats_ptr->tot_chains += 1; + + csh_ptr->tot_for_avg += chain_len; + + if ( csh_ptr->stats_ptr->min_chain_len == 0 ) + { + csh_ptr->stats_ptr->min_chain_len = chain_len; + } + else + { + csh_ptr->stats_ptr->min_chain_len = + min(csh_ptr->stats_ptr->min_chain_len, chain_len); + } + + csh_ptr->stats_ptr->max_chain_len = + max(csh_ptr->stats_ptr->max_chain_len, chain_len); + } + + return 0; +} + +int ipa_NATI_ipv4_tbl_stats( + uint32_t tbl_hdl, + ipa_nati_tbl_stats* nat_stats_ptr, + ipa_nati_tbl_stats* idx_stats_ptr ) +{ + enum ipa3_nat_mem_in nmi; + uint32_t broken_tbl_hdl; + struct ipa_nat_cache* nat_cache_ptr; + struct ipa_nat_ip4_table_cache* nat_table; + ipa_table* ipa_tbl_ptr; + + chain_stat_help csh; + + int ret = 0; + + IPADBG("In\n"); + + if ( ! VALID_TBL_HDL(tbl_hdl) || + ! nat_stats_ptr || + ! idx_stats_ptr ) + { + IPAERR("Bad arg: " + "tbl_hdl(0x%08X) and/or " + "nat_stats_ptr(%p) and/or " + "idx_stats_ptr(%p)\n", + tbl_hdl, + nat_stats_ptr, + idx_stats_ptr ); + ret = -EINVAL; + goto bail; + } + + if ( pthread_mutex_lock(&nat_mutex) ) + { + IPAERR("unable to lock the nat mutex\n"); + ret = -EINVAL; + goto bail; + } + + memset(nat_stats_ptr, 0, sizeof(ipa_nati_tbl_stats)); + memset(idx_stats_ptr, 0, sizeof(ipa_nati_tbl_stats)); + + BREAK_TBL_HDL(tbl_hdl, nmi, broken_tbl_hdl); + + if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) + { + IPAERR("Bad cache type argument passed\n"); + ret = -EINVAL; + goto unlock; + } + + nat_cache_ptr = &ipv4_nat_cache[nmi]; + + if ( ! nat_cache_ptr->table_cnt ) + { + IPAERR("No initialized table in NAT cache\n"); + ret = -EINVAL; + goto unlock; + } + + nat_table = &nat_cache_ptr->ip4_tbl[broken_tbl_hdl - 1]; + + /* + * Gather NAT table stats... 
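+ * (these are produced by walking every filled base table slot with
+ * gen_chain_stats(): each non-empty collision chain contributes its
+ * length, and avg_chain_len ends up as tot_for_avg / tot_chains,
+ * e.g. chains of length 2, 3 and 5 give tot_chains=3, min=2, max=5,
+ * avg of roughly 3.33)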
+ */ + ipa_tbl_ptr = &nat_table->table; + + nat_stats_ptr->nmi = nmi; + + nat_stats_ptr->tot_base_ents = ipa_tbl_ptr->table_entries; + nat_stats_ptr->tot_expn_ents = ipa_tbl_ptr->expn_table_entries; + nat_stats_ptr->tot_ents = + nat_stats_ptr->tot_base_ents + nat_stats_ptr->tot_expn_ents; + + nat_stats_ptr->tot_base_ents_filled = ipa_tbl_ptr->cur_tbl_cnt; + nat_stats_ptr->tot_expn_ents_filled = ipa_tbl_ptr->cur_expn_tbl_cnt; + + memset(&csh, 0, sizeof(chain_stat_help)); + + csh.which = USE_NAT_TABLE; + csh.stats_ptr = nat_stats_ptr; + + ret = ipa_table_walk( + ipa_tbl_ptr, 0, WHEN_SLOT_FILLED, gen_chain_stats, &csh); + + if ( ret < 0 ) + { + IPAERR("Error gathering chain stats\n"); + ret = -EINVAL; + goto unlock; + } + + if ( csh.tot_for_avg && nat_stats_ptr->tot_chains ) + { + nat_stats_ptr->avg_chain_len = + (float) csh.tot_for_avg / (float) nat_stats_ptr->tot_chains; + } + + /* + * Now lets gather index table stats... + */ + ipa_tbl_ptr = &nat_table->index_table; + + idx_stats_ptr->nmi = nmi; + + idx_stats_ptr->tot_base_ents = ipa_tbl_ptr->table_entries; + idx_stats_ptr->tot_expn_ents = ipa_tbl_ptr->expn_table_entries; + idx_stats_ptr->tot_ents = + idx_stats_ptr->tot_base_ents + idx_stats_ptr->tot_expn_ents; + + idx_stats_ptr->tot_base_ents_filled = ipa_tbl_ptr->cur_tbl_cnt; + idx_stats_ptr->tot_expn_ents_filled = ipa_tbl_ptr->cur_expn_tbl_cnt; + + memset(&csh, 0, sizeof(chain_stat_help)); + + csh.which = USE_INDEX_TABLE; + csh.stats_ptr = idx_stats_ptr; + + ret = ipa_table_walk( + ipa_tbl_ptr, 0, WHEN_SLOT_FILLED, gen_chain_stats, &csh); + + if ( ret < 0 ) + { + IPAERR("Error gathering chain stats\n"); + ret = -EINVAL; + goto unlock; + } + + if ( csh.tot_for_avg && idx_stats_ptr->tot_chains ) + { + idx_stats_ptr->avg_chain_len = + (float) csh.tot_for_avg / (float) idx_stats_ptr->tot_chains; + } + + ret = 0; + +unlock: + if ( pthread_mutex_unlock(&nat_mutex) ) + { + IPAERR("unable to unlock the nat mutex\n"); + ret = (ret) ? ret : -EPERM; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_nati_vote_clock( + enum ipa_app_clock_vote_type vote_type ) +{ + struct ipa_nat_cache* nat_cache_ptr = + &ipv4_nat_cache[IPA_NAT_MEM_IN_SRAM]; + + int ret = 0; + + IPADBG("In\n"); + + if ( ! nat_cache_ptr->ipa_desc ) { + nat_cache_ptr->ipa_desc = ipa_descriptor_open(); + if ( nat_cache_ptr->ipa_desc == NULL ) { + IPAERR("failed to open IPA driver file descriptor\n"); + ret = -EIO; + goto bail; + } + } + + ret = ioctl(nat_cache_ptr->ipa_desc->fd, + IPA_IOC_APP_CLOCK_VOTE, + vote_type); + + if (ret) { + IPAERR("APP_CLOCK_VOTE ioctl failure %d on IPA fd %d\n", + ret, nat_cache_ptr->ipa_desc->fd); + goto bail; + } + +bail: + IPADBG("Out\n"); + + return ret; +} diff --git a/ipanat/src/ipa_nat_map.cpp b/ipanat/src/ipa_nat_map.cpp new file mode 100644 index 0000000..d2bcf54 --- /dev/null +++ b/ipanat/src/ipa_nat_map.cpp @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <map> +#include <iterator> + +#include "ipa_nat_utils.h" + +#include "ipa_nat_map.h" + +static std::map<uint32_t, uint32_t> map_array[MAP_NUM_MAX]; + +/******************************************************************************/ + +int ipa_nat_map_add( + ipa_which_map which, + uint32_t key, + uint32_t val ) +{ + int ret_val = 0; + + std::pair<std::map<uint32_t, uint32_t>::iterator, bool> ret; + + IPADBG("In\n"); + + if ( ! VALID_IPA_USE_MAP(which) ) + { + IPAERR("Bad arg which(%u)\n", which); + ret_val = -1; + goto bail; + } + + IPADBG("[%s] key(%u) -> val(%u)\n", + ipa_which_map_as_str(which), key, val); + + ret = map_array[which].insert(std::pair<uint32_t, uint32_t>(key, val)); + + if ( ret.second == false ) + { + IPAERR("[%s] key(%u) already exists in map\n", + ipa_which_map_as_str(which), + key); + ret_val = -1; + } + +bail: + IPADBG("Out\n"); + + return ret_val; +} + +/******************************************************************************/ + +int ipa_nat_map_find( + ipa_which_map which, + uint32_t key, + uint32_t* val_ptr ) +{ + int ret_val = 0; + + std::map<uint32_t, uint32_t>::iterator it; + + IPADBG("In\n"); + + if ( ! VALID_IPA_USE_MAP(which) ) + { + IPAERR("Bad arg which(%u)\n", which); + ret_val = -1; + goto bail; + } + + IPADBG("[%s] key(%u)\n", + ipa_which_map_as_str(which), key); + + it = map_array[which].find(key); + + if ( it == map_array[which].end() ) + { + IPAERR("[%s] key(%u) not found in map\n", + ipa_which_map_as_str(which), + key); + ret_val = -1; + } + else + { + if ( val_ptr ) + { + *val_ptr = it->second; + IPADBG("[%s] key(%u) -> val(%u)\n", + ipa_which_map_as_str(which), + key, *val_ptr); + } + } + +bail: + IPADBG("Out\n"); + + return ret_val; +} + +/******************************************************************************/ + +int ipa_nat_map_del( + ipa_which_map which, + uint32_t key, + uint32_t* val_ptr ) +{ + int ret_val = 0; + + std::map<uint32_t, uint32_t>::iterator it; + + IPADBG("In\n"); + + if ( ! 
VALID_IPA_USE_MAP(which) ) + { + IPAERR("Bad arg which(%u)\n", which); + ret_val = -1; + goto bail; + } + + IPADBG("[%s] key(%u)\n", + ipa_which_map_as_str(which), key); + + it = map_array[which].find(key); + + if ( it == map_array[which].end() ) + { + IPAERR("[%s] key(%u) not found in map\n", + ipa_which_map_as_str(which), + key); + ret_val = -1; + } + else + { + if ( val_ptr ) + { + *val_ptr = it->second; + IPADBG("[%s] key(%u) -> val(%u)\n", + ipa_which_map_as_str(which), + key, *val_ptr); + } + map_array[which].erase(it); + } + +bail: + IPADBG("Out\n"); + + return ret_val; +} + +int ipa_nat_map_clear( + ipa_which_map which ) +{ + int ret_val = 0; + + IPADBG("In\n"); + + if ( ! VALID_IPA_USE_MAP(which) ) + { + IPAERR("Bad arg which(%u)\n", which); + ret_val = -1; + goto bail; + } + + map_array[which].clear(); + +bail: + IPADBG("Out\n"); + + return ret_val; +} + +int ipa_nat_map_dump( + ipa_which_map which ) +{ + std::map<uint32_t, uint32_t>::iterator it; + + int ret_val = 0; + + IPADBG("In\n"); + + if ( ! VALID_IPA_USE_MAP(which) ) + { + IPAERR("Bad arg which(%u)\n", which); + ret_val = -1; + goto bail; + } + + printf("Dumping: %s\n", ipa_which_map_as_str(which)); + + for ( it = map_array[which].begin(); + it != map_array[which].end(); + it++ ) + { + printf(" Key[%u|0x%08X] -> Value[%u|0x%08X]\n", + it->first, + it->first, + it->second, + it->second); + } + +bail: + IPADBG("Out\n"); + + return ret_val; +} diff --git a/ipanat/src/ipa_nat_statemach.c b/ipanat/src/ipa_nat_statemach.c new file mode 100644 index 0000000..b6cf284 --- /dev/null +++ b/ipanat/src/ipa_nat_statemach.c @@ -0,0 +1,2702 @@ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <errno.h> +#include <pthread.h> + +#include "ipa_nat_drv.h" +#include "ipa_nat_drvi.h" + +#include "ipa_nat_map.h" + +#include "ipa_nat_statemach.h" + +#undef PRCNT_OF +#define PRCNT_OF(v) \ + ((.25) * (v)) + +#undef CHOOSE_MEM_SUB +#define CHOOSE_MEM_SUB() \ + (nati_obj.curr_state == NATI_STATE_HYBRID) ? 
\
+ SRAM_SUB : \
+ DDR_SUB
+
+#undef CHOOSE_MAPS
+#define CHOOSE_MAPS(o2n, n2o) \
+ do { \
+ uint32_t sub = CHOOSE_MEM_SUB(); \
+ o2n = nati_obj.map_pairs[sub].orig2new_map; \
+ n2o = nati_obj.map_pairs[sub].new2orig_map; \
+ } while (0)
+
+#undef CHOOSE_CNTR
+#define CHOOSE_CNTR() \
+ &(nati_obj.tot_rules_in_table[CHOOSE_MEM_SUB()])
+
+#undef CHOOSE_SW_STATS
+#define CHOOSE_SW_STATS() \
+ &(nati_obj.sw_stats[CHOOSE_MEM_SUB()])
+
+/*
+ * BACKGROUND INFORMATION
+ *
+ * As it relates to why this file exists...
+ *
+ * In the past, a NAT table API was presented to upper layer
+ * applications. Said API managed low level details of NAT table
+ * creation, manipulation, and destruction. The API
+ * managed/manipulated NAT tables that lived exclusively in DDR. DDR
+ * based tables are fine, but lead to unneeded bus accesses to/from DDR
+ * by the IPA while doing its NAT duties. These accesses cause NAT to
+ * take longer than necessary.
+ *
+ * If the DDR bus accesses could be eliminated by storing the table in
+ * the IPA's internal memory (ie. SRAM), the IPA's IP V4 NAT could be
+ * sped up. This leads us to the following description of this file's
+ * intent.
+ *
+ * The purpose and intent of this file is to hijack the API described
+ * above, but in a way that allows the tables to live in both SRAM and
+ * DDR. The details of whether SRAM or DDR is being used are hidden
+ * from the application. More specifically, the API will allow the
+ * following to occur completely transparently to the application using
+ * the API.
+ *
+ * (1) NAT tables can live exclusively in DDR (traditional and
+ * historically like before)
+ *
+ * (2) NAT tables can live simultaneously in SRAM and DDR. SRAM
+ * initially being used by the IPA, but both being kept in sync.
+ * When SRAM becomes too full, a switch to DDR will occur.
+ *
+ * (3) The same as (2) above, but after the switch to DDR occurs,
+ * we'll have the ability to switch back to SRAM if/when DDR
+ * table entry deletions take us to a small enough entry
+ * count. An entry count that, when met, allows us to switch back
+ * to using SRAM again.
+ *
+ * As above, all of these details will just magically happen unknown
+ * to the application using the API. The implementation is done via a
+ * state machine.
+ */
+
+/*
+ * The following will be used to keep state machine state for and
+ * between API calls...
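+ *
+ * As a rough sketch of the hybrid flow this object tracks (the state
+ * machine table further below is the authoritative source), when
+ * hybrid mode is configured:
+ *
+ *   NATI_STATE_NULL ---ADD_TABLE---> NATI_STATE_HYBRID (SRAM in use)
+ *   NATI_STATE_HYBRID ---SRAM add fails---> NATI_STATE_HYBRID_DDR
+ *   NATI_STATE_HYBRID_DDR ---count <= back_to_sram_thresh---> NATI_STATE_HYBRID
+ *
+ * The tot_rules_in_table[], map_pairs[] and sw_stats[] members are
+ * kept per memory type (DDR_SUB/SRAM_SUB), and the CHOOSE_* macros
+ * above select the entry for the memory currently in focus.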
+ */ +static ipa_nati_obj nati_obj = { + .prev_state = NATI_STATE_NULL, + .curr_state = NATI_STATE_NULL, + .hold_state = false, + .state_to_hold = NATI_STATE_NULL, + .ddr_tbl_hdl = 0, + .sram_tbl_hdl = 0, + .tot_slots_in_sram = 0, + .back_to_sram_thresh = 0, + /* + * Remember: + * tot_rules_in_table[0] for ddr, and + * tot_rules_in_table[1] for sram + */ + .tot_rules_in_table = { 0, 0 }, + /* + * Remember: + * map_pairs[0] for ddr, and + * map_pairs[1] for sram + */ + .map_pairs = { {MAP_NUM_00, MAP_NUM_01}, {MAP_NUM_02, MAP_NUM_03} }, + /* + * Remember: + * sw_stats[0] for ddr, and + * sw_stats[1] for sram + */ + .sw_stats = { {0, 0}, {0, 0} }, +}; + +/* + * The following needed to protect nati_obj above, as well as a number + * of data stuctures within the file ipa_nat_drvi.c + */ +pthread_mutex_t nat_mutex; +static bool nat_mutex_init = false; + +static inline int mutex_init(void) +{ + static pthread_mutexattr_t nat_mutex_attr; + + int ret = 0; + + IPADBG("In\n"); + + ret = pthread_mutexattr_init(&nat_mutex_attr); + + if ( ret != 0 ) + { + IPAERR("pthread_mutexattr_init() failed: ret(%d)\n", ret ); + goto bail; + } + + ret = pthread_mutexattr_settype( + &nat_mutex_attr, PTHREAD_MUTEX_RECURSIVE); + + if ( ret != 0 ) + { + IPAERR("pthread_mutexattr_settype() failed: ret(%d)\n", + ret ); + goto bail; + } + + ret = pthread_mutex_init(&nat_mutex, &nat_mutex_attr); + + if ( ret != 0 ) + { + IPAERR("pthread_mutex_init() failed: ret(%d)\n", + ret ); + goto bail; + } + + nat_mutex_init = true; + +bail: + IPADBG("Out\n"); + + return ret; +} + +/* + * Function for taking/locking the mutex... + */ +static int take_mutex() +{ + int ret; + + if ( nat_mutex_init ) + { +again: + ret = pthread_mutex_lock(&nat_mutex); + } + else + { + ret = mutex_init(); + + if ( ret == 0 ) + { + goto again; + } + } + + if ( ret != 0 ) + { + IPAERR("Unable to lock the %s nat mutex\n", + (nat_mutex_init) ? "initialized" : "uninitialized"); + } + + return ret; +} + +/* + * Function for giving/unlocking the mutex... + */ +static int give_mutex() +{ + int ret = (nat_mutex_init) ? pthread_mutex_unlock(&nat_mutex) : -1; + + if ( ret != 0 ) + { + IPAERR("Unable to unlock the %s nat mutex\n", + (nat_mutex_init) ? 
"initialized" : "uninitialized"); + } + + return ret; +} + +/* + * **************************************************************************** + * + * HIJACKED API FUNCTIONS START HERE + * + * **************************************************************************** + */ +int ipa_nati_add_ipv4_tbl( + uint32_t public_ip_addr, + const char* mem_type_ptr, + uint16_t number_of_entries, + uint32_t* tbl_hdl) +{ + table_add_args args = { + .public_ip_addr = public_ip_addr, + .number_of_entries = number_of_entries, + .tbl_hdl = tbl_hdl, + .mem_type_ptr = mem_type_ptr, + }; + + int ret; + + IPADBG("In\n"); + + ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_ADD_TABLE, (void*) &args); + + if ( ret == 0 ) + { + IPADBG("tbl_hdl val(0x%08X)\n", *tbl_hdl); + } + + IPADBG("Out\n"); + + return ret; +} + +int ipa_nati_del_ipv4_table( + uint32_t tbl_hdl) +{ + table_del_args args = { + .tbl_hdl = tbl_hdl, + }; + + int ret; + + IPADBG("In\n"); + + ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_DEL_TABLE, (void*) &args); + + IPADBG("Out\n"); + + return ret; +} + +int ipa_nati_clear_ipv4_tbl( + uint32_t tbl_hdl ) +{ + table_clear_args args = { + .tbl_hdl = tbl_hdl, + }; + + int ret; + + IPADBG("In\n"); + + ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_CLR_TABLE, (void*) &args); + + IPADBG("Out\n"); + + return ret; +} + +int ipa_nati_walk_ipv4_tbl( + uint32_t tbl_hdl, + WhichTbl2Use which, + ipa_table_walk_cb walk_cb, + void* arb_data_ptr ) +{ + table_walk_args args = { + .tbl_hdl = tbl_hdl, + .which = which, + .walk_cb = walk_cb, + .arb_data_ptr = arb_data_ptr, + }; + + int ret; + + IPADBG("In\n"); + + ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_WLK_TABLE, (void*) &args); + + IPADBG("Out\n"); + + return ret; +} + +int ipa_nati_ipv4_tbl_stats( + uint32_t tbl_hdl, + ipa_nati_tbl_stats* nat_stats_ptr, + ipa_nati_tbl_stats* idx_stats_ptr ) +{ + table_stats_args args = { + .tbl_hdl = tbl_hdl, + .nat_stats_ptr = nat_stats_ptr, + .idx_stats_ptr = idx_stats_ptr, + }; + + int ret; + + IPADBG("In\n"); + + ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_TBL_STATS, (void*) &args); + + IPADBG("Out\n"); + + return ret; +} + +int ipa_nati_add_ipv4_rule( + uint32_t tbl_hdl, + const ipa_nat_ipv4_rule* clnt_rule, + uint32_t* rule_hdl ) +{ + rule_add_args args = { + .tbl_hdl = tbl_hdl, + .clnt_rule = clnt_rule, + .rule_hdl = rule_hdl, + }; + + int ret; + + IPADBG("In\n"); + + ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_ADD_RULE, (void*) &args); + + if ( ret == 0 ) + { + IPADBG("rule_hdl val(%u)\n", *rule_hdl); + } + + IPADBG("Out\n"); + + return ret; +} + +int ipa_nati_del_ipv4_rule( + uint32_t tbl_hdl, + uint32_t rule_hdl ) +{ + rule_del_args args = { + .tbl_hdl = tbl_hdl, + .rule_hdl = rule_hdl, + }; + + int ret; + + IPADBG("In\n"); + + ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_DEL_RULE, (void*) &args); + + IPADBG("Out\n"); + + return ret; +} + +int ipa_nati_query_timestamp( + uint32_t tbl_hdl, + uint32_t rule_hdl, + uint32_t* time_stamp) +{ + timestap_query_args args = { + .tbl_hdl = tbl_hdl, + .rule_hdl = rule_hdl, + .time_stamp = time_stamp, + }; + + int ret; + + IPADBG("In\n"); + + ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_GET_TSTAMP, (void*) &args); + + if ( ret == 0 ) + { + IPADBG("time_stamp val(0x%08X)\n", *time_stamp); + } + + IPADBG("Out\n"); + + return ret; +} + +int ipa_nat_switch_to( + enum ipa3_nat_mem_in nmi, + bool hold_state ) +{ + int ret = -1; + + IPADBG("In - current state %s\n", + ipa_nati_state_as_str(nati_obj.curr_state)); + + if ( ! 
IPA_VALID_NAT_MEM_IN(nmi) ) + { + IPAERR("Bad nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi)); + + ret = -1; + + goto bail; + } + + ret = take_mutex(); + + if ( ret != 0 ) + { + goto bail; + } + + /* + * Are we here before the state machine has been started? + */ + if ( IN_UNSTARTED_STATE() ) + { + nati_obj.hold_state = hold_state; + + nati_obj.state_to_hold = + (nmi == IPA_NAT_MEM_IN_DDR) ? + NATI_STATE_DDR_ONLY : + NATI_STATE_SRAM_ONLY; + + IPADBG( + "Initial state will be %s before table init and it %s be held\n", + ipa_nati_state_as_str(nati_obj.state_to_hold), + (hold_state) ? "will" : "will not"); + + ret = 0; + + goto unlock; + } + + /* + * Are we here after we've already started in hybrid state? + */ + if ( IN_HYBRID_STATE() ) + { + ret = 0; + + if ( COMPATIBLE_NMI_4SWITCH(nmi) ) + { + ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_TBL_SWITCH, 0); + } + + if ( ret == 0 ) + { + nati_obj.hold_state = hold_state; + + if ( hold_state ) + { + nati_obj.state_to_hold = GEN_HOLD_STATE(); + } + + IPADBG( + "Current state is %s and it %s be held\n", + ipa_nati_state_as_str(nati_obj.curr_state), + (hold_state) ? "will" : "will not"); + } + + goto unlock; + } + + /* + * We've gotten here because we're not in an unstarted state, nor + * are we in hybrid state. This means we're either in + * NATI_STATE_DDR_ONLY or NATI_STATE_SRAM_ONLY + * + * Let's see what's being attempted and if it's OK... + */ + if ( hold_state ) + { + if ( COMPATIBLE_NMI_4SWITCH(nmi) ) + { + /* + * If we've gotten here, it means that the requested nmi, + * the current state, and the hold are compatible... + */ + nati_obj.state_to_hold = GEN_HOLD_STATE(); + nati_obj.hold_state = hold_state; + + IPADBG( + "Requesting to hold memory type %s at " + "current state %s will be done\n", + ipa3_nat_mem_in_as_str(nmi), + ipa_nati_state_as_str(nati_obj.curr_state)); + + ret = 0; + + goto unlock; + } + else + { + /* + * The requested nmi, the current state, and the hold are + * not compatible... + */ + IPAERR( + "Requesting to hold memory type %s and " + "current state %s are incompatible\n", + ipa3_nat_mem_in_as_str(nmi), + ipa_nati_state_as_str(nati_obj.curr_state)); + + ret = -1; + + goto unlock; + } + } + + /* + * If we've gotten here, it's because the holding of state is no + * longer desired... + */ + nati_obj.state_to_hold = NATI_STATE_NULL; + nati_obj.hold_state = hold_state; + + IPADBG("Holding of state is no longer desired\n"); + + ret = 0; + +unlock: + ret = give_mutex(); + +bail: + IPADBG("Out\n"); + + return ret; +} + +bool ipa_nat_is_sram_supported(void) +{ + return VALID_TBL_HDL(nati_obj.sram_tbl_hdl); +} + +/******************************************************************************/ +/* + * FUNCTION: migrate_rule + * + * PARAMS: + * + * table_ptr (IN) The table being walked + * + * tbl_rule_hdl (IN) The nat rule's handle from the source table + * + * record_ptr (IN) The nat rule record from the source table + * + * record_index (IN) The record above's index in the table being walked + * + * meta_record_ptr (IN) If meta data in table, this will be it + * + * meta_record_index (IN) The record above's index in the table being walked + * + * arb_data_ptr (IN) The destination table handle + * + * DESCRIPTION: + * + * This routine is intended to copy records from a source table to a + * destination table. + + * It is used in union with the ipa_nati_copy_ipv4_tbl() API call + * below. + * + * It is compatible with the ipa_table_walk() API. 
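+ *
+ * Roughly speaking (see the table-switch callbacks further below,
+ * handle names here are placeholders), it is invoked for each source
+ * record via:
+ *
+ *   ipa_nati_copy_ipv4_tbl(src_tbl_hdl, dst_tbl_hdl, migrate_rule);
+ *
+ * with the destination table handle arriving here as arb_data_ptr.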
+ *
+ * In the context of the ipa_nati_copy_ipv4_tbl(), the arguments
+ * passed in are as enumerated above.
+ *
+ * AN IMPORTANT NOTE ON RULE HANDLES WHEN IN HYBRID MODE
+ *
+ * The rule_hdl is used to find a rule in the nat table. It is, in
+ * effect, an index into the table. The application above us retains
+ * it for future manipulation of the rule in the table.
+ *
+ * In hybrid mode, a rule can and will move between SRAM and DDR.
+ * Because of this, its handle will change. The application has
+ * only the original handle and doesn't know of the new handle. A
+ * mapping, used in hybrid mode, will maintain a relationship
+ * between the original handle and the rule's current real handle...
+ *
+ * To help you get a mindset of how this is done:
+ *
+ * The original handle will map (point) to the new, and the new handle
+ * will map (point) back to the original.
+ *
+ * NOTE WELL: There are two sets of maps. One for each memory type...
+ *
+ * RETURNS:
+ *
+ * Returns 0 on success, non-zero on failure
+ */
+static int migrate_rule(
+ ipa_table* table_ptr,
+ uint32_t tbl_rule_hdl,
+ void* record_ptr,
+ uint16_t record_index,
+ void* meta_record_ptr,
+ uint16_t meta_record_index,
+ void* arb_data_ptr )
+{
+ struct ipa_nat_rule* nat_rule_ptr = (struct ipa_nat_rule*) record_ptr;
+ uint32_t dst_tbl_hdl = (uint32_t) arb_data_ptr;
+
+ ipa_nat_ipv4_rule v4_rule;
+
+ uint32_t orig_rule_hdl;
+ uint32_t new_rule_hdl;
+
+ uint32_t src_orig2new_map, src_new2orig_map;
+ uint32_t dst_orig2new_map, dst_new2orig_map;
+ uint32_t* cnt_ptr;
+
+ const char* mig_dir_ptr;
+
+ char buf[1024];
+ int ret;
+
+ UNUSED(buf);
+ UNUSED(record_index);
+ UNUSED(meta_record_ptr);
+ UNUSED(meta_record_index);
+
+ IPADBG("In\n");
+
+ IPADBG("tbl_mem_type(%s) tbl_rule_hdl(%u) -> %s\n",
+ ipa3_nat_mem_in_as_str(table_ptr->nmi),
+ tbl_rule_hdl,
+ prep_nat_rule_4print(nat_rule_ptr, buf, sizeof(buf)));
+
+ IPADBG("dst_tbl_hdl(0x%08X)\n", dst_tbl_hdl);
+
+ /*
+ * What is the type of the source table?
+ */
+ if ( table_ptr->nmi == IPA_NAT_MEM_IN_SRAM )
+ {
+ mig_dir_ptr = "SRAM -> DDR";
+
+ src_orig2new_map = nati_obj.map_pairs[SRAM_SUB].orig2new_map;
+ src_new2orig_map = nati_obj.map_pairs[SRAM_SUB].new2orig_map;
+
+ dst_orig2new_map = nati_obj.map_pairs[DDR_SUB].orig2new_map;
+ dst_new2orig_map = nati_obj.map_pairs[DDR_SUB].new2orig_map;
+
+ cnt_ptr = &(nati_obj.tot_rules_in_table[DDR_SUB]);
+ }
+ else
+ {
+ mig_dir_ptr = "DDR -> SRAM";
+
+ src_orig2new_map = nati_obj.map_pairs[DDR_SUB].orig2new_map;
+ src_new2orig_map = nati_obj.map_pairs[DDR_SUB].new2orig_map;
+
+ dst_orig2new_map = nati_obj.map_pairs[SRAM_SUB].orig2new_map;
+ dst_new2orig_map = nati_obj.map_pairs[SRAM_SUB].new2orig_map;
+
+ cnt_ptr = &(nati_obj.tot_rules_in_table[SRAM_SUB]);
+ }
+
+ src_orig2new_map++; /* to avoid compiler usage warning */
+
+ if ( nat_rule_ptr->protocol == IPA_NAT_INVALID_PROTO_FIELD_VALUE_IN_RULE )
+ {
+ IPADBG("%s: Special \"first rule in list\" case. 
" + "Rule's enabled bit on, but protocol implies deleted\n", + mig_dir_ptr); + ret = 0; + goto bail; + } + + ret = ipa_nat_map_find(src_new2orig_map, tbl_rule_hdl, &orig_rule_hdl); + + if ( ret != 0 ) + { + IPAERR("%s: ipa_nat_map_find(src_new2orig_map) fail\n", mig_dir_ptr); + goto bail; + } + + memset(&v4_rule, 0, sizeof(v4_rule)); + + v4_rule.private_ip = nat_rule_ptr->private_ip; + v4_rule.private_port = nat_rule_ptr->private_port; + v4_rule.protocol = nat_rule_ptr->protocol; + v4_rule.public_port = nat_rule_ptr->public_port; + v4_rule.target_ip = nat_rule_ptr->target_ip; + v4_rule.target_port = nat_rule_ptr->target_port; + v4_rule.pdn_index = nat_rule_ptr->pdn_index; + v4_rule.redirect = nat_rule_ptr->redirect; + v4_rule.enable = nat_rule_ptr->enable; + v4_rule.time_stamp = nat_rule_ptr->time_stamp; + + ret = ipa_NATI_add_ipv4_rule(dst_tbl_hdl, &v4_rule, &new_rule_hdl); + + if ( ret != 0 ) + { + IPAERR("%s: ipa_NATI_add_ipv4_rule() fail\n", mig_dir_ptr); + goto bail; + } + + (*cnt_ptr)++; + + /* + * The following is needed to maintain the original handle and + * have it point to the new handle. + * + * Remember, original handle points to new and the new handle + * points back to original. + */ + ret = ipa_nat_map_add(dst_orig2new_map, orig_rule_hdl, new_rule_hdl); + + if ( ret != 0 ) + { + IPAERR("%s: ipa_nat_map_add(dst_orig2new_map) fail\n", mig_dir_ptr); + goto bail; + } + + ret = ipa_nat_map_add(dst_new2orig_map, new_rule_hdl, orig_rule_hdl); + + if ( ret != 0 ) + { + IPAERR("%s: ipa_nat_map_add(dst_new2orig_map) fail\n", mig_dir_ptr); + goto bail; + } + + IPADBG("orig_rule_hdl(0x%08X) new_rule_hdl(0x%08X)\n", + orig_rule_hdl, new_rule_hdl); + +bail: + IPADBG("Out\n"); + + return ret; +} + +/* + * **************************************************************************** + * + * STATE MACHINE CODE BEGINS HERE + * + * **************************************************************************** + */ +static int _smUndef( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ); /* forward declaration */ + +/******************************************************************************/ +/* + * FUNCTION: _smDelTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the destruction of the DDR based NAT + * table. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smDelTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_del_args* args = (table_del_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + + int ret; + + UNUSED(nati_obj_ptr); + UNUSED(trigger); + + IPADBG("In\n"); + + IPADBG("tbl_hdl(0x%08X)\n", tbl_hdl); + + ret = ipa_NATI_del_ipv4_table(tbl_hdl); + + if ( ret == 0 && ! IN_HYBRID_STATE() ) + { + /* + * The following will create the preferred "initial state" for + * restart... 
+ */ + BACK2_UNSTARTED_STATE(); + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smFirstTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the creation of the very first NAT table(s) + * before any others have ever been created... + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smFirstTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_add_args* args = (table_add_args*) arb_data_ptr; + + uint32_t public_ip_addr = args->public_ip_addr; + uint16_t number_of_entries = args->number_of_entries; + uint32_t* tbl_hdl_ptr = args->tbl_hdl; + const char* mem_type_ptr = args->mem_type_ptr; + + int ret; + + IPADBG("In\n"); + + /* + * This is the first time in here. Let the ipacm's XML config (or + * state_to_hold) drive initial state... + */ + SET_NATIOBJ_STATE( + nati_obj_ptr, + (nati_obj_ptr->hold_state && nati_obj_ptr->state_to_hold) ? + nati_obj_ptr->state_to_hold : + mem_type_str_to_ipa_nati_state(mem_type_ptr)); + + ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_ADD_TABLE, arb_data_ptr); + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smAddDdrTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the creation of a NAT table in DDR. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smAddDdrTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_add_args* args = (table_add_args*) arb_data_ptr; + + uint32_t public_ip_addr = args->public_ip_addr; + uint16_t number_of_entries = args->number_of_entries; + uint32_t* tbl_hdl_ptr = args->tbl_hdl; + + int ret; + + UNUSED(trigger); + + IPADBG("In\n"); + + IPADBG("public_ip_addr(0x%08X) number_of_entries(%u) tbl_hdl_ptr(%p)\n", + public_ip_addr, number_of_entries, tbl_hdl_ptr); + + ret = ipa_NATI_add_ipv4_tbl( + IPA_NAT_MEM_IN_DDR, + public_ip_addr, + number_of_entries, + &nati_obj_ptr->ddr_tbl_hdl); + + if ( ret == 0 ) + { + *tbl_hdl_ptr = nati_obj_ptr->ddr_tbl_hdl; + + IPADBG("DDR table creation successful: tbl_hdl(0x%08X)\n", + *tbl_hdl_ptr); + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smAddSramTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the creation of a NAT table in SRAM. 
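+ *
+ * Note that the SRAM table is sized from the reported SRAM size
+ * rather than from the caller's number_of_entries, and that the
+ * switch-back threshold is derived from that size via PRCNT_OF()
+ * (25% at present). As an illustrative example only: an SRAM area
+ * yielding 400 slots gives back_to_sram_thresh = 100.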
+ * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smAddSramTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_add_args* args = (table_add_args*) arb_data_ptr; + + uint32_t public_ip_addr = args->public_ip_addr; + uint16_t number_of_entries = args->number_of_entries; + uint32_t* tbl_hdl_ptr = args->tbl_hdl; + + uint32_t sram_size = 0; + + int ret; + + UNUSED(number_of_entries); + UNUSED(trigger); + + IPADBG("In\n"); + + IPADBG("public_ip_addr(0x%08X) tbl_hdl_ptr(%p)\n", + public_ip_addr, tbl_hdl_ptr); + + ret = ipa_nati_get_sram_size(&sram_size); + + if ( ret == 0 ) + { + ret = ipa_calc_num_sram_table_entries( + sram_size, + sizeof(struct ipa_nat_rule), + sizeof(struct ipa_nat_indx_tbl_rule), + (uint16_t*) &nati_obj_ptr->tot_slots_in_sram); + + if ( ret == 0 ) + { + nati_obj_ptr->back_to_sram_thresh = + PRCNT_OF(nati_obj_ptr->tot_slots_in_sram); + + IPADBG("sram_size(%u or 0x%x) tot_slots_in_sram(%u) back_to_sram_thresh(%u)\n", + sram_size, + sram_size, + nati_obj_ptr->tot_slots_in_sram, + nati_obj_ptr->back_to_sram_thresh); + + IPADBG("Voting clock on for sram table creation\n"); + + if ( (ret = ipa_nat_vote_clock(IPA_APP_CLK_VOTE)) != 0 ) + { + IPAERR("Voting clock on failed\n"); + goto done; + } + + ret = ipa_NATI_add_ipv4_tbl( + IPA_NAT_MEM_IN_SRAM, + public_ip_addr, + nati_obj_ptr->tot_slots_in_sram, + &nati_obj_ptr->sram_tbl_hdl); + + if ( ipa_nat_vote_clock(IPA_APP_CLK_DEVOTE) != 0 ) + { + IPAWARN("Voting clock off failed\n"); + } + + if ( ret == 0 ) + { + *tbl_hdl_ptr = nati_obj_ptr->sram_tbl_hdl; + + IPADBG("SRAM table creation successful: tbl_hdl(0x%08X)\n", + *tbl_hdl_ptr); + } + } + } + +done: + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smAddSramAndDdrTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the creation of NAT tables in both DDR + * and in SRAM. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smAddSramAndDdrTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_add_args* args = (table_add_args*) arb_data_ptr; + + uint32_t public_ip_addr = args->public_ip_addr; + uint16_t number_of_entries = args->number_of_entries; + uint32_t* tbl_hdl_ptr = args->tbl_hdl; + + uint32_t tbl_hdl; + + int ret; + + UNUSED(tbl_hdl_ptr); + + IPADBG("In\n"); + + nati_obj_ptr->tot_rules_in_table[SRAM_SUB] = 0; + nati_obj_ptr->tot_rules_in_table[DDR_SUB] = 0; + + ipa_nat_map_clear(nati_obj_ptr->map_pairs[SRAM_SUB].orig2new_map); + ipa_nat_map_clear(nati_obj_ptr->map_pairs[SRAM_SUB].new2orig_map); + ipa_nat_map_clear(nati_obj_ptr->map_pairs[DDR_SUB].orig2new_map); + ipa_nat_map_clear(nati_obj_ptr->map_pairs[DDR_SUB].new2orig_map); + + ret = _smAddSramTbl(nati_obj_ptr, trigger, arb_data_ptr); + + if ( ret == 0 ) + { + if ( nati_obj_ptr->tot_slots_in_sram >= number_of_entries ) + { + /* + * The number of slots in SRAM can accommodate what was + * being requested for DDR, hence no need to use DDR and + * we will continue by using SRAM only... + */ + SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_SRAM_ONLY); + } + else + { + /* + * SRAM not big enough. Let's create secondary DDR based + * table... 
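+ *
+ * The DDR table is still created with the caller's requested size;
+ * a local handle is passed below so that the handle already handed
+ * back to the application (the SRAM one) is left untouched.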
+ */ + table_add_args new_args = { + .public_ip_addr = public_ip_addr, + .number_of_entries = number_of_entries, + .tbl_hdl = &tbl_hdl, /* to protect app's table handle above */ + }; + + ret = _smAddDdrTbl(nati_obj_ptr, trigger, (void*) &new_args); + + if ( ret == 0 ) + { + /* + * The following will tell the IPA to change focus to + * SRAM... + */ + ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_GOTO_SRAM, 0); + } + } + } + else + { + /* + * SRAM table creation in HYBRID mode failed. Can we fall + * back to DDR only? We need to try and see what happens... + */ + ret = _smAddDdrTbl(nati_obj_ptr, trigger, arb_data_ptr); + + if ( ret == 0 ) + { + SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_DDR_ONLY); + } + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smDelSramAndDdrTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the destruction of the SRAM, then DDR + * based NAT tables. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smDelSramAndDdrTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + int ret; + + IPADBG("In\n"); + + nati_obj_ptr->tot_rules_in_table[SRAM_SUB] = 0; + nati_obj_ptr->tot_rules_in_table[DDR_SUB] = 0; + + ipa_nat_map_clear(nati_obj_ptr->map_pairs[SRAM_SUB].orig2new_map); + ipa_nat_map_clear(nati_obj_ptr->map_pairs[SRAM_SUB].new2orig_map); + ipa_nat_map_clear(nati_obj_ptr->map_pairs[DDR_SUB].orig2new_map); + ipa_nat_map_clear(nati_obj_ptr->map_pairs[DDR_SUB].new2orig_map); + + ret = _smDelTbl(nati_obj_ptr, trigger, arb_data_ptr); + + if ( ret == 0 ) + { + table_del_args new_args = { + .tbl_hdl = nati_obj_ptr->ddr_tbl_hdl, + }; + + ret = _smDelTbl(nati_obj_ptr, trigger, (void*) &new_args); + } + + if ( ret == 0 ) + { + /* + * The following will create the preferred "initial state" for + * restart... + */ + BACK2_UNSTARTED_STATE(); + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smClrTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the clearing of a table. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smClrTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_clear_args* args = (table_clear_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + + enum ipa3_nat_mem_in nmi; + uint32_t unused_hdl, sub; + + int ret; + + UNUSED(trigger); + + IPADBG("In\n"); + + IPADBG("tbl_hdl(0x%08X)\n", tbl_hdl); + + BREAK_TBL_HDL(tbl_hdl, nmi, unused_hdl); + + unused_hdl++; /* to avoid compiler usage warning */ + + if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) { + IPAERR("Bad cache type\n"); + ret = -EINVAL; + goto bail; + } + + sub = (nmi == IPA_NAT_MEM_IN_SRAM) ? 
SRAM_SUB : DDR_SUB; + + nati_obj_ptr->tot_rules_in_table[sub] = 0; + + ipa_nat_map_clear(nati_obj.map_pairs[sub].orig2new_map); + ipa_nat_map_clear(nati_obj.map_pairs[sub].new2orig_map); + + ret = ipa_NATI_clear_ipv4_tbl(tbl_hdl); + +bail: + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smClrTblHybrid + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the clearing of the appropriate hybrid + * table. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smClrTblHybrid( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_clear_args* args = (table_clear_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + + table_clear_args new_args = { + .tbl_hdl = + (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ? + tbl_hdl : + nati_obj_ptr->ddr_tbl_hdl, + }; + + int ret; + + IPADBG("In\n"); + + ret = _smClrTbl(nati_obj_ptr, trigger, (void*) &new_args); + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smWalkTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the walk of a table. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smWalkTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_walk_args* args = (table_walk_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + WhichTbl2Use which = args->which; + ipa_table_walk_cb walk_cb = args->walk_cb; + void* wadp = args->arb_data_ptr; + + int ret; + + UNUSED(nati_obj_ptr); + UNUSED(trigger); + + IPADBG("In\n"); + + IPADBG("tbl_hdl(0x%08X)\n", tbl_hdl); + + ret = ipa_NATI_walk_ipv4_tbl(tbl_hdl, which, walk_cb, wadp); + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smWalkTblHybrid + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the walk of the appropriate hybrid + * table. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smWalkTblHybrid( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_walk_args* args = (table_walk_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + WhichTbl2Use which = args->which; + ipa_table_walk_cb walk_cb = args->walk_cb; + void* wadp = args->arb_data_ptr; + + table_walk_args new_args = { + .tbl_hdl = + (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ? 
+ tbl_hdl : + nati_obj_ptr->ddr_tbl_hdl, + .which = which, + .walk_cb = walk_cb, + .arb_data_ptr = wadp, + }; + + int ret; + + IPADBG("In\n"); + + ret = _smWalkTbl(nati_obj_ptr, trigger, (void*) &new_args); + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smStatTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will get size/usage stats for a table. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smStatTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_stats_args* args = (table_stats_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + ipa_nati_tbl_stats* nat_stats_ptr = args->nat_stats_ptr; + ipa_nati_tbl_stats* idx_stats_ptr = args->idx_stats_ptr; + + int ret; + + UNUSED(nati_obj_ptr); + UNUSED(trigger); + + IPADBG("In\n"); + + IPADBG("tbl_hdl(0x%08X)\n", tbl_hdl); + + ret = ipa_NATI_ipv4_tbl_stats(tbl_hdl, nat_stats_ptr, idx_stats_ptr); + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smStatTblHybrid + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the retrieval of table size/usage stats + * for the appropriate hybrid table. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smStatTblHybrid( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + table_stats_args* args = (table_stats_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + ipa_nati_tbl_stats* nat_stats_ptr = args->nat_stats_ptr; + ipa_nati_tbl_stats* idx_stats_ptr = args->idx_stats_ptr; + + table_stats_args new_args = { + .tbl_hdl = + (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ? + tbl_hdl : + nati_obj_ptr->ddr_tbl_hdl, + .nat_stats_ptr = nat_stats_ptr, + .idx_stats_ptr = idx_stats_ptr, + }; + + int ret; + + IPADBG("In\n"); + + ret = _smStatTbl(nati_obj_ptr, trigger, (void*) &new_args); + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smAddRuleToTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the addtion of a NAT rule into the DDR + * based table. 
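+ *
+ * (Per the state machine table below, the same callback also
+ * services the SRAM-only state; the CHOOSE_CNTR() bump in the body
+ * keeps the per-memory rule count in step with whichever table is
+ * currently in focus.)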
+ * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smAddRuleToTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + rule_add_args* args = (rule_add_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + ipa_nat_ipv4_rule* clnt_rule = (ipa_nat_ipv4_rule*) args->clnt_rule; + uint32_t* rule_hdl = args->rule_hdl; + + char buf[1024]; + + int ret; + + UNUSED(buf); + UNUSED(nati_obj_ptr); + UNUSED(trigger); + + IPADBG("In\n"); + + IPADBG("tbl_hdl(0x%08X) clnt_rule_ptr(%p) rule_hdl_ptr(%p) %s\n", + tbl_hdl, clnt_rule, rule_hdl, + prep_nat_ipv4_rule_4print(clnt_rule, buf, sizeof(buf))); + + clnt_rule->redirect = clnt_rule->enable = clnt_rule->time_stamp = 0; + + ret = ipa_NATI_add_ipv4_rule(tbl_hdl, clnt_rule, rule_hdl); + + if ( ret == 0 ) + { + uint32_t* cnt_ptr = CHOOSE_CNTR(); + + (*cnt_ptr)++; + + IPADBG("rule_hdl value(%u or 0x%08X)\n", + *rule_hdl, *rule_hdl); + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smDelRuleFromTbl + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the deletion of a NAT rule from the DDR + * based table. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smDelRuleFromTbl( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + rule_del_args* args = (rule_del_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + uint32_t rule_hdl = args->rule_hdl; + + int ret; + + UNUSED(nati_obj_ptr); + UNUSED(trigger); + + IPADBG("In\n"); + + IPADBG("tbl_hdl(0x%08X) rule_hdl(%u)\n", tbl_hdl, rule_hdl); + + ret = ipa_NATI_del_ipv4_rule(tbl_hdl, rule_hdl); + + if ( ret == 0 ) + { + uint32_t* cnt_ptr = CHOOSE_CNTR(); + + (*cnt_ptr)--; + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smAddRuleHybrid + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the addition of a NAT rule into either + * the SRAM or DDR based table. + * + * *** !!! HOWEVER *** REMEMBER !!! *** + * + * We're here because we're in a HYBRID state...with the potential + * moving between SRAM and DDR. THIS HAS IMLICATIONS AS IT RELATES + * TO RULE MAPPING. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smAddRuleHybrid( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + rule_add_args* args = (rule_add_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + ipa_nat_ipv4_rule* clnt_rule = (ipa_nat_ipv4_rule*) args->clnt_rule; + uint32_t* rule_hdl = args->rule_hdl; + + rule_add_args new_args = { + .tbl_hdl = + (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ? + tbl_hdl : + nati_obj_ptr->ddr_tbl_hdl, + .clnt_rule = clnt_rule, + .rule_hdl = rule_hdl, + }; + + uint32_t orig2new_map, new2orig_map; + + int ret; + + IPADBG("In\n"); + + ret = _smAddRuleToTbl(nati_obj_ptr, trigger, (void*) &new_args); + + if ( ret == 0 ) + { + /* + * The rule_hdl is used to find a rule in the nat table. It + * is, in effect, an index into the table. 
The applcation + * above us retains it for future manipulation of the rule in + * the table. + * + * In hybrid mode, a rule can and will move between SRAM and + * DDR. Because of this, its handle will change. The + * application has only the original handle and doesn't know + * of the new handle. A mapping, used in hybrid mode, will + * maintain a relationship between the original handle and the + * rule's current real handle... + * + * To help you get a mindset of how this is done: + * + * The original handle will map (point) to the new and new + * handle will map (point) back to original. + * + * NOTE WELL: There are two sets of maps. One for each memory + * type... + */ + CHOOSE_MAPS(orig2new_map, new2orig_map); + + ret = ipa_nat_map_add(orig2new_map, *rule_hdl, *rule_hdl); + + if ( ret == 0 ) + { + ret = ipa_nat_map_add(new2orig_map, *rule_hdl, *rule_hdl); + } + } + else + { + if ( nati_obj_ptr->curr_state == NATI_STATE_HYBRID + && + ! nati_obj_ptr->hold_state ) + { + /* + * In hybrid mode, we always start in SRAM...hence + * NATI_STATE_HYBRID implies SRAM. The rule addition + * above did not work, meaning the SRAM table is full, + * hence let's jump to DDR... + * + * The following will focus us on DDR and cause the copy + * of data from SRAM to DDR. + */ + IPAINFO("Add of rule failed...attempting table switch\n"); + + ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_TBL_SWITCH, 0); + + if ( ret == 0 ) + { + SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_HYBRID_DDR); + + /* + * Now add the rule to DDR... + */ + ret = ipa_nati_statemach(nati_obj_ptr, trigger, arb_data_ptr); + } + } + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smDelRuleHybrid + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the deletion of a NAT rule from either + * the SRAM or DDR based table. + * + * *** !!! HOWEVER *** REMEMBER !!! *** + * + * We're here because we're in a HYBRID state...with the potential + * moving between SRAM and DDR. THIS HAS IMLICATIONS AS IT RELATES + * TO RULE MAPPING. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smDelRuleHybrid( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + rule_del_args* args = (rule_del_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + uint32_t orig_rule_hdl = args->rule_hdl; + + uint32_t new_rule_hdl; + + uint32_t orig2new_map, new2orig_map; + + int ret; + + IPADBG("In\n"); + + CHOOSE_MAPS(orig2new_map, new2orig_map); + + /* + * The rule_hdl is used to find a rule in the nat table. It is, + * in effect, an index into the table. The applcation above us + * retains it for future manipulation of the rule in the table. + * + * In hybrid mode, a rule can and will move between SRAM and DDR. + * Because of this, its handle will change. The application has + * only the original handle and doesn't know of the new handle. A + * mapping, used in hybrid mode, will maintain a relationship + * between the original handle and the rule's current real + * handle... + * + * To help you get a mindset of how this is done: + * + * The original handle will map (point) to the new and new + * handle will map (point) back to original. + * + * NOTE WELL: There are two sets of maps. One for each memory + * type... 
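+ *
+ * A small illustration (handle values made up): if a rule was
+ * originally added under handle 7 but now lives in the current
+ * table under handle 41, then orig2new_map holds 7 -> 41 and
+ * new2orig_map holds 41 -> 7. The lookup below translates the
+ * caller's 7 back into 41 before the actual delete is issued.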
+ */ + ret = ipa_nat_map_del(orig2new_map, orig_rule_hdl, &new_rule_hdl); + + if ( ret == 0 ) + { + rule_del_args new_args = { + .tbl_hdl = + (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ? + tbl_hdl : + nati_obj_ptr->ddr_tbl_hdl, + .rule_hdl = new_rule_hdl, + }; + + IPADBG("orig_rule_hdl(0x%08X) -> new_rule_hdl(0x%08X)\n", + orig_rule_hdl, new_rule_hdl); + + ipa_nat_map_del(new2orig_map, new_rule_hdl, NULL); + + ret = _smDelRuleFromTbl(nati_obj_ptr, trigger, (void*) &new_args); + + if ( ret == 0 && nati_obj_ptr->curr_state == NATI_STATE_HYBRID_DDR ) + { + /* + * We need to check when/if we can go back to SRAM. + * + * How/why can we go back? + * + * Given enough deletions, and when we get to a user + * defined threshold (ie. a percentage of what SRAM can + * hold), we can pop back to using SRAM. + */ + uint32_t* cnt_ptr = CHOOSE_CNTR(); + + if ( *cnt_ptr <= nati_obj_ptr->back_to_sram_thresh + && + ! nati_obj_ptr->hold_state ) + { + /* + * The following will focus us on SRAM and cause the copy + * of data from DDR to SRAM. + */ + IPAINFO("Switch back to SRAM threshold has been reached -> " + "Total rules in DDR(%u) <= SRAM THRESH(%u)\n", + *cnt_ptr, + nati_obj_ptr->back_to_sram_thresh); + + ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_TBL_SWITCH, 0); + + if ( ret == 0 ) + { + SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_HYBRID); + } + else + { + /* + * The following will force us stay in DDR for + * now, but the next delete will trigger the + * switch logic above to run again...perhaps it + * will work then. + */ + ret = 0; + } + } + } + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smGoToDdr + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the IPA to use the DDR based NAT + * table... + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smGoToDdr( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + int ret; + + UNUSED(trigger); + UNUSED(arb_data_ptr); + + IPADBG("In\n"); + + ret = ipa_NATI_post_ipv4_init_cmd(nati_obj_ptr->ddr_tbl_hdl); + + if ( ret == 0 ) + { + SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_HYBRID_DDR); + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smGoToSram + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause the IPA to use the SRAM based NAT + * table... 
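+ *
+ * (It does this by re-issuing the IPv4 init command against the
+ * SRAM table handle and, on success, marking the object as
+ * NATI_STATE_HYBRID, which implies SRAM is in use.)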
+ * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smGoToSram( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + int ret; + + UNUSED(trigger); + UNUSED(arb_data_ptr); + + IPADBG("In\n"); + + ret = ipa_NATI_post_ipv4_init_cmd(nati_obj_ptr->sram_tbl_hdl); + + if ( ret == 0 ) + { + SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_HYBRID); + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smSwitchFromDdrToSram + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause a copy of the DDR table to SRAM and then + * will make the IPA use the SRAM... + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smSwitchFromDdrToSram( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + nati_switch_stats* sw_stats_ptr = CHOOSE_SW_STATS(); + + uint32_t* cnt_ptr = CHOOSE_CNTR(); + + ipa_nati_tbl_stats nat_stats, idx_stats; + + const char* mem_type; + + uint64_t start, stop; + + int stats_ret, ret; + + bool collect_stats = (bool) arb_data_ptr; + + UNUSED(cnt_ptr); + UNUSED(trigger); + UNUSED(arb_data_ptr); + + IPADBG("In\n"); + + stats_ret = (collect_stats) ? + ipa_NATI_ipv4_tbl_stats( + nati_obj_ptr->ddr_tbl_hdl, &nat_stats, &idx_stats) : + -1; + + currTimeAs(TimeAsNanSecs, &start); + + /* + * First, switch focus to SRAM... + */ + ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_GOTO_SRAM, 0); + + if ( ret == 0 ) + { + /* + * Clear destination counter... + */ + nati_obj_ptr->tot_rules_in_table[SRAM_SUB] = 0; + + /* + * Clear destination SRAM maps... + */ + ipa_nat_map_clear(nati_obj.map_pairs[SRAM_SUB].orig2new_map); + ipa_nat_map_clear(nati_obj.map_pairs[SRAM_SUB].new2orig_map); + + /* + * Now copy DDR's content to SRAM... + */ + ret = ipa_nati_copy_ipv4_tbl( + nati_obj_ptr->ddr_tbl_hdl, + nati_obj_ptr->sram_tbl_hdl, + migrate_rule); + + currTimeAs(TimeAsNanSecs, &stop); + + if ( ret == 0 ) + { + sw_stats_ptr->pass += 1; + + IPADBG("Transistion from DDR to SRAM took %f microseconds\n", + (float) (stop - start) / 1000.0); + } + else + { + sw_stats_ptr->fail += 1; + } + + IPADBG("Transistion pass/fail counts (DDR to SRAM) PASS: %u FAIL: %u\n", + sw_stats_ptr->pass, + sw_stats_ptr->fail); + + if ( stats_ret == 0 ) + { + mem_type = ipa3_nat_mem_in_as_str(nat_stats.nmi); + + /* + * NAT table stats... 
+ */ + IPADBG("Able to add (%u) records to %s " + "NAT table of size (%u) or (%f) percent\n", + *cnt_ptr, + mem_type, + nat_stats.tot_ents, + ((float) *cnt_ptr / (float) nat_stats.tot_ents) * 100.0); + + IPADBG("Able to add (%u) records to %s " + "NAT BASE table of size (%u) or (%f) percent\n", + nat_stats.tot_base_ents_filled, + mem_type, + nat_stats.tot_base_ents, + ((float) nat_stats.tot_base_ents_filled / + (float) nat_stats.tot_base_ents) * 100.0); + + IPADBG("Able to add (%u) records to %s " + "NAT EXPN table of size (%u) or (%f) percent\n", + nat_stats.tot_expn_ents_filled, + mem_type, + nat_stats.tot_expn_ents, + ((float) nat_stats.tot_expn_ents_filled / + (float) nat_stats.tot_expn_ents) * 100.0); + + IPADBG("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + nat_stats.tot_chains, + nat_stats.min_chain_len, + nat_stats.max_chain_len, + nat_stats.avg_chain_len); + + /* + * INDEX table stats... + */ + IPADBG("Able to add (%u) records to %s " + "IDX table of size (%u) or (%f) percent\n", + *cnt_ptr, + mem_type, + idx_stats.tot_ents, + ((float) *cnt_ptr / (float) idx_stats.tot_ents) * 100.0); + + IPADBG("Able to add (%u) records to %s " + "IDX BASE table of size (%u) or (%f) percent\n", + idx_stats.tot_base_ents_filled, + mem_type, + idx_stats.tot_base_ents, + ((float) idx_stats.tot_base_ents_filled / + (float) idx_stats.tot_base_ents) * 100.0); + + IPADBG("Able to add (%u) records to %s " + "IDX EXPN table of size (%u) or (%f) percent\n", + idx_stats.tot_expn_ents_filled, + mem_type, + idx_stats.tot_expn_ents, + ((float) idx_stats.tot_expn_ents_filled / + (float) idx_stats.tot_expn_ents) * 100.0); + + IPADBG("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + idx_stats.tot_chains, + idx_stats.min_chain_len, + idx_stats.max_chain_len, + idx_stats.avg_chain_len); + + mem_type++; /* to avoid compiler usage warning */ + } + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smSwitchFromSramToDdr + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following will cause a copy of the SRAM table to DDR and then + * will make the IPA use the DDR... + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smSwitchFromSramToDdr( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + nati_switch_stats* sw_stats_ptr = CHOOSE_SW_STATS(); + + uint32_t* cnt_ptr = CHOOSE_CNTR(); + + ipa_nati_tbl_stats nat_stats, idx_stats; + + const char* mem_type; + + uint64_t start, stop; + + int stats_ret, ret; + + bool collect_stats = (bool) arb_data_ptr; + + UNUSED(cnt_ptr); + UNUSED(trigger); + UNUSED(arb_data_ptr); + + IPADBG("In\n"); + + stats_ret = (collect_stats) ? + ipa_NATI_ipv4_tbl_stats( + nati_obj_ptr->sram_tbl_hdl, &nat_stats, &idx_stats) : + -1; + + currTimeAs(TimeAsNanSecs, &start); + + /* + * First, switch focus to DDR... + */ + ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_GOTO_DDR, 0); + + if ( ret == 0 ) + { + /* + * Clear destination counter... + */ + nati_obj_ptr->tot_rules_in_table[DDR_SUB] = 0; + + /* + * Clear destination DDR maps... 
+ */ + ipa_nat_map_clear(nati_obj.map_pairs[DDR_SUB].orig2new_map); + ipa_nat_map_clear(nati_obj.map_pairs[DDR_SUB].new2orig_map); + + /* + * Now copy SRAM's content to DDR... + */ + ret = ipa_nati_copy_ipv4_tbl( + nati_obj_ptr->sram_tbl_hdl, + nati_obj_ptr->ddr_tbl_hdl, + migrate_rule); + + currTimeAs(TimeAsNanSecs, &stop); + + if ( ret == 0 ) + { + sw_stats_ptr->pass += 1; + + IPADBG("Transistion from SRAM to DDR took %f microseconds\n", + (float) (stop - start) / 1000.0); + } + else + { + sw_stats_ptr->fail += 1; + } + + IPADBG("Transistion pass/fail counts (SRAM to DDR) PASS: %u FAIL: %u\n", + sw_stats_ptr->pass, + sw_stats_ptr->fail); + + if ( stats_ret == 0 ) + { + mem_type = ipa3_nat_mem_in_as_str(nat_stats.nmi); + + /* + * NAT table stats... + */ + IPADBG("Able to add (%u) records to %s " + "NAT table of size (%u) or (%f) percent\n", + *cnt_ptr, + mem_type, + nat_stats.tot_ents, + ((float) *cnt_ptr / (float) nat_stats.tot_ents) * 100.0); + + IPADBG("Able to add (%u) records to %s " + "NAT BASE table of size (%u) or (%f) percent\n", + nat_stats.tot_base_ents_filled, + mem_type, + nat_stats.tot_base_ents, + ((float) nat_stats.tot_base_ents_filled / + (float) nat_stats.tot_base_ents) * 100.0); + + IPADBG("Able to add (%u) records to %s " + "NAT EXPN table of size (%u) or (%f) percent\n", + nat_stats.tot_expn_ents_filled, + mem_type, + nat_stats.tot_expn_ents, + ((float) nat_stats.tot_expn_ents_filled / + (float) nat_stats.tot_expn_ents) * 100.0); + + IPADBG("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + nat_stats.tot_chains, + nat_stats.min_chain_len, + nat_stats.max_chain_len, + nat_stats.avg_chain_len); + + /* + * INDEX table stats... + */ + IPADBG("Able to add (%u) records to %s " + "IDX table of size (%u) or (%f) percent\n", + *cnt_ptr, + mem_type, + idx_stats.tot_ents, + ((float) *cnt_ptr / (float) idx_stats.tot_ents) * 100.0); + + IPADBG("Able to add (%u) records to %s " + "IDX BASE table of size (%u) or (%f) percent\n", + idx_stats.tot_base_ents_filled, + mem_type, + idx_stats.tot_base_ents, + ((float) idx_stats.tot_base_ents_filled / + (float) idx_stats.tot_base_ents) * 100.0); + + IPADBG("Able to add (%u) records to %s " + "IDX EXPN table of size (%u) or (%f) percent\n", + idx_stats.tot_expn_ents_filled, + mem_type, + idx_stats.tot_expn_ents, + ((float) idx_stats.tot_expn_ents_filled / + (float) idx_stats.tot_expn_ents) * 100.0); + + IPADBG("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + idx_stats.tot_chains, + idx_stats.min_chain_len, + idx_stats.max_chain_len, + idx_stats.avg_chain_len); + + mem_type++; /* to avoid compiler usage warning */ + } + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smGetTmStmp + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * Retrieve rule's timestamp from NAT table. 
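+ *
+ * (A thin pass-through to ipa_NATI_query_timestamp(); the handles
+ * are used exactly as supplied. The hybrid variant below first
+ * translates the caller's rule handle and then reuses this routine.)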
+ * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smGetTmStmp( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + timestap_query_args* args = (timestap_query_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + uint32_t rule_hdl = args->rule_hdl; + uint32_t* time_stamp = args->time_stamp; + + int ret; + + UNUSED(nati_obj_ptr); + UNUSED(trigger); + UNUSED(arb_data_ptr); + + IPADBG("In\n"); + + IPADBG("tbl_hdl(0x%08X) rule_hdl(%u) time_stamp_ptr(%p)\n", + tbl_hdl, rule_hdl, time_stamp); + + ret = ipa_NATI_query_timestamp(tbl_hdl, rule_hdl, time_stamp); + + if ( ret == 0 ) + { + IPADBG("time_stamp(0x%08X)\n", *time_stamp); + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * FUNCTION: _smGetTmStmpHybrid + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * Retrieve rule's timestamp from the state approriate NAT table. + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smGetTmStmpHybrid( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + timestap_query_args* args = (timestap_query_args*) arb_data_ptr; + + uint32_t tbl_hdl = args->tbl_hdl; + uint32_t orig_rule_hdl = args->rule_hdl; + uint32_t* time_stamp = args->time_stamp; + + uint32_t new_rule_hdl; + + uint32_t orig2new_map, new2orig_map; + + int ret; + + IPADBG("In\n"); + + CHOOSE_MAPS(orig2new_map, new2orig_map); + + new2orig_map++; /* to avoid compiler usage warning */ + + ret = ipa_nat_map_find(orig2new_map, orig_rule_hdl, &new_rule_hdl); + + if ( ret == 0 ) + { + timestap_query_args new_args = { + .tbl_hdl = + (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ? + tbl_hdl : + nati_obj_ptr->ddr_tbl_hdl, + .rule_hdl = new_rule_hdl, + .time_stamp = time_stamp, + }; + + ret = _smGetTmStmp(nati_obj_ptr, trigger, (void*) &new_args); + } + + IPADBG("Out\n"); + + return ret; +} + +/******************************************************************************/ +/* + * The following table relates a nati object's state and a transition + * trigger to a callback... 
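+ *
+ * Dispatch is a plain two-dimensional lookup, roughly:
+ *
+ *   _state_mach_tbl[nati_obj.curr_state][trigger].sm_cb(
+ *       &nati_obj, trigger, arb_data_ptr);
+ *
+ * so every state carries an entry for every trigger, with
+ * _smUndef() covering the combinations that are not meaningful.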
+ */ +static nati_statemach_tuple +_state_mach_tbl[NATI_STATE_LAST+1][NATI_TRIG_LAST+1] = +{ + { + SM_ROW( NATI_STATE_NULL, NATI_TRIG_NULL, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_ADD_TABLE, _smFirstTbl ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_DEL_TABLE, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_CLR_TABLE, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_WLK_TABLE, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_TBL_STATS, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_ADD_RULE, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_DEL_RULE, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_TBL_SWITCH, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_GOTO_DDR, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_GOTO_SRAM, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_GET_TSTAMP, _smUndef ), + SM_ROW( NATI_STATE_NULL, NATI_TRIG_LAST, _smUndef ), + }, + + { + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_NULL, _smUndef ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_ADD_TABLE, _smAddDdrTbl ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_DEL_TABLE, _smDelTbl ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_CLR_TABLE, _smClrTbl ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_WLK_TABLE, _smWalkTbl ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_TBL_STATS, _smStatTbl ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_ADD_RULE, _smAddRuleToTbl ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_DEL_RULE, _smDelRuleFromTbl ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_TBL_SWITCH, _smUndef ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_GOTO_DDR, _smUndef ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_GOTO_SRAM, _smUndef ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_GET_TSTAMP, _smGetTmStmp ), + SM_ROW( NATI_STATE_DDR_ONLY, NATI_TRIG_LAST, _smUndef ), + }, + + { + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_NULL, _smUndef ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_ADD_TABLE, _smAddSramTbl ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_DEL_TABLE, _smDelTbl ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_CLR_TABLE, _smClrTbl ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_WLK_TABLE, _smWalkTbl ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_TBL_STATS, _smStatTbl ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_ADD_RULE, _smAddRuleToTbl ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_DEL_RULE, _smDelRuleFromTbl ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_TBL_SWITCH, _smUndef ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_GOTO_DDR, _smUndef ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_GOTO_SRAM, _smUndef ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_GET_TSTAMP, _smGetTmStmp ), + SM_ROW( NATI_STATE_SRAM_ONLY, NATI_TRIG_LAST, _smUndef ), + }, + + { + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_NULL, _smUndef ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_ADD_TABLE, _smAddSramAndDdrTbl ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_DEL_TABLE, _smDelSramAndDdrTbl ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_CLR_TABLE, _smClrTblHybrid ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_WLK_TABLE, _smWalkTblHybrid ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_TBL_STATS, _smStatTblHybrid ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_ADD_RULE, _smAddRuleHybrid ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_DEL_RULE, _smDelRuleHybrid ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_TBL_SWITCH, _smSwitchFromSramToDdr ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_GOTO_DDR, _smGoToDdr ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_GOTO_SRAM, _smGoToSram ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_GET_TSTAMP, _smGetTmStmpHybrid ), + SM_ROW( NATI_STATE_HYBRID, NATI_TRIG_LAST, _smUndef ), + }, + + { + SM_ROW( NATI_STATE_HYBRID_DDR, 
NATI_TRIG_NULL, _smUndef ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_ADD_TABLE, _smUndef ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_DEL_TABLE, _smDelSramAndDdrTbl ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_CLR_TABLE, _smClrTblHybrid ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_WLK_TABLE, _smWalkTblHybrid ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_TBL_STATS, _smStatTblHybrid ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_ADD_RULE, _smAddRuleHybrid ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_DEL_RULE, _smDelRuleHybrid ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_TBL_SWITCH, _smSwitchFromDdrToSram ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_GOTO_DDR, _smGoToDdr ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_GOTO_SRAM, _smGoToSram ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_GET_TSTAMP, _smGetTmStmpHybrid ), + SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_LAST, _smUndef ), + }, + + { + SM_ROW( NATI_STATE_LAST, NATI_TRIG_NULL, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_ADD_TABLE, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_DEL_TABLE, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_CLR_TABLE, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_WLK_TABLE, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_TBL_STATS, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_ADD_RULE, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_DEL_RULE, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_TBL_SWITCH, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_GOTO_DDR, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_GOTO_SRAM, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_GET_TSTAMP, _smUndef ), + SM_ROW( NATI_STATE_LAST, NATI_TRIG_LAST, _smUndef ), + }, +}; + +/******************************************************************************/ +/* + * FUNCTION: _smUndef + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Whatever you like + * + * DESCRIPTION: + * + * The following does nothing, except report an undefined action for + * a particular state/trigger combo... + * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +static int _smUndef( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + UNUSED(arb_data_ptr); + + IPAERR("CB(%s): undefined action for STATE(%s) with TRIGGER(%s)\n", + _state_mach_tbl[nati_obj_ptr->curr_state][trigger].sm_cb_as_str, + _state_mach_tbl[nati_obj_ptr->curr_state][trigger].state_as_str, + _state_mach_tbl[nati_obj_ptr->curr_state][trigger].trigger_as_str); + + return -1; +} + +/******************************************************************************/ +/* + * FUNCTION: ipa_nati_statemach + * + * PARAMS: + * + * nati_obj_ptr (IN) A pointer to an initialized nati object + * + * trigger (IN) The trigger to run through the state machine + * + * arb_data_ptr (IN) Anything you like. Will be passed, untouched, + * to the state/trigger callback function. + * + * DESCRIPTION: + * + * This function allows a nati object and a trigger to be run + * through the state machine. 
+ * + * RETURNS: + * + * zero on success, otherwise non-zero + */ +int ipa_nati_statemach( + ipa_nati_obj* nati_obj_ptr, + ipa_nati_trigger trigger, + void* arb_data_ptr ) +{ + const char* ss_ptr = _state_mach_tbl[nati_obj_ptr->curr_state][trigger].state_as_str; + const char* ts_ptr = _state_mach_tbl[nati_obj_ptr->curr_state][trigger].trigger_as_str; + const char* cbs_ptr = _state_mach_tbl[nati_obj_ptr->curr_state][trigger].sm_cb_as_str; + + bool vote = false; + + int ret; + + UNUSED(ss_ptr); + UNUSED(ts_ptr); + UNUSED(cbs_ptr); + + IPADBG("In\n"); + + ret = take_mutex(); + + if ( ret != 0 ) + { + goto bail; + } + + IPADBG("STATE(%s) TRIGGER(%s) CB(%s)\n", ss_ptr, ts_ptr, cbs_ptr); + + vote = VOTE_REQUIRED(trigger); + + if ( vote ) + { + IPADBG("Voting clock on STATE(%s) TRIGGER(%s)\n", + ss_ptr, ts_ptr); + + if ( ipa_nat_vote_clock(IPA_APP_CLK_VOTE) != 0 ) + { + IPAERR("Voting failed STATE(%s) TRIGGER(%s)\n", ss_ptr, ts_ptr); + ret = -EINVAL; + goto unlock; + } + } + + ret = _state_mach_tbl[nati_obj_ptr->curr_state][trigger].sm_cb( + nati_obj_ptr, trigger, arb_data_ptr); + + if ( vote ) + { + IPADBG("Voting clock off STATE(%s) TRIGGER(%s)\n", + ss_ptr, ts_ptr); + + if ( ipa_nat_vote_clock(IPA_APP_CLK_DEVOTE) != 0 ) + { + IPAERR("Voting failed STATE(%s) TRIGGER(%s)\n", ss_ptr, ts_ptr); + } + } + +unlock: + ret = give_mutex(); + +bail: + IPADBG("Out\n"); + + return ret; +} diff --git a/ipanat/src/ipa_nat_utils.c b/ipanat/src/ipa_nat_utils.c new file mode 100644 index 0000000..e10207c --- /dev/null +++ b/ipanat/src/ipa_nat_utils.c @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2013, 2018-2019 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "ipa_nat_utils.h" +#include <sys/ioctl.h> +#include <errno.h> +#include <fcntl.h> +#include <unistd.h> +#include <pthread.h> +#include <stdlib.h> + +#define IPA_MAX_MSG_LEN 4096 + +static char dbg_buff[IPA_MAX_MSG_LEN]; + +#if !defined(MSM_IPA_TESTS) && !defined(USE_GLIB) && !defined(FEATURE_IPA_ANDROID) +size_t strlcpy(char* dst, const char* src, size_t size) +{ + size_t i; + + if (size == 0) + return strlen(src); + + for (i = 0; i < (size - 1) && src[i] != '\0'; ++i) + dst[i] = src[i]; + + dst[i] = '\0'; + + return i + strlen(src + i); +} +#endif + +ipa_descriptor* ipa_descriptor_open(void) +{ + ipa_descriptor* desc_ptr; + int res = 0; + + IPADBG("In\n"); + + desc_ptr = calloc(1, sizeof(ipa_descriptor)); + + if ( desc_ptr == NULL ) + { + IPAERR("Unable to allocate ipa_descriptor\n"); + goto bail; + } + + desc_ptr->fd = open(IPA_DEV_NAME, O_RDONLY); + + if (desc_ptr->fd < 0) + { + IPAERR("Unable to open ipa device\n"); + goto free; + } + + res = ioctl(desc_ptr->fd, IPA_IOC_GET_HW_VERSION, &desc_ptr->ver); + + if (res == 0) + { + IPADBG("IPA version is %d\n", desc_ptr->ver); + } + else + { + IPAERR("Unable to get IPA version. Error %d\n", res); + desc_ptr->ver = IPA_HW_None; + } + + goto bail; + +free: + free(desc_ptr); + desc_ptr = NULL; + +bail: + IPADBG("Out\n"); + + return desc_ptr; +} + +void ipa_descriptor_close( + ipa_descriptor* desc_ptr) +{ + IPADBG("In\n"); + + if ( desc_ptr ) + { + if ( desc_ptr->fd >= 0) + { + close(desc_ptr->fd); + } + free(desc_ptr); + } + + IPADBG("Out\n"); +} + +void ipa_read_debug_info( + const char* debug_file_path) +{ + size_t result; + FILE* debug_file; + + debug_file = fopen(debug_file_path, "r"); + if (debug_file == NULL) + { + printf("Failed to open %s\n", debug_file_path); + return; + } + + for (;;) + { + result = fread(dbg_buff, sizeof(char), IPA_MAX_MSG_LEN, debug_file); + if (!result) + break; + + if (result < IPA_MAX_MSG_LEN) + { + if (ferror(debug_file)) + { + printf("Failed to read from %s\n", debug_file_path); + break; + } + + dbg_buff[result] = '\0'; + } + else + { + dbg_buff[IPA_MAX_MSG_LEN - 1] = '\0'; + } + + + printf("%s", dbg_buff); + + if (feof(debug_file)) + break; + } + fclose(debug_file); +} + +void log_nat_message(char *msg) +{ + UNUSED(msg); + + return; +} + +int currTimeAs( + TimeAs_t timeAs, + uint64_t* valPtr ) +{ + struct timespec timeSpec; + + int ret = 0; + + if ( ! VALID_TIMEAS(timeAs) || ! valPtr ) + { + IPAERR("Bad arg: timeAs (%u) and/or valPtr (%p)\n", + timeAs, valPtr ); + ret = -1; + goto bail; + } + + memset(&timeSpec, 0, sizeof(timeSpec)); + + if ( clock_gettime(CLOCK_MONOTONIC, &timeSpec) != 0 ) + { + IPAERR("Can't get system clock time\n" ); + ret = -1; + goto bail; + } + + switch( timeAs ) + { + case TimeAsNanSecs: + *valPtr = + (uint64_t) (SECS2NanSECS((uint64_t) timeSpec.tv_sec) + + ((uint64_t) timeSpec.tv_nsec)); + break; + case TimeAsMicSecs: + *valPtr = + (uint64_t) (SECS2MicSECS((uint64_t) timeSpec.tv_sec) + + ((uint64_t) timeSpec.tv_nsec / 1000)); + break; + case TimeAsMilSecs: + *valPtr = + (uint64_t) (SECS2MilSECS((uint64_t) timeSpec.tv_sec) + + ((uint64_t) timeSpec.tv_nsec / 1000000)); + break; + } + +bail: + return ret; +} diff --git a/ipanat/src/ipa_table.c b/ipanat/src/ipa_table.c new file mode 100644 index 0000000..f73dff0 --- /dev/null +++ b/ipanat/src/ipa_table.c @@ -0,0 +1,1344 @@ +/* + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "ipa_table.h" +#include "ipa_nat_utils.h" + +#include <errno.h> + +#define IPA_BASE_TABLE_PERCENTAGE .8 +#define IPA_EXPANSION_TABLE_PERCENTAGE .2 + +#define IPA_BASE_TABLE_PCNT_4SRAM 1.00 +#define IPA_EXPANSION_TABLE_PCNT_4SRAM 0.43 + +/* + * The table number of entries is limited by Entry ID structure + * above. The base table max entries is limited by index into table + * bits number. 
+ * + * The table max ents number is: (base table max ents / base table percentage) + * + * IPA_TABLE_MAX_ENTRIES = 2^(index into table) / IPA_BASE_TABLE_PERCENTAGE + */ + +static int InsertHead( + ipa_table* table, + void* rec_ptr, /* empty record in table */ + uint16_t rec_index, /* index of record above */ + void* user_data, + struct ipa_ioc_nat_dma_cmd* cmd ); + +static int InsertTail( + ipa_table* table, + void* rec_ptr, /* occupied record at index below */ + uint16_t* rec_index_ptr, /* pointer to index of record above */ + void* user_data, + struct ipa_ioc_nat_dma_cmd* cmd ); + +static uint16_t MakeEntryHdl( + ipa_table* tbl, + uint16_t tbl_entry ); + +static int FindExpnTblFreeEntry( + ipa_table* table, + void** free_entry, + uint16_t* entry_index ); + +static int Get2PowerTightUpperBound( + uint16_t num); + +static int GetEvenTightUpperBound( + uint16_t num); + +void ipa_table_init( + ipa_table* table, + const char* table_name, + enum ipa3_nat_mem_in nmi, + int entry_size, + void* meta, + int meta_entry_size, + ipa_table_entry_interface* entry_interface ) +{ + IPADBG("In\n"); + + memset(table, 0, sizeof(ipa_table)); + + strlcpy(table->name, table_name, IPA_RESOURCE_NAME_MAX); + + table->nmi = nmi; + table->entry_size = entry_size; + table->meta = meta; + table->meta_entry_size = meta_entry_size; + table->entry_interface = entry_interface; + + IPADBG("Table %s with entry size %d has been initialized\n", + table->name, table->entry_size); + + IPADBG("Out\n"); +} + +int ipa_table_calculate_entries_num( + ipa_table* table, + uint16_t number_of_entries, + enum ipa3_nat_mem_in nmi) +{ + uint16_t table_entries, expn_table_entries; + float btp, etp; + int result = 0; + + IPADBG("In\n"); + + if (number_of_entries > IPA_TABLE_MAX_ENTRIES) + { + IPAERR("Required number of %s entries %d exceeds the maximum %d\n", + table->name, number_of_entries, IPA_TABLE_MAX_ENTRIES); + result = -EINVAL; + goto bail; + } + + if ( nmi == IPA_NAT_MEM_IN_SRAM ) + { + btp = IPA_BASE_TABLE_PCNT_4SRAM; + etp = IPA_EXPANSION_TABLE_PCNT_4SRAM; + } + else + { + btp = IPA_BASE_TABLE_PERCENTAGE; + etp = IPA_EXPANSION_TABLE_PERCENTAGE; + } + + table_entries = Get2PowerTightUpperBound(number_of_entries * btp); + expn_table_entries = GetEvenTightUpperBound(number_of_entries * etp); + + table->tot_tbl_ents = table_entries + expn_table_entries; + + if ( table->tot_tbl_ents > IPA_TABLE_MAX_ENTRIES ) + { + IPAERR("Required number of %s entries %u " + "(user provided %u) exceeds the maximum %u\n", + table->name, + table->tot_tbl_ents, + number_of_entries, + IPA_TABLE_MAX_ENTRIES); + result = -EINVAL; + goto bail; + } + + table->table_entries = table_entries; + table->expn_table_entries = expn_table_entries; + + IPADBG("Num of %s entries:%u expn entries:%u total entries:%u\n", + table->name, + table->table_entries, + table->expn_table_entries, + table->tot_tbl_ents); + +bail: + IPADBG("Out\n"); + + return result; +} + +int ipa_table_calculate_size(ipa_table* table) +{ + int size = table->entry_size * (table->table_entries + table->expn_table_entries); + + IPADBG("In\n"); + + IPADBG("%s size: %d\n", table->name, size); + + IPADBG("Out\n"); + + return size; +} + +uint8_t* ipa_table_calculate_addresses( + ipa_table* table, + uint8_t* base_addr) +{ + uint8_t* result = NULL; + + IPADBG("In\n"); + + table->table_addr = base_addr; + table->expn_table_addr = + table->table_addr + table->entry_size * table->table_entries; + + IPADBG("Table %s addresses: table_addr %pK expn_table_addr %pK\n", + table->name, table->table_addr, 
table->expn_table_addr); + + result = table->expn_table_addr + table->entry_size * table->expn_table_entries; + + IPADBG("Out\n"); + + return result; +} + +void ipa_table_reset( + ipa_table* table) +{ + uint32_t i, tot; + + IPADBG("In\n"); + + IPADBG("memset %s table to 0, %pK\n", table->name, table->table_addr); + tot = table->entry_size * table->table_entries; + for ( i = 0; i < tot; i++ ) + { + table->table_addr[i] = '\0'; + } + + IPADBG("memset %s expn table to 0, %pK\n", table->name, table->expn_table_addr); + tot = table->entry_size * table->expn_table_entries; + for ( i = 0; i < tot; i++ ) + { + table->expn_table_addr[i] = '\0'; + } + + IPADBG("Out\n"); +} + +int ipa_table_add_entry( + ipa_table* table, + void* user_data, + uint16_t* rec_index_ptr, + uint32_t* rule_hdl, + struct ipa_ioc_nat_dma_cmd* cmd ) +{ + void* rec_ptr; + int ret = 0, occupied; + + IPADBG("In\n"); + + rec_ptr = GOTO_REC(table, *rec_index_ptr); + + /* + * Check whether there is any collision + */ + occupied = table->entry_interface->entry_is_valid(rec_ptr); + + if ( ! occupied ) + { + IPADBG("Collision free (in %s) ... found open slot\n", table->name); + ret = InsertHead(table, rec_ptr, *rec_index_ptr, user_data, cmd); + } + else + { + IPADBG("Collision (in %s) ... will probe for open slot\n", table->name); + ret = InsertTail(table, rec_ptr, rec_index_ptr, user_data, cmd); + } + + if (ret) + goto bail; + + IPADBG("New Entry Index %u in %s\n", *rec_index_ptr, table->name); + + if ( rule_hdl ) { + *rule_hdl = MakeEntryHdl(table, *rec_index_ptr); + IPADBG("rule_hdl value(%u)\n", *rule_hdl); + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +void ipa_table_create_delete_command( + ipa_table* table, + struct ipa_ioc_nat_dma_cmd* cmd, + ipa_table_iterator* iterator) +{ + IPADBG("In\n"); + + IPADBG("Delete rule at index(0x%04X) in %s\n", + iterator->curr_index, + table->name); + + if ( ! VALID_INDEX(iterator->prev_index) ) + { + /* + * The following two assigns (ie. the defaults), will cause + * the enabled bit in the record to be set to 0. + */ + uint16_t data = 0; + dma_help_type ht = HELP_UPDATE_HEAD; + + if ( VALID_INDEX(iterator->next_index) ) + { + /* + * NOTE WELL HERE: + * + * This record is the first in a chain/list of + * records. Delete means something different in this + * context. + * + * The code below will cause the change of the protocol + * field in the rule record to 0xFF. It does not set the + * enable bit in the record to 0. This is done in special + * cases when the record being deleted is the first in a + * list of records. + * + * What does this mean? It means that the record is + * functionally deleted, but not really deleted. Why? + * Because the IPA will no longer use it because of the + * bad protocol (ie. functionally deleted), but these + * higher level APIs still see it as "enabled." + * + * This all means that deleted really means two things: 1) + * Not enabled, and 2) Not a valid record. APIs that walk + * the table...looking for enabled records (ie. the + * enabled bit)....now have to be a bit smarter to see the + * bad protocol as well. 
+ */ + data = table->entry_interface-> + entry_get_delete_head_dma_command_data( + iterator->curr_entry, iterator->next_entry); + + ht = HELP_DELETE_HEAD; + } + + ipa_table_add_dma_cmd(table, + ht, + iterator->curr_entry, + iterator->curr_index, + data, + cmd); + } + else + { + ipa_table_add_dma_cmd(table, + HELP_UPDATE_ENTRY, + iterator->prev_entry, + iterator->prev_index, + iterator->next_index, + cmd); + } + + IPADBG("Out\n"); +} + +void ipa_table_delete_entry( + ipa_table* table, + ipa_table_iterator* iterator, + uint8_t is_prev_empty) +{ + IPADBG("In\n"); + + if ( VALID_INDEX(iterator->next_index) ) + { + /* + * Update the next entry's prev_index field with current + * entry's prev_index + */ + table->entry_interface->entry_set_prev_index( + iterator->next_entry, + iterator->next_index, + iterator->prev_index, + table->meta, + table->table_entries); + } + else if (is_prev_empty) + { + if (iterator->prev_entry == NULL) + { + IPAERR("failed to delete of an empty head %d while delete the next entry %d in %s", + iterator->prev_index, iterator->curr_index, table->name); + } + else + { + /* + * Delete an empty head rule after the whole tail was deleted + */ + IPADBG("deleting the dead node %d for %s\n", + iterator->prev_index, table->name); + + memset(iterator->prev_entry, 0, table->entry_size); + + --table->cur_tbl_cnt; + } + } + + ipa_table_erase_entry(table, iterator->curr_index); + + IPADBG("Out\n"); +} + +void ipa_table_erase_entry( + ipa_table* table, + uint16_t index) +{ + void* entry = GOTO_REC(table, index); + + IPADBG("In\n"); + + IPADBG("table(%p) index(%u)\n", table, index); + + memset(entry, 0, table->entry_size); + + if ( index < table->table_entries ) + { + --table->cur_tbl_cnt; + } + else + { + --table->cur_expn_tbl_cnt; + } + + IPADBG("Out\n"); +} + +/** + * ipa_table_get_entry() - returns a table entry according to the received entry handle + * @table: [in] the table + * @entry_handle: [in] entry handle + * @entry: [out] the retrieved entry + * @entry_index: [out] absolute index of the retrieved entry + * + * Parse the entry handle to retrieve the entry and its index + * + * Returns: 0 on success, negative on failure + */ +int ipa_table_get_entry( + ipa_table* table, + uint32_t entry_handle, + void** entry, + uint16_t* entry_index ) +{ + enum ipa3_nat_mem_in nmi; + uint8_t is_expn_tbl; + uint16_t rec_index; + + int ret = 0; + + IPADBG("In\n"); + + IPADBG("table(%p) entry_handle(%u) entry(%p) entry_index(%p)\n", + table, entry_handle, entry, entry_index); + + /* + * Retrieve the memory and table type as well as the index + */ + BREAK_RULE_HDL(table, entry_handle, nmi, is_expn_tbl, rec_index); + + nmi++; /* to eliminate compiler usage warning */ + + if ( is_expn_tbl ) + { + IPADBG("Retrieving entry from expansion table\n"); + } + else + { + IPADBG("Retrieving entry from base (non-expansion) table\n"); + } + + if ( rec_index >= table->tot_tbl_ents ) + { + IPAERR("The entry handle's record index (%u) exceeds table size (%u)\n", + rec_index, table->tot_tbl_ents); + ret = -EINVAL; + goto bail; + } + + *entry = GOTO_REC(table, rec_index); + + if ( entry_index ) + { + *entry_index = rec_index; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +void* ipa_table_get_entry_by_index( + ipa_table* table, + uint16_t rec_index ) +{ + void* result = NULL; + + IPADBG("In\n"); + + IPADBG("table(%p) rec_index(%u)\n", + table, + rec_index); + + if ( ! 
rec_index || rec_index >= table->tot_tbl_ents ) + { + IPAERR("Invalid record index (%u): It's " + "either zero or exceeds table size (%u)\n", + rec_index, table->tot_tbl_ents); + goto bail; + } + + result = GOTO_REC(table, rec_index); + +bail: + IPADBG("Out\n"); + + return result; +} + +void ipa_table_dma_cmd_helper_init( + ipa_table_dma_cmd_helper* dma_cmd_helper, + uint8_t table_indx, + ipa_table_dma_type table_type, + ipa_table_dma_type expn_table_type, + uint32_t offset) +{ + IPADBG("In\n"); + + dma_cmd_helper->offset = offset; + dma_cmd_helper->table_indx = table_indx; + dma_cmd_helper->table_type = table_type; + dma_cmd_helper->expn_table_type = expn_table_type; + + IPADBG("Out\n"); +} + +void ipa_table_dma_cmd_generate( + ipa_table_dma_cmd_helper* dma_cmd_helper, + uint8_t is_expn, + uint32_t entry_offset, + uint16_t data, + struct ipa_ioc_nat_dma_cmd* cmd) +{ + struct ipa_ioc_nat_dma_one* dma = &cmd->dma[cmd->entries]; + + IPADBG("In\n"); + + IPADBG("is_expn(0x%02X) entry_offset(0x%08X) data(0x%04X)\n", + is_expn, entry_offset, data); + + dma->table_index = dma_cmd_helper->table_indx; + + /* + * DMA parameter base_addr is the table type (see the IPA + * architecture document) + */ + dma->base_addr = + (is_expn) ? + dma_cmd_helper->expn_table_type : + dma_cmd_helper->table_type; + + dma->offset = dma_cmd_helper->offset + entry_offset; + + dma->data = data; + + IPADBG("dma_entry[%u](table_index(0x%02X) " + "base_addr(0x%02X) data(0x%04X) offset(0x%08X))\n", + cmd->entries, + dma->table_index, + dma->base_addr, + dma->data, + dma->offset); + + cmd->entries++; + + IPADBG("Out\n"); +} + +int ipa_table_iterator_init( + ipa_table_iterator* iterator, + ipa_table* table, + void* curr_entry, + uint16_t curr_index) +{ + int occupied; + + int ret = 0; + + IPADBG("In\n"); + + memset(iterator, 0, sizeof(ipa_table_iterator)); + + occupied = table->entry_interface->entry_is_valid(curr_entry); + + if ( ! 
occupied ) + { + IPAERR("Invalid (not enabled) rule %u in %s\n", curr_index, table->name); + ret = -EINVAL; + goto bail; + } + + iterator->curr_entry = curr_entry; + iterator->curr_index = curr_index; + + iterator->prev_index = table->entry_interface->entry_get_prev_index( + curr_entry, + curr_index, + table->meta, + table->table_entries); + + iterator->next_index = table->entry_interface->entry_get_next_index( + curr_entry); + + if ( VALID_INDEX(iterator->prev_index) ) + { + iterator->prev_entry = ipa_table_get_entry_by_index( + table, + iterator->prev_index); + + if ( iterator->prev_entry == NULL ) + { + IPAERR("Failed to retrieve the entry at index 0x%04X for %s\n", + iterator->prev_index, table->name); + ret = -EPERM; + goto bail; + } + } + + if ( VALID_INDEX(iterator->next_index) ) + { + iterator->next_entry = ipa_table_get_entry_by_index( + table, + iterator->next_index); + + if ( iterator->next_entry == NULL ) + { + IPAERR("Failed to retrieve the entry at index 0x%04X for %s\n", + iterator->next_index, table->name); + ret = -EPERM; + goto bail; + } + } + + IPADBG("[index/entry] for " + "prev:[0x%04X/%p] " + "curr:[0x%04X/%p] " + "next:[0x%04X/%p] " + "\"%s\"\n", + iterator->prev_index, + iterator->prev_entry, + iterator->curr_index, + iterator->curr_entry, + iterator->next_index, + iterator->next_entry, + table->name); + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_table_iterator_next( + ipa_table_iterator* iterator, + ipa_table* table) +{ + int ret = 0; + + IPADBG("In\n"); + + iterator->prev_entry = iterator->curr_entry; + iterator->prev_index = iterator->curr_index; + iterator->curr_entry = iterator->next_entry; + iterator->curr_index = iterator->next_index; + + iterator->next_index = table->entry_interface->entry_get_next_index( + iterator->curr_entry); + + if ( ! VALID_INDEX(iterator->next_index) ) + { + iterator->next_entry = NULL; + } + else + { + iterator->next_entry = ipa_table_get_entry_by_index( + table, iterator->next_index); + + if (iterator->next_entry == NULL) + { + IPAERR("Failed to retrieve the entry at index %d for %s\n", + iterator->next_index, table->name); + ret = -EPERM; + goto bail; + } + } + + IPADBG("Iterator moved to: prev_index=%d curr_index=%d next_index=%d\n", + iterator->prev_index, iterator->curr_index, iterator->next_index); + + IPADBG(" prev_entry=%pK curr_entry=%pK next_entry=%pK\n", + iterator->prev_entry, iterator->curr_entry, iterator->next_entry); + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_table_iterator_end( + ipa_table_iterator* iterator, + ipa_table* table_ptr, + uint16_t rec_index, /* a table slot relative to hash */ + void* rec_ptr ) /* occupant record at index above */ +{ + bool found_end = false; + + int ret; + + IPADBG("In\n"); + + if ( ! iterator || ! table_ptr || ! rec_ptr ) + { + IPAERR("Bad arg: iterator(%p) and/or table_ptr (%p) and/or rec_ptr(%p)\n", + iterator, table_ptr, rec_ptr); + ret = -1; + goto bail; + } + + memset(iterator, 0, sizeof(ipa_table_iterator)); + + iterator->prev_index = rec_index; + iterator->prev_entry = rec_ptr; + + while ( 1 ) + { + uint16_t next_index = + table_ptr->entry_interface->entry_get_next_index(iterator->prev_entry); + + if ( ! 
VALID_INDEX(next_index) ) + { + found_end = true; + break; + } + + if ( next_index == iterator->prev_index ) + { + IPAERR("next_index(%u) and prev_index(%u) shouldn't be equal in %s\n", + next_index, + iterator->prev_index, + table_ptr->name); + break; + } + + iterator->prev_index = next_index; + iterator->prev_entry = GOTO_REC(table_ptr, next_index); + } + + if ( found_end ) + { + IPADBG("Iterator found end of list record\n"); + ret = 0; + } + else + { + IPAERR("Iterator can't find end of list record\n"); + ret = -1; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_table_iterator_is_head_with_tail( + ipa_table_iterator* iterator) +{ + int ret = 0; + + IPADBG("In\n"); + + ret = VALID_INDEX(iterator->next_index) && ! VALID_INDEX(iterator->prev_index); + + IPADBG("Out\n"); + + return ret; +} + +static int InsertHead( + ipa_table* table, + void* rec_ptr, /* empty record in table */ + uint16_t rec_index, /* index of record above */ + void* user_data, + struct ipa_ioc_nat_dma_cmd* cmd ) +{ + uint16_t enable_data = 0; + + int ret = 0; + + IPADBG("In\n"); + + ret = table->entry_interface->entry_head_insert( + rec_ptr, + user_data, + &enable_data); + + if (ret) + { + IPAERR("unable to insert a new entry to the head in %s\n", table->name); + goto bail; + } + + ipa_table_add_dma_cmd( + table, + HELP_UPDATE_HEAD, + rec_ptr, + rec_index, + enable_data, + cmd); + + ++table->cur_tbl_cnt; + +bail: + IPADBG("Out\n"); + + return ret; +} + +static int InsertTail( + ipa_table* table, + void* rec_ptr, /* occupied record at index below */ + uint16_t* rec_index_ptr, /* pointer to index of record above */ + void* user_data, + struct ipa_ioc_nat_dma_cmd* cmd ) +{ + bool is_index_tbl = (table->meta) ? true : false; + + ipa_table_iterator iterator; + + uint16_t enable_data = 0; + + int ret = 0; + + IPADBG("In\n"); + + /* + * The most important side effect of the following is to set the + * iterator's prev_index and prev_entry...which will be the last + * valid entry on the end of the list. + */ + ret = ipa_table_iterator_end(&iterator, table, *rec_index_ptr, rec_ptr); + + if ( ret ) + { + IPAERR("Failed to reach the end of list following rec_index(%u) in %s\n", + *rec_index_ptr, table->name); + goto bail; + } + + /* + * The most important side effect of the following is to set the + * iterator's curr_index and curr_entry with the next available + * expansion table open slot. + */ + ret = FindExpnTblFreeEntry(table, &iterator.curr_entry, &iterator.curr_index); + + if ( ret ) + { + IPAERR("FindExpnTblFreeEntry of %s failed\n", table->name); + goto bail; + } + + /* + * Copy data into curr_entry (ie. open slot). + */ + if ( is_index_tbl ) + { + ret = table->entry_interface->entry_tail_insert( + iterator.curr_entry, + user_data); + } + else + { + /* + * We need enable bit when not index table, hence... + */ + ret = table->entry_interface->entry_head_insert( + iterator.curr_entry, + user_data, + &enable_data); + } + + if (ret) + { + IPAERR("Unable to insert a new entry to the tail in %s\n", table->name); + goto bail; + } + + /* + * Update curr_entry's prev_index field with iterator.prev_index + */ + table->entry_interface->entry_set_prev_index( + iterator.curr_entry, /* set by FindExpnTblFreeEntry above */ + iterator.curr_index, /* set by FindExpnTblFreeEntry above */ + iterator.prev_index, /* set by ipa_table_iterator_end above */ + table->meta, + table->table_entries); + + if ( ! 
is_index_tbl ) + { + /* + * Generate dma command to have the IPA update the + * curr_entry's enable field when not the index table... + */ + ipa_table_add_dma_cmd( + table, + HELP_UPDATE_HEAD, + iterator.curr_entry, + iterator.curr_index, + enable_data, + cmd); + } + + /* + * Generate a dma command to have the IPA update the prev_entry's + * next_index with iterator.curr_index. + */ + ipa_table_add_dma_cmd( + table, + HELP_UPDATE_ENTRY, + iterator.prev_entry, + iterator.prev_index, + iterator.curr_index, + cmd); + + ++table->cur_expn_tbl_cnt; + + *rec_index_ptr = iterator.curr_index; + +bail: + IPADBG("Out\n"); + + return ret; +} + +/** + * MakeEntryHdl() - makes an entry handle + * @tbl_hdl: [in] tbl - the table + * @tbl_entry: [in] tbl_entry - table entry + * + * Calculate the entry handle which will be returned to client + * + * Returns: >0 table entry handle + */ +static uint16_t MakeEntryHdl( + ipa_table* tbl, + uint16_t tbl_entry ) +{ + uint16_t entry_hdl = 0; + + IPADBG("In\n"); + + if (tbl_entry >= tbl->table_entries) + { + /* + * Update the index into table + */ + entry_hdl = tbl_entry - tbl->table_entries; + entry_hdl = (entry_hdl << IPA_TABLE_TYPE_BITS); + /* + * Update the expansion table type bit + */ + entry_hdl = (entry_hdl | IPA_TABLE_TYPE_MASK); + } + else + { + entry_hdl = tbl_entry; + entry_hdl = (entry_hdl << IPA_TABLE_TYPE_BITS); + } + + /* + * Set memory type bit. + */ + entry_hdl = entry_hdl | (tbl->nmi << IPA_TABLE_TYPE_MEM_SHIFT); + + IPADBG("In: tbl_entry(%u) Out: entry_hdl(%u)\n", tbl_entry, entry_hdl); + + IPADBG("Out\n"); + + return entry_hdl; +} + +static int mt_slot( + ipa_table* table_ptr, + uint32_t rule_hdl, + void* record_ptr, + uint16_t record_index, + void* meta_record_ptr, + uint16_t meta_record_index, + void* arb_data_ptr ) +{ + UNUSED(table_ptr); + UNUSED(rule_hdl); + UNUSED(record_ptr); + UNUSED(meta_record_ptr); + UNUSED(meta_record_index); + UNUSED(arb_data_ptr); + + IPADBG("%s: Empty expansion slot: (%u) in table of size: (%u)\n", + table_ptr->name, + record_index, + table_ptr->tot_tbl_ents); + + return record_index; +} + +/* + * returns expn table entry absolute index + */ +static int FindExpnTblFreeEntry( + ipa_table* table, + void** free_entry, + uint16_t* entry_index ) +{ + int ret; + + IPADBG("In\n"); + + if ( ! table || ! free_entry || ! entry_index ) + { + IPAERR("Bad arg: table(%p) and/or " + "free_entry(%p) and/or entry_index(%p)\n", + table, free_entry, entry_index); + ret = -1; + goto bail; + } + + *entry_index = 0; + *free_entry = NULL; + + /* + * The following will start walk at expansion slots + * (ie. just after table->table_entries)... + */ + ret = ipa_table_walk(table, table->table_entries, WHEN_SLOT_EMPTY, mt_slot, 0); + + if ( ret > 0 ) + { + *entry_index = (uint16_t) ret; + + *free_entry = GOTO_REC(table, *entry_index); + + IPADBG("%s: entry_index val (%u) free_entry val (%p)\n", + table->name, + *entry_index, + *free_entry); + + ret = 0; + } + else + { + if ( ret < 0 ) + { + IPAERR("%s: While searching table for emtpy slot\n", + table->name); + } + else + { + IPADBG("%s: No empty slots (ie. 
expansion table full): " + "BASE (avail/used): (%u/%u) EXPN (avail/used): (%u/%u)\n", + table->name, + table->table_entries, + table->cur_tbl_cnt, + table->expn_table_entries, + table->cur_expn_tbl_cnt); + } + + ret = -1; + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +/** + * Get2PowerTightUpperBound() - Returns the tight upper bound which is a power of 2 + * @num: [in] given number + * + * Returns the tight upper bound for a given number which is power of 2 + * + * Returns: the tight upper bound which is power of 2 + */ +static int Get2PowerTightUpperBound(uint16_t num) +{ + uint16_t tmp = num, prev = 0, curr = 2; + + if (num == 0) + return 2; + + while (tmp != 1) + { + prev = curr; + curr <<= 1; + tmp >>= 1; + } + + return (num == prev) ? prev : curr; +} + +/** + * GetEvenTightUpperBound() - Returns the tight upper bound which is an even number + * @num: [in] given number + * + * Returns the tight upper bound for a given number which is an even number + * + * Returns: the tight upper bound which is an even number + */ +static int GetEvenTightUpperBound(uint16_t num) +{ + if (num == 0) + return 2; + + return (num % 2) ? num + 1 : num; +} + +int ipa_calc_num_sram_table_entries( + uint32_t sram_size, + uint32_t table1_ent_size, + uint32_t table2_ent_size, + uint16_t* num_entries_ptr) +{ + ipa_table nat_table; + ipa_table index_table; + int size = 0; + uint16_t tot; + + IPADBG("In\n"); + + IPADBG("sram_size(%x or %u)\n", sram_size, sram_size); + + *num_entries_ptr = 0; + + tot = 1; + + while ( 1 ) + { + IPADBG("Trying %u entries\n", tot); + + ipa_table_init(&nat_table, + "tmp_sram_table1", + IPA_NAT_MEM_IN_DDR, + table1_ent_size, + NULL, + 0, + NULL); + + ipa_table_init(&index_table, + "tmp_sram_table1", + IPA_NAT_MEM_IN_DDR, + table2_ent_size, + NULL, + 0, + NULL); + + nat_table.table_entries = index_table.table_entries = + Get2PowerTightUpperBound(tot * IPA_BASE_TABLE_PCNT_4SRAM); + nat_table.expn_table_entries = index_table.expn_table_entries = + GetEvenTightUpperBound(tot * IPA_EXPANSION_TABLE_PCNT_4SRAM); + + size = ipa_table_calculate_size(&nat_table); + size += ipa_table_calculate_size(&index_table); + + IPADBG("%u entries consumes size(0x%x or %u)\n", tot, size, size); + + if ( size > sram_size ) + break; + + *num_entries_ptr = tot; + + ++tot; + } + + IPADBG("Optimal number of entries: %u\n", *num_entries_ptr); + + IPADBG("Out\n"); + + return (*num_entries_ptr) ? 0 : -1; +} + +int ipa_table_walk( + ipa_table* ipa_tbl_ptr, + uint16_t start_index, + When2Callback when2cb, + ipa_table_walk_cb walk_cb, + void* arb_data_ptr ) +{ + uint16_t i; + uint32_t tot; + uint8_t* rec_ptr; + void* meta_record_ptr; + uint16_t meta_record_index; + + int ret = 0; + + IPADBG("In\n"); + + if ( ! ipa_tbl_ptr || + ! VALID_WHEN2CALLBACK(when2cb) || + ! walk_cb ) + { + IPAERR("Bad arg: ipa_tbl_ptr(%p) and/or " + "when2cb(%u) and/or walk_cb(%p)\n", + ipa_tbl_ptr, + when2cb, + walk_cb); + ret = -EINVAL; + goto bail; + } + + tot = + ipa_tbl_ptr->table_entries + + ipa_tbl_ptr->expn_table_entries; + + if ( start_index >= tot ) + { + IPAERR("Bad arg: start_index(%u)\n", start_index); + ret = -EINVAL; + goto bail; + } + + /* + * Go through table... + */ + for ( i = start_index, rec_ptr = GOTO_REC(ipa_tbl_ptr, start_index); + i < tot; + i++, rec_ptr += ipa_tbl_ptr->entry_size ) + { + bool call_back; + + if ( ipa_tbl_ptr->entry_interface->entry_is_valid(rec_ptr) ) + { + call_back = (when2cb == WHEN_SLOT_FILLED) ? true : false; + } + else + { + call_back = (when2cb == WHEN_SLOT_EMPTY) ? 
true : false; + } + + if ( call_back ) + { + uint32_t rule_hdl = MakeEntryHdl(ipa_tbl_ptr, i); + + meta_record_ptr = NULL; + meta_record_index = 0; + + if ( i >= ipa_tbl_ptr->table_entries && ipa_tbl_ptr->meta ) + { + meta_record_index = i - ipa_tbl_ptr->table_entries; + + meta_record_ptr = (uint8_t*) ipa_tbl_ptr->meta + + (meta_record_index * ipa_tbl_ptr->meta_entry_size); + } + + ret = walk_cb( + ipa_tbl_ptr, + rule_hdl, + rec_ptr, + i, + meta_record_ptr, + meta_record_index, + arb_data_ptr); + + if ( ret != 0 ) + { + if ( ret < 0 ) + { + IPAERR("walk_cb returned non-zero (%d)\n", ret); + } + else + { + IPADBG("walk_cb returned non-zero (%d)\n", ret); + } + goto bail; + } + } + } + +bail: + IPADBG("Out\n"); + + return ret; +} + +int ipa_table_add_dma_cmd( + ipa_table* tbl_ptr, + dma_help_type help_type, + void* rec_ptr, + uint16_t rec_index, + uint16_t data_for_entry, + struct ipa_ioc_nat_dma_cmd* cmd_ptr ) +{ + uint32_t tab_sz, entry_offset; + + uint8_t is_expn; + + int ret = 0; + + IPADBG("In\n"); + + if ( ! tbl_ptr || + ! VALID_DMA_HELP_TYPE(help_type) || + ! rec_ptr || + ! cmd_ptr ) + { + IPAERR("Bad arg: tbl_ptr(%p) and/or help_type(%u) " + "and/or rec_ptr(%p) and/or cmd_ptr(%p)\n", + tbl_ptr, + help_type, + rec_ptr, + cmd_ptr); + ret = -EINVAL; + goto bail; + } + + tab_sz = + tbl_ptr->table_entries + + tbl_ptr->expn_table_entries; + + if ( rec_index >= tab_sz ) + { + IPAERR("Bad arg: rec_index(%u)\n", rec_index); + ret = -EINVAL; + goto bail; + } + + is_expn = (rec_index >= tbl_ptr->table_entries); + + entry_offset = (uint8_t*) rec_ptr - + ((is_expn) ? tbl_ptr->expn_table_addr : tbl_ptr->table_addr); + + ipa_table_dma_cmd_generate( + tbl_ptr->dma_help[help_type], + is_expn, + entry_offset, + data_for_entry, + cmd_ptr); + +bail: + IPADBG("Out\n"); + + return ret; +} diff --git a/ipanat/test/Android.mk b/ipanat/test/Android.mk new file mode 100644 index 0000000..b026794 --- /dev/null +++ b/ipanat/test/Android.mk @@ -0,0 +1,57 @@ +BOARD_PLATFORM_LIST := test +ifeq ($(call is-board-platform-in-list,$(BOARD_PLATFORM_LIST)),true) +ifneq (,$(filter $(QCOM_BOARD_PLATFORMS),$(TARGET_BOARD_PLATFORM))) +ifneq (, $(filter aarch64 arm arm64, $(TARGET_ARCH))) + +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) + +LOCAL_C_INCLUDES := $(LOCAL_PATH)/ +LOCAL_C_INCLUDES += $(LOCAL_PATH)/../../ipanat/inc + +LOCAL_C_INCLUDES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include +LOCAL_ADDITIONAL_DEPENDENCIES := $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr + +LOCAL_MODULE := ipa_nat_test +LOCAL_SRC_FILES := \ + ipa_nat_testREG.c \ + ipa_nat_test000.c \ + ipa_nat_test001.c \ + ipa_nat_test002.c \ + ipa_nat_test003.c \ + ipa_nat_test004.c \ + ipa_nat_test005.c \ + ipa_nat_test006.c \ + ipa_nat_test007.c \ + ipa_nat_test008.c \ + ipa_nat_test009.c \ + ipa_nat_test010.c \ + ipa_nat_test011.c \ + ipa_nat_test012.c \ + ipa_nat_test013.c \ + ipa_nat_test014.c \ + ipa_nat_test015.c \ + ipa_nat_test016.c \ + ipa_nat_test017.c \ + ipa_nat_test018.c \ + ipa_nat_test019.c \ + ipa_nat_test020.c \ + ipa_nat_test021.c \ + ipa_nat_test022.c \ + ipa_nat_test023.c \ + ipa_nat_test024.c \ + ipa_nat_test025.c \ + ipa_nat_test999.c \ + main.c + +LOCAL_SHARED_LIBRARIES := libipanat + +LOCAL_MODULE_TAGS := debug +LOCAL_MODULE_PATH := $(TARGET_OUT_DATA)/kernel-tests/ip_accelerator + +include $(BUILD_EXECUTABLE) + +endif # $(TARGET_ARCH) +endif +endif diff --git a/ipanat/test/Makefile.am b/ipanat/test/Makefile.am new file mode 100644 index 0000000..cefa7ed --- /dev/null +++ b/ipanat/test/Makefile.am @@ -0,0 +1,46 @@ 
+AM_CPPFLAGS = -I./../inc \
+ -I$(top_srcdir)/ipanat/inc
+
+AM_CPPFLAGS += -Wall -Wundef -Wno-trigraphs
+AM_CPPFLAGS += -g -DDEBUG -DNAT_DEBUG
+
+ipanattest_SOURCES = \
+ ipa_nat_testREG.c \
+ ipa_nat_test000.c \
+ ipa_nat_test001.c \
+ ipa_nat_test002.c \
+ ipa_nat_test003.c \
+ ipa_nat_test004.c \
+ ipa_nat_test005.c \
+ ipa_nat_test006.c \
+ ipa_nat_test007.c \
+ ipa_nat_test008.c \
+ ipa_nat_test009.c \
+ ipa_nat_test010.c \
+ ipa_nat_test011.c \
+ ipa_nat_test012.c \
+ ipa_nat_test013.c \
+ ipa_nat_test014.c \
+ ipa_nat_test015.c \
+ ipa_nat_test016.c \
+ ipa_nat_test017.c \
+ ipa_nat_test018.c \
+ ipa_nat_test019.c \
+ ipa_nat_test020.c \
+ ipa_nat_test021.c \
+ ipa_nat_test022.c \
+ ipa_nat_test023.c \
+ ipa_nat_test024.c \
+ ipa_nat_test025.c \
+ ipa_nat_test999.c \
+ main.c
+
+bin_PROGRAMS = ipanattest
+
+requiredlibs = ../src/libipanat.la
+
+ipanattest_LDADD = $(requiredlibs)
+
+LOCAL_MODULE := libipanat
+LOCAL_PRELINK_MODULE := false
+include $(BUILD_SHARED_LIBRARY)
diff --git a/ipanat/test/README.txt b/ipanat/test/README.txt
new file mode 100644
index 0000000..9a84b10
--- /dev/null
+++ b/ipanat/test/README.txt
@@ -0,0 +1,66 @@
+INTRODUCTION
+------------
+
+The ipanattest allows its user to drive NAT testing. It is run as follows:
+
+# ipanattest [-d -r N -i N -e N -m mt]
+Where:
+ -d Each test is discrete (create table, add rules, destroy table)
+ If not specified, only one table create and destroy is done for all tests
+ -r N Where N is the number of times to run the inotify regression test
+ -i N Where N is the number of times (iterations) to run each test
+ -e N Where N is the number of entries in the NAT
+ -m mt Where mt is the type of memory to use for the NAT
+ Legal mt's: DDR, SRAM, or HYBRID (ie. use SRAM and DDR)
+ -g M-N Run tests M through N only
+
+More about each command line option:
+
+-d Makes each test discrete, meaning that each test will create a
+ table, add rules, then destroy the table.
+
+ Conversely, when -d is not specified, each test will not create
+ and destroy a table. Only one table create and destroy at the
+ start and end of the run...with all tests being run in between.
+
+-r N Will cause the inotify regression test to be run N times.
+
+-i N Will cause each test to be run N times
+
+-e N Will cause the creation of a table with N entries
+
+-m mt Will cause the NAT to live in either SRAM, DDR, or both
+ (ie. HYBRID)
+
+-g M-N Will cause tests M through N to be run. This allows you to skip
+ or isolate tests
+
+When run with no arguments (ie. defaults):
+
+ 1) The tests will be non-discrete
+ 2) With only one iteration of the tests
+ 3) On a DDR-based table with one hundred entries
+ 4) No inotify regression will be run
+
+EXAMPLE COMMAND LINES
+---------------------
+
+To execute discrete tests (create, add rules, and delete table for
+each test) one time on a table with one hundred entries:
+
+# ipanattest -d -i 1 -e 100
+
+To execute non-discrete (create and delete table only once) tests five
+times on a table with thirty-two entries:
+
+# ipanattest -i 5 -e 32
+
+To execute the inotify regression test 5 times:
+
+# ipanattest -r 5
+
+ADDING NEW TESTS
+----------------
+
+In main.c, please see and embellish nt_array[] and use the following
+file as a model: ipa_nat_testMODEL.c
diff --git a/ipanat/test/ipa_nat_test.h b/ipanat/test/ipa_nat_test.h
new file mode 100644
index 0000000..150e4b6
--- /dev/null
+++ b/ipanat/test/ipa_nat_test.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * =========================================================================== + * + * INCLUDE FILES FOR MODULE + * + * =========================================================================== + */ +#include <stdio.h> +#include <stdint.h> +#include <stdbool.h> +#include <stdlib.h> +#include <time.h> +#include <netinet/in.h> /* for proto definitions */ + +#include "ipa_nat_drv.h" +#include "ipa_nat_drvi.h" + +#undef array_sz +#define array_sz(a) \ + ( sizeof(a)/sizeof(a[0]) ) + +#define u32 uint32_t +#define u16 uint16_t +#define u8 uint8_t + +#define RAN_ADDR rand_ip_addr() +#define RAN_PORT rand_ip_port() + +static inline u32 rand_ip_addr() +{ + static char buf[64]; + + snprintf( + buf, sizeof(buf), + "%u.%u.%u.%u", + (rand() % 254) + 1, + rand() % 255, + rand() % 255, + (rand() % 254) + 1); + + return (u32) inet_addr(buf); +} + +static inline u16 rand_ip_port() +{ + return (u16) ((rand() % 60535) + 5000); +} + +/*============ Preconditions to run NAT Test cases =========*/ +#define IPA_NAT_TEST_PRE_COND_TE 20 + +#define CHECK_ERR(x) \ + if ( x ) { \ + IPAERR("Abrupt end of %s with " \ + "err: %d at line: %d\n", \ + __FUNCTION__, x, __LINE__); \ + return -1; \ + } + +#define CHECK_ERR_TBL_STOP(x, th) \ + if ( th ) { \ + int _ter_ = ipa_nat_validate_ipv4_table(th); \ + if ( _ter_ ) { \ + if ( sep ) { \ + ipa_nat_del_ipv4_tbl(th); \ + } \ + IPAERR("Abrupt end of %s with " \ + "err: %d at line: %d\n", \ + __FUNCTION__, _ter_, __LINE__); \ + return -1; \ + } \ + } \ + if ( x ) { \ + if ( th ) { \ + ipa_nat_dump_ipv4_table(th); \ + if( sep ) { \ + ipa_nat_del_ipv4_tbl(th); \ + } \ + } \ + IPAERR("Abrupt end of %s with " \ + "err: %d at line: %d\n", \ + __FUNCTION__, x, __LINE__); \ + return -1; \ + } + +#define CHECK_ERR_TBL_ACTION(x, th, action) \ + if ( th ) { \ + int _ter_ = ipa_nat_validate_ipv4_table(th); \ + if ( _ter_ ) { \ + IPAERR("ipa_nat_validate_ipv4_table() failed " \ + "in: %s at line: %d\n", \ + __FUNCTION__, __LINE__); \ + action; \ + } \ + } \ + if ( x ) { \ + if ( th ) { \ + 
ipa_nat_dump_ipv4_table(th); \ + } \ + IPAERR("error: %d in %s at line: %d\n", \ + x, __FUNCTION__, __LINE__); \ + action; \ + } + +typedef int (*NatTestFunc)( + const char*, u32, int, u32, int, void*); + +typedef struct +{ + const char* func_name; + int num_ents_trigger; + int test_hold_time_in_secs; + NatTestFunc func; +} NatTests; + +#undef NAT_TEST_ENTRY +#define NAT_TEST_ENTRY(f, n, ht) \ + {#f, (n), (ht), f} + +#define NAT_DEBUG +int ipa_nat_validate_ipv4_table(u32); + +int ipa_nat_testREG(const char*, u32, int, u32, int, void*); + +int ipa_nat_test000(const char*, u32, int, u32, int, void*); +int ipa_nat_test001(const char*, u32, int, u32, int, void*); +int ipa_nat_test002(const char*, u32, int, u32, int, void*); +int ipa_nat_test003(const char*, u32, int, u32, int, void*); +int ipa_nat_test004(const char*, u32, int, u32, int, void*); +int ipa_nat_test005(const char*, u32, int, u32, int, void*); +int ipa_nat_test006(const char*, u32, int, u32, int, void*); +int ipa_nat_test007(const char*, u32, int, u32, int, void*); +int ipa_nat_test008(const char*, u32, int, u32, int, void*); +int ipa_nat_test009(const char*, u32, int, u32, int, void*); +int ipa_nat_test010(const char*, u32, int, u32, int, void*); +int ipa_nat_test011(const char*, u32, int, u32, int, void*); +int ipa_nat_test012(const char*, u32, int, u32, int, void*); +int ipa_nat_test013(const char*, u32, int, u32, int, void*); +int ipa_nat_test014(const char*, u32, int, u32, int, void*); +int ipa_nat_test015(const char*, u32, int, u32, int, void*); +int ipa_nat_test016(const char*, u32, int, u32, int, void*); +int ipa_nat_test017(const char*, u32, int, u32, int, void*); +int ipa_nat_test018(const char*, u32, int, u32, int, void*); +int ipa_nat_test019(const char*, u32, int, u32, int, void*); +int ipa_nat_test020(const char*, u32, int, u32, int, void*); +int ipa_nat_test021(const char*, u32, int, u32, int, void*); +int ipa_nat_test022(const char*, u32, int, u32, int, void*); +int ipa_nat_test023(const char*, u32, int, u32, int, void*); +int ipa_nat_test024(const char*, u32, int, u32, int, void*); +int ipa_nat_test025(const char*, u32, int, u32, int, void*); +int ipa_nat_test999(const char*, u32, int, u32, int, void*); diff --git a/ipanat/test/ipa_nat_test000.c b/ipanat/test/ipa_nat_test000.c new file mode 100644 index 0000000..764a048 --- /dev/null +++ b/ipanat/test/ipa_nat_test000.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test000.c + + @brief + Verify the following scenario: + 1. Add ipv4 table +*/ +/*===========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test000( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + + int ret; + + IPADBG("In\n"); + + if ( ! sep ) + { + IPADBG("calling ipa_nat_add_ipv4_tbl()\n"); + + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, tbl_hdl_ptr); + CHECK_ERR_TBL_STOP(ret, *tbl_hdl_ptr); + + IPADBG("create nat ipv4 table successfully()\n"); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test001.c b/ipanat/test/ipa_nat_test001.c new file mode 100644 index 0000000..08942d2 --- /dev/null +++ b/ipanat/test/ipa_nat_test001.c @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test001.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. Add ipv4 rule + 3. 
Delete ipv4 table +*/ +/*===========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test001( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl; + ipa_nat_ipv4_rule ipv4_rule = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test002.c b/ipanat/test/ipa_nat_test002.c new file mode 100644 index 0000000..83992ec --- /dev/null +++ b/ipanat/test/ipa_nat_test002.c @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test002.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. Add ipv4 rule + 3. delete ipv4 rule + 4. 
Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test002( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl; + ipa_nat_ipv4_rule ipv4_rule = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test003.c b/ipanat/test/ipa_nat_test003.c new file mode 100644 index 0000000..6082620 --- /dev/null +++ b/ipanat/test/ipa_nat_test003.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/*=========================================================================*/ +/*! + @file + IPA_NAT_ipa_nat_test003.cpp + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. Add ipv4 rule + 3. Add ipv4 rule + 4. 
Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test003( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + + ipa_nat_ipv4_rule ipv4_rule = {0}; + u32 rule_hdl; + + int ret; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test004.c b/ipanat/test/ipa_nat_test004.c new file mode 100644 index 0000000..32936f1 --- /dev/null +++ b/ipanat/test/ipa_nat_test004.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test004.cpp + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. Delete a bogus table handle + 3. 
Delete ipv4 table +*/ +/*===========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test004( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret = 0; + u32 tbl_hdl1 = 0xFFFFFFFF; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_del_ipv4_tbl(tbl_hdl1); /* intentionally pass bad handle */ + + if ( ret == 0 ) + { + IPAERR("Able to delete table using invalid table handle\n"); + CHECK_ERR_TBL_STOP(-1, tbl_hdl); + } + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test005.c b/ipanat/test/ipa_nat_test005.c new file mode 100644 index 0000000..8ee78fc --- /dev/null +++ b/ipanat/test/ipa_nat_test005.c @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test005.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. Add ipv4 rule + 3. Delete ipv4 rule + 4. Add ipv4 rule + 5. 
Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test005( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + ipa_nat_ipv4_rule ipv4_rule = {0}; + u32 rule_hdl; + + int ret; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test006.c b/ipanat/test/ipa_nat_test006.c new file mode 100644 index 0000000..7de4f91 --- /dev/null +++ b/ipanat/test/ipa_nat_test006.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test006.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. Add ipv rule + 3. Add same ipv rule + 4. Delete first followed by second + 5. 
Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test006( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + + ipa_nat_ipv4_rule ipv4_rule = {0}; + + u32 rule_hdl; + u32 rule_hdl1; + + int ret; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test007.c b/ipanat/test/ipa_nat_test007.c new file mode 100644 index 0000000..4d26592 --- /dev/null +++ b/ipanat/test/ipa_nat_test007.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test007.cpp + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add same ipv rules + 3. delete second followed by first + 4. 
Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test007( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + + ipa_nat_ipv4_rule ipv4_rule = {0}; + + u32 rule_hdl; + u32 rule_hdl1; + + int ret; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test008.c b/ipanat/test/ipa_nat_test008.c new file mode 100644 index 0000000..350e6f9 --- /dev/null +++ b/ipanat/test/ipa_nat_test008.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test008.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add 2 distinct rules + 3. delete first followed by second + 4. 
Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test008( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl, rule_hdl1; + ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + ipv4_rule1.target_ip = RAN_ADDR; + ipv4_rule1.target_port = RAN_PORT; + ipv4_rule1.private_ip = RAN_ADDR; + ipv4_rule1.private_port = RAN_PORT; + ipv4_rule1.protocol = IPPROTO_TCP; + ipv4_rule1.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test009.c b/ipanat/test/ipa_nat_test009.c new file mode 100644 index 0000000..7cf74a4 --- /dev/null +++ b/ipanat/test/ipa_nat_test009.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test009.cpp + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add 2 distinct rules + 3. delete second followed by first + 4. 
Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test009( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl, rule_hdl1; + ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + ipv4_rule1.target_ip = RAN_ADDR; + ipv4_rule1.target_port = RAN_PORT; + ipv4_rule1.private_ip = RAN_ADDR; + ipv4_rule1.private_port = RAN_PORT; + ipv4_rule1.protocol = IPPROTO_TCP; + ipv4_rule1.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test010.c b/ipanat/test/ipa_nat_test010.c new file mode 100644 index 0000000..7d9cd88 --- /dev/null +++ b/ipanat/test/ipa_nat_test010.c @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test010.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add 3 distinct ipv4 rules + 3. delete first, second followed by last + 4. 
Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test010( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl, rule_hdl1, rule_hdl2; + ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0}, ipv4_rule2 = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + ipv4_rule1.target_ip = RAN_ADDR; + ipv4_rule1.target_port = RAN_PORT; + ipv4_rule1.private_ip = RAN_ADDR; + ipv4_rule1.private_port = RAN_PORT; + ipv4_rule1.protocol = IPPROTO_TCP; + ipv4_rule1.public_port = RAN_PORT; + + ipv4_rule2.target_ip = RAN_ADDR; + ipv4_rule2.target_port = RAN_PORT; + ipv4_rule2.private_ip = RAN_ADDR; + ipv4_rule2.private_port = RAN_PORT; + ipv4_rule2.protocol = IPPROTO_TCP; + ipv4_rule2.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test011.c b/ipanat/test/ipa_nat_test011.c new file mode 100644 index 0000000..525a37f --- /dev/null +++ b/ipanat/test/ipa_nat_test011.c @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test011.cpp + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add 3 distinct ipv4 rules + 3. delete second, first followed by last + 4. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test011( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl, rule_hdl1, rule_hdl2; + ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0}, ipv4_rule2 = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + ipv4_rule1.target_ip = RAN_ADDR; + ipv4_rule1.target_port = RAN_PORT; + ipv4_rule1.private_ip = RAN_ADDR; + ipv4_rule1.private_port = RAN_PORT; + ipv4_rule1.protocol = IPPROTO_TCP; + ipv4_rule1.public_port = RAN_PORT; + + ipv4_rule2.target_ip = RAN_ADDR; + ipv4_rule2.target_port = RAN_PORT; + ipv4_rule2.private_ip = RAN_ADDR; + ipv4_rule2.private_port = RAN_PORT; + ipv4_rule2.protocol = IPPROTO_TCP; + ipv4_rule2.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test012.c b/ipanat/test/ipa_nat_test012.c new file mode 100644 index 0000000..81c7d72 --- /dev/null +++ b/ipanat/test/ipa_nat_test012.c @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test012.cpp + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add 3 distinct ipv4 rules + 3. Delete third, second, first + 4. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test012( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl, rule_hdl1, rule_hdl2; + ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0}, ipv4_rule2 = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + ipv4_rule1.target_ip = RAN_ADDR; + ipv4_rule1.target_port = RAN_PORT; + ipv4_rule1.private_ip = RAN_ADDR; + ipv4_rule1.private_port = RAN_PORT; + ipv4_rule1.protocol = IPPROTO_TCP; + ipv4_rule1.public_port = RAN_PORT; + + ipv4_rule2.target_ip = RAN_ADDR; + ipv4_rule2.target_port = RAN_PORT; + ipv4_rule2.private_ip = RAN_ADDR; + ipv4_rule2.private_port = RAN_PORT; + ipv4_rule2.protocol = IPPROTO_TCP; + ipv4_rule2.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test013.c b/ipanat/test/ipa_nat_test013.c new file mode 100644 index 0000000..d962065 --- 
/dev/null +++ b/ipanat/test/ipa_nat_test013.c @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test013.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add 3 distinct ipv4 rules + 3. Delete third, first and second + 4. 
Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test013( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl, rule_hdl1, rule_hdl2; + ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0}, ipv4_rule2 = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + ipv4_rule1.target_ip = RAN_ADDR; + ipv4_rule1.target_port = RAN_PORT; + ipv4_rule1.private_ip = RAN_ADDR; + ipv4_rule1.private_port = RAN_PORT; + ipv4_rule1.protocol = IPPROTO_TCP; + ipv4_rule1.public_port = RAN_PORT; + + ipv4_rule2.target_ip = RAN_ADDR; + ipv4_rule2.target_port = RAN_PORT; + ipv4_rule2.private_ip = RAN_ADDR; + ipv4_rule2.private_port = RAN_PORT; + ipv4_rule2.protocol = IPPROTO_TCP; + ipv4_rule2.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test014.c b/ipanat/test/ipa_nat_test014.c new file mode 100644 index 0000000..3daa2d9 --- /dev/null +++ b/ipanat/test/ipa_nat_test014.c @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/*=========================================================================*/ +/*! + @file + ipa_nat_test014.cpp + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add same 3 ipv rules + 3. delete first, second and third + 4. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test014( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl1, rule_hdl2, rule_hdl3; + ipa_nat_ipv4_rule ipv4_rule = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test015.c b/ipanat/test/ipa_nat_test015.c new file mode 100644 index 0000000..1dc8f97 --- /dev/null +++ b/ipanat/test/ipa_nat_test015.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/*=========================================================================*/ +/*! + @file + ipa_nat_test015.cpp + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add same 3 ipv rules + 3. delete first, third and second + 4. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test015( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl1, rule_hdl2, rule_hdl3; + ipa_nat_ipv4_rule ipv4_rule = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test016.c b/ipanat/test/ipa_nat_test016.c new file mode 100644 index 0000000..f1ee4e6 --- /dev/null +++ b/ipanat/test/ipa_nat_test016.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/*=========================================================================*/ +/*! + @file + ipa_nat_test016.cpp + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add same 3 ipv rules + 3. delete second, first and third + 4. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test016( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl1, rule_hdl2, rule_hdl3; + ipa_nat_ipv4_rule ipv4_rule = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test017.c b/ipanat/test/ipa_nat_test017.c new file mode 100644 index 0000000..23369e4 --- /dev/null +++ b/ipanat/test/ipa_nat_test017.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/*=========================================================================*/ +/*! + @file + ipa_nat_test017.cpp + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add same 3 ipv rules + 3. delete second, third and first + 4. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test017( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl1, rule_hdl2, rule_hdl3; + ipa_nat_ipv4_rule ipv4_rule = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test018.c b/ipanat/test/ipa_nat_test018.c new file mode 100644 index 0000000..ca3d712 --- /dev/null +++ b/ipanat/test/ipa_nat_test018.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/*=========================================================================*/ +/*! + @file + ipa_nat_test018.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add same 3 ipv rules + 3. delete third, second and first + 4. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test018( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl1, rule_hdl2, rule_hdl3; + ipa_nat_ipv4_rule ipv4_rule = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test019.c b/ipanat/test/ipa_nat_test019.c new file mode 100644 index 0000000..883294d --- /dev/null +++ b/ipanat/test/ipa_nat_test019.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/*=========================================================================*/ +/*! + @file + ipa_nat_test019.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add same 3 ipv rules + 3. delete third, first and second + 4. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test019( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl1, rule_hdl2, rule_hdl3; + ipa_nat_ipv4_rule ipv4_rule = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test020.c b/ipanat/test/ipa_nat_test020.c new file mode 100644 index 0000000..2a32da0 --- /dev/null +++ b/ipanat/test/ipa_nat_test020.c @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test020.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add same 4 ipv rules + 3. delete third, second, fourth and first + 4. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test020( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl1, rule_hdl2, rule_hdl3, rule_hdl4; + ipa_nat_ipv4_rule ipv4_rule = {0}; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl4); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl4); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test021.c b/ipanat/test/ipa_nat_test021.c new file mode 100644 index 0000000..a2d5a8d --- /dev/null +++ b/ipanat/test/ipa_nat_test021.c @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/*=========================================================================*/ +/*! + @file + ipa_nat_test021.cpp + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. add same 3 ipv rules + 3. delete Head and last entry + 4. add 2 new same ip4 entries + 5. Add head entry again + 6. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test021( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + u32 rule_hdl1, rule_hdl2, rule_hdl3; + ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule2 = {0}; + u32 rule_hdl21, rule_hdl22; + + /* Rule 1 */ + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + /* Rule 2*/ + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_UDP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + /* Delete head entry */ + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + /* Delete Last Entry */ + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + /* Add 2 different Entries */ + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl21); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl22); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + /* Add first entry again */ + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test022.c b/ipanat/test/ipa_nat_test022.c new file mode 100644 index 0000000..3767e19 --- /dev/null +++ b/ipanat/test/ipa_nat_test022.c @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test022.c + + @brief + Note: Verify the following scenario: + 1. Add ipv4 table + 2. Add ipv4 rules till filled + 3. Print stats + 4. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test022( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + + ipa_nat_ipv4_rule ipv4_rule; + u32 rule_hdls[2048]; + + ipa_nati_tbl_stats nstats, last_nstats; + ipa_nati_tbl_stats istats, last_istats; + + u32 i, tot; + + bool switched = false; + + const char* mem_type; + + int ret; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nati_clear_ipv4_tbl(tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + IPAINFO("Attempting rule adds to %s table of size: (%u)\n", + ipa3_nat_mem_in_as_str(nstats.nmi), + nstats.tot_ents); + + last_nstats = nstats; + last_istats = istats; + + memset(rule_hdls, 0, sizeof(rule_hdls)); + + for ( i = tot = 0; i < array_sz(rule_hdls); i++ ) + { + IPADBG("Trying %d ipa_nat_add_ipv4_rule()\n", i); + + memset(&ipv4_rule, 0, sizeof(ipv4_rule)); + + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdls[i]); + CHECK_ERR_TBL_ACTION(ret, tbl_hdl, break); + + IPADBG("Success %d ipa_nat_add_ipv4_rule() -> rule_hdl(0x%08X)\n", + i, rule_hdls[i]); + + ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats); + CHECK_ERR_TBL_ACTION(ret, tbl_hdl, break); + + /* + * Are we in hybrid mode and 
have we switched memory type? + * Check for it and print the appropriate stats. + */ + if ( nstats.nmi != last_nstats.nmi ) + { + mem_type = ipa3_nat_mem_in_as_str(last_nstats.nmi); + + switched = true; + + /* + * NAT table stats... + */ + IPAINFO("Able to add (%u) records to %s " + "NAT table of size (%u) or (%f) percent\n", + tot, + mem_type, + last_nstats.tot_ents, + ((float) tot / (float) last_nstats.tot_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "NAT BASE table of size (%u) or (%f) percent\n", + last_nstats.tot_base_ents_filled, + mem_type, + last_nstats.tot_base_ents, + ((float) last_nstats.tot_base_ents_filled / + (float) last_nstats.tot_base_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "NAT EXPN table of size (%u) or (%f) percent\n", + last_nstats.tot_expn_ents_filled, + mem_type, + last_nstats.tot_expn_ents, + ((float) last_nstats.tot_expn_ents_filled / + (float) last_nstats.tot_expn_ents) * 100.0); + + IPAINFO("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + last_nstats.tot_chains, + last_nstats.min_chain_len, + last_nstats.max_chain_len, + last_nstats.avg_chain_len); + + /* + * INDEX table stats... + */ + IPAINFO("Able to add (%u) records to %s " + "IDX table of size (%u) or (%f) percent\n", + tot, + mem_type, + last_istats.tot_ents, + ((float) tot / (float) last_istats.tot_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "IDX BASE table of size (%u) or (%f) percent\n", + last_istats.tot_base_ents_filled, + mem_type, + last_istats.tot_base_ents, + ((float) last_istats.tot_base_ents_filled / + (float) last_istats.tot_base_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "IDX EXPN table of size (%u) or (%f) percent\n", + last_istats.tot_expn_ents_filled, + mem_type, + last_istats.tot_expn_ents, + ((float) last_istats.tot_expn_ents_filled / + (float) last_istats.tot_expn_ents) * 100.0); + + IPAINFO("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + last_istats.tot_chains, + last_istats.min_chain_len, + last_istats.max_chain_len, + last_istats.avg_chain_len); + } + + last_nstats = nstats; + last_istats = istats; + + if ( switched ) + { + switched = false; + + IPAINFO("Continuing rule adds to %s table of size: (%u)\n", + ipa3_nat_mem_in_as_str(nstats.nmi), + nstats.tot_ents); + } + + tot++; + } + + ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + mem_type = ipa3_nat_mem_in_as_str(nstats.nmi); + + /* + * NAT table stats... + */ + IPAINFO("Able to add (%u) records to %s " + "NAT table of size (%u) or (%f) percent\n", + tot, + mem_type, + nstats.tot_ents, + ((float) tot / (float) nstats.tot_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "NAT BASE table of size (%u) or (%f) percent\n", + nstats.tot_base_ents_filled, + mem_type, + nstats.tot_base_ents, + ((float) nstats.tot_base_ents_filled / + (float) nstats.tot_base_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "NAT EXPN table of size (%u) or (%f) percent\n", + nstats.tot_expn_ents_filled, + mem_type, + nstats.tot_expn_ents, + ((float) nstats.tot_expn_ents_filled / + (float) nstats.tot_expn_ents) * 100.0); + + IPAINFO("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + nstats.tot_chains, + nstats.min_chain_len, + nstats.max_chain_len, + nstats.avg_chain_len); + + /* + * INDEX table stats... 
+ */ + IPAINFO("Able to add (%u) records to %s " + "IDX table of size (%u) or (%f) percent\n", + tot, + mem_type, + istats.tot_ents, + ((float) tot / (float) istats.tot_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "IDX BASE table of size (%u) or (%f) percent\n", + istats.tot_base_ents_filled, + mem_type, + istats.tot_base_ents, + ((float) istats.tot_base_ents_filled / + (float) istats.tot_base_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "IDX EXPN table of size (%u) or (%f) percent\n", + istats.tot_expn_ents_filled, + mem_type, + istats.tot_expn_ents, + ((float) istats.tot_expn_ents_filled / + (float) istats.tot_expn_ents) * 100.0); + + IPAINFO("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + istats.tot_chains, + istats.min_chain_len, + istats.max_chain_len, + istats.avg_chain_len); + + IPAINFO("Deleting all rules\n"); + + for ( i = 0; i < tot; i++ ) + { + IPADBG("Trying %d ipa_nat_del_ipv4_rule(0x%08X)\n", + i, rule_hdls[i]); + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdls[i]); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + IPADBG("Success ipa_nat_del_ipv4_rule(%d)\n", i); + } + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test023.c b/ipanat/test/ipa_nat_test023.c new file mode 100644 index 0000000..501b223 --- /dev/null +++ b/ipanat/test/ipa_nat_test023.c @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test023.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. Add ipv rule three times to cause collisions and linking + 3. Delete rules in a particular order and observe list for expected + form + 4. Run 2 and 3 over and over until all delete cominations have been + run + 5. 
Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test023( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + + ipa_nat_ipv4_rule ipv4_rule = {0}; + + u32 rule_hdl1; + u32 rule_hdl2; + u32 rule_hdl3; + + u32* rule_del_combos[6][3] = { + { &rule_hdl1, &rule_hdl2, &rule_hdl3 }, + { &rule_hdl1, &rule_hdl3, &rule_hdl2 }, + + { &rule_hdl2, &rule_hdl1, &rule_hdl3 }, + { &rule_hdl2, &rule_hdl3, &rule_hdl1 }, + + { &rule_hdl3, &rule_hdl1, &rule_hdl2 }, + { &rule_hdl3, &rule_hdl2, &rule_hdl1 }, + }; + + int i, j, ret; + + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + for ( i = 0; i < 6; i++ ) + { + IPADBG("Adding rule 1\n"); + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + IPADBG("Adding rule 2\n"); + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + IPADBG("Adding rule 3\n"); + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ipa_nat_dump_ipv4_table(tbl_hdl); + + for ( j = 0; j < 3; j++ ) + { + u32* rh_ptr = rule_del_combos[i][j]; + + IPADBG("Deleting rule %u\n", + ( rh_ptr == &rule_hdl1 ) ? 1 : + ( rh_ptr == &rule_hdl2 ) ? 2 : 3); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, *rh_ptr); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ipa_nat_dump_ipv4_table(tbl_hdl); + } + } + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test024.c b/ipanat/test/ipa_nat_test024.c new file mode 100644 index 0000000..216fc39 --- /dev/null +++ b/ipanat/test/ipa_nat_test024.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test024.c + + @brief + Verify the following scenario: + 1. Trigger thousands of table memory switches + +*/ +/*===========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test024( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + + int i, ret; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + for ( i = 0; i < 1000; i++ ) + { + ret = ipa_nat_test022( + nat_mem_type, pub_ip_add, total_entries, tbl_hdl, !sep, arb_data_ptr); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test025.c b/ipanat/test/ipa_nat_test025.c new file mode 100644 index 0000000..d1c1b9d --- /dev/null +++ b/ipanat/test/ipa_nat_test025.c @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test025.c + + @brief + Note: Verify the following scenario: + 1. 
Similare to test022, but with random deletes during adds +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +#undef VALID_RULE +#define VALID_RULE(r) ((r) != 0 && (r) != 0xFFFFFFFF) + +#undef GET_MAX +#define GET_MAX(ram, rdm) \ + do { \ + while ( (ram = rand() % 20) < 4); \ + while ( (rdm = rand() % 10) >= ram || rdm == 0 ); \ + IPADBG("rand_adds_max(%u) rand_dels_max(%u)\n", ram, rdm); \ + } while (0) + +int ipa_nat_test025( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + + ipa_nat_ipv4_rule ipv4_rule; + u32 rule_hdls[1024]; + + ipa_nati_tbl_stats nstats, last_nstats; + ipa_nati_tbl_stats istats, last_istats; + + u32 i; + u32 rand_adds_max, rand_dels_max; + u32 tot, tot_added, tot_deleted; + + bool switched = false; + + const char* mem_type; + + int ret; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + ret = ipa_nati_clear_ipv4_tbl(tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + IPAINFO("Attempting rule adds to %s table of size: (%u)\n", + ipa3_nat_mem_in_as_str(nstats.nmi), + nstats.tot_ents); + + last_nstats = nstats; + last_istats = istats; + + memset(rule_hdls, 0, sizeof(rule_hdls)); + + GET_MAX(rand_adds_max, rand_dels_max); + + tot = tot_added = tot_deleted = 0; + + for ( i = 0; i < array_sz(rule_hdls); i++ ) + { + IPADBG("Trying %u ipa_nat_add_ipv4_rule()\n", i); + + memset(&ipv4_rule, 0, sizeof(ipv4_rule)); + + ipv4_rule.protocol = IPPROTO_TCP; + ipv4_rule.public_port = RAN_PORT; + ipv4_rule.target_ip = RAN_ADDR; + ipv4_rule.target_port = RAN_PORT; + ipv4_rule.private_ip = RAN_ADDR; + ipv4_rule.private_port = RAN_PORT; + + ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdls[i]); + CHECK_ERR_TBL_ACTION(ret, tbl_hdl, break); + + IPADBG("Success %u ipa_nat_add_ipv4_rule() -> rule_hdl(0x%08X)\n", + i, rule_hdls[i]); + + ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats); + CHECK_ERR_TBL_ACTION(ret, tbl_hdl, break); + + /* + * Are we in hybrid mode and have we switched memory type? + * Check for it and print the appropriate stats. + */ + if ( nstats.nmi != last_nstats.nmi ) + { + mem_type = ipa3_nat_mem_in_as_str(last_nstats.nmi); + + switched = true; + + /* + * NAT table stats... + */ + IPAINFO("Able to add (%u) records to %s " + "NAT table of size (%u) or (%f) percent\n", + tot, + mem_type, + last_nstats.tot_ents, + ((float) tot / (float) last_nstats.tot_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "NAT BASE table of size (%u) or (%f) percent\n", + last_nstats.tot_base_ents_filled, + mem_type, + last_nstats.tot_base_ents, + ((float) last_nstats.tot_base_ents_filled / + (float) last_nstats.tot_base_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "NAT EXPN table of size (%u) or (%f) percent\n", + last_nstats.tot_expn_ents_filled, + mem_type, + last_nstats.tot_expn_ents, + ((float) last_nstats.tot_expn_ents_filled / + (float) last_nstats.tot_expn_ents) * 100.0); + + IPAINFO("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + last_nstats.tot_chains, + last_nstats.min_chain_len, + last_nstats.max_chain_len, + last_nstats.avg_chain_len); + + /* + * INDEX table stats... 
+ */ + IPAINFO("Able to add (%u) records to %s " + "IDX table of size (%u) or (%f) percent\n", + tot, + mem_type, + last_istats.tot_ents, + ((float) tot / (float) last_istats.tot_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "IDX BASE table of size (%u) or (%f) percent\n", + last_istats.tot_base_ents_filled, + mem_type, + last_istats.tot_base_ents, + ((float) last_istats.tot_base_ents_filled / + (float) last_istats.tot_base_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "IDX EXPN table of size (%u) or (%f) percent\n", + last_istats.tot_expn_ents_filled, + mem_type, + last_istats.tot_expn_ents, + ((float) last_istats.tot_expn_ents_filled / + (float) last_istats.tot_expn_ents) * 100.0); + + IPAINFO("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + last_istats.tot_chains, + last_istats.min_chain_len, + last_istats.max_chain_len, + last_istats.avg_chain_len); + } + + last_nstats = nstats; + last_istats = istats; + + tot++; + + if ( ++tot_added == rand_adds_max ) + { + u32 j, k; + u32* hdl_ptr[tot]; + + for ( j = k = 0; j < array_sz(rule_hdls); j++ ) + { + if ( VALID_RULE(rule_hdls[j]) ) + { + hdl_ptr[k] = &(rule_hdls[j]); + + if ( ++k == tot ) + { + break; + } + } + } + + IPADBG("About to delete %u rules\n", rand_dels_max); + + while ( k ) + { + while ( j = rand() % k, ! VALID_RULE(*(hdl_ptr[j])) ); + + IPADBG("Trying ipa_nat_del_ipv4_rule(0x%08X)\n", + *(hdl_ptr[j])); + + ret = ipa_nat_del_ipv4_rule(tbl_hdl, *(hdl_ptr[j])); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + IPADBG("Success ipa_nat_del_ipv4_rule(0x%08X)\n", *(hdl_ptr[j])); + + *(hdl_ptr[j]) = 0xFFFFFFFF; + + --tot; + + if ( ++tot_deleted == rand_dels_max ) + { + break; + } + } + + GET_MAX(rand_adds_max, rand_dels_max); + + tot_added = tot_deleted = 0; + } + + if ( switched ) + { + switched = false; + + IPAINFO("Continuing rule adds to %s table of size: (%u)\n", + ipa3_nat_mem_in_as_str(nstats.nmi), + nstats.tot_ents); + } + } + + ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + mem_type = ipa3_nat_mem_in_as_str(nstats.nmi); + + /* + * NAT table stats... + */ + IPAINFO("Able to add (%u) records to %s " + "NAT table of size (%u) or (%f) percent\n", + tot, + mem_type, + nstats.tot_ents, + ((float) tot / (float) nstats.tot_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "NAT BASE table of size (%u) or (%f) percent\n", + nstats.tot_base_ents_filled, + mem_type, + nstats.tot_base_ents, + ((float) nstats.tot_base_ents_filled / + (float) nstats.tot_base_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "NAT EXPN table of size (%u) or (%f) percent\n", + nstats.tot_expn_ents_filled, + mem_type, + nstats.tot_expn_ents, + ((float) nstats.tot_expn_ents_filled / + (float) nstats.tot_expn_ents) * 100.0); + + IPAINFO("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + nstats.tot_chains, + nstats.min_chain_len, + nstats.max_chain_len, + nstats.avg_chain_len); + + /* + * INDEX table stats... 
+ */ + IPAINFO("Able to add (%u) records to %s " + "IDX table of size (%u) or (%f) percent\n", + tot, + mem_type, + istats.tot_ents, + ((float) tot / (float) istats.tot_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "IDX BASE table of size (%u) or (%f) percent\n", + istats.tot_base_ents_filled, + mem_type, + istats.tot_base_ents, + ((float) istats.tot_base_ents_filled / + (float) istats.tot_base_ents) * 100.0); + + IPAINFO("Able to add (%u) records to %s " + "IDX EXPN table of size (%u) or (%f) percent\n", + istats.tot_expn_ents_filled, + mem_type, + istats.tot_expn_ents, + ((float) istats.tot_expn_ents_filled / + (float) istats.tot_expn_ents) * 100.0); + + IPAINFO("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n", + mem_type, + istats.tot_chains, + istats.min_chain_len, + istats.max_chain_len, + istats.avg_chain_len); + + IPAINFO("Deleting remaining rules\n"); + + for ( i = 0; i < array_sz(rule_hdls); i++ ) + { + if ( VALID_RULE(rule_hdls[i]) ) + { + IPADBG("Trying ipa_nat_del_ipv4_rule(0x%08X)\n", + rule_hdls[i]); + ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdls[i]); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + IPADBG("Success ipa_nat_del_ipv4_rule(%u)\n", rule_hdls[i]); + } + } + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_test999.c b/ipanat/test/ipa_nat_test999.c new file mode 100644 index 0000000..f82ef18 --- /dev/null +++ b/ipanat/test/ipa_nat_test999.c @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_test999.c + + @brief + Verify the following scenario: + 1. 
Delete ipv4 table +*/ +/*===========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_test999( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + int ret; + + IPADBG("In\n"); + + if ( ! sep ) + { + IPADBG("calling ipa_nat_del_ipv4_tbl()\n"); + + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + + *tbl_hdl_ptr = 0; + + CHECK_ERR(ret); + + IPADBG("deleted ipv4 nat table successfully.\n"); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_testMODEL.c b/ipanat/test/ipa_nat_testMODEL.c new file mode 100644 index 0000000..0f99159 --- /dev/null +++ b/ipanat/test/ipa_nat_testMODEL.c @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_testXXX.c + + @brief + Verify the following scenario: + +*/ +/*===========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_testXXX( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* tbl_hdl_ptr = (int*) arb_data_ptr; + + int ret; + + IPADBG("In\n"); + + if ( sep ) + { + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + } + + if ( sep ) + { + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + *tbl_hdl_ptr = 0; + CHECK_ERR(ret); + } + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/ipa_nat_testREG.c b/ipanat/test/ipa_nat_testREG.c new file mode 100644 index 0000000..10e29cf --- /dev/null +++ b/ipanat/test/ipa_nat_testREG.c @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*=========================================================================*/ +/*! + @file + ipa_nat_testREG.c + + @brief + Verify the following scenario: + 1. Add ipv4 table + 2. Delete ipv4 table +*/ +/*=========================================================================*/ + +#include "ipa_nat_test.h" + +int ipa_nat_testREG( + const char* nat_mem_type, + u32 pub_ip_add, + int total_entries, + u32 tbl_hdl, + int sep, + void* arb_data_ptr) +{ + int* ireg_ptr = (int*) arb_data_ptr; + + int i, ret; + + IPADBG("In\n"); + + for ( i = 0; i < *ireg_ptr; i++ ) + { + IPADBG("Executing iteration %d\n", i+1); + + IPADBG("Calling ipa_nat_add_ipv4_tbl()\n"); + + ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl); + + CHECK_ERR_TBL_STOP(ret, tbl_hdl); + + IPADBG("Iteration %d creation of nat ipv4 table successful\n", i+1); + + IPADBG("Calling ipa_nat_del_ipv4_tbl()\n"); + + ret = ipa_nat_del_ipv4_tbl(tbl_hdl); + + CHECK_ERR(ret); + + IPADBG("Iteration %d deletion of ipv4 nat table successful\n", i+1); + } + + IPADBG("Executed %d iterations:\n", i); + + IPADBG("Out\n"); + + return 0; +} diff --git a/ipanat/test/main.c b/ipanat/test/main.c new file mode 100644 index 0000000..16d46fd --- /dev/null +++ b/ipanat/test/main.c @@ -0,0 +1,507 @@ +/* + * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include <stdio.h> +#include <stdint.h> +#include <stdbool.h> +#include <stdlib.h> +#include <unistd.h> +#include <libgen.h> +#include <string.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <errno.h> + +#include "ipa_nat_test.h" +#include "ipa_nat_map.h" + +#undef strcasesame +#define strcasesame(x, y) \ + (! strcasecmp((x), (y))) + +static inline const char* legal_mem_type( + const char* mt ) +{ + if ( strcasesame(mt, "DDR") ) return "DDR"; + if ( strcasesame(mt, "SRAM") ) return "SRAM"; + if ( strcasesame(mt, "HYBRID") ) return "HYBRID"; + return NULL; +} + +static int nat_rule_loop_check( + ipa_table* table_ptr, + uint32_t rule_hdl, + void* record_ptr, + uint16_t record_index, + void* meta_record_ptr, + uint16_t meta_record_index, + void* arb_data_ptr ) +{ + enum ipa3_nat_mem_in nmi; + uint8_t is_expn_tbl; + uint16_t rule_index; + uint32_t tbl_hdl = (uint32_t) arb_data_ptr; + + struct ipa_nat_rule* rule_ptr = + (struct ipa_nat_rule*) record_ptr; + + BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index); + + /* + * By virtue of this function being called back by the walk, this + * record_index is valid. Denote it as such in the map... + */ + if ( ipa_nat_map_add(MAP_NUM_99, record_index, 1) ) + { + IPAERR("ipa_nat_map_add(index(%u)) failed\n", record_index); + return -EINVAL; + } + + if ( rule_ptr->next_index == record_index ) + { + IPAERR("Infinite loop detected in IPv4 %s table, entry %u\n", + (is_expn_tbl) ? "expansion" : "base", + record_index); + + ipa_nat_dump_ipv4_table(tbl_hdl); + + return -EINVAL; + } + + return 0; +} + +static int nat_rule_validity_check( + ipa_table* table_ptr, + uint32_t rule_hdl, + void* record_ptr, + uint16_t record_index, + void* meta_record_ptr, + uint16_t meta_record_index, + void* arb_data_ptr ) +{ + enum ipa3_nat_mem_in nmi; + uint8_t is_expn_tbl; + uint16_t rule_index; + uint16_t index; + + struct ipa_nat_rule* rule_ptr = + (struct ipa_nat_rule*) record_ptr; + + BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index); + + index = rule_ptr->next_index; + + if ( index && ipa_nat_map_find(MAP_NUM_99, index, NULL) ) + { + IPAERR("Invalid next index %u found in IPv4 %s table entry %u\n", + index, + (is_expn_tbl) ? 
"expansion" : "base", + rule_index); + + return -EINVAL; + } + + if ( is_expn_tbl ) + { + index = rule_ptr->prev_index; + + if ( index && ipa_nat_map_find(MAP_NUM_99, index, NULL) ) + { + IPAERR("Invalid previous index %u found in IPv4 %s table entry %u\n", + index, + "expansion", + rule_index); + + return -EINVAL; + } + } + + return 0; +} + +static int index_loop_check( + ipa_table* table_ptr, + uint32_t rule_hdl, + void* record_ptr, + uint16_t record_index, + void* meta_record_ptr, + uint16_t meta_record_index, + void* arb_data_ptr ) +{ + enum ipa3_nat_mem_in nmi; + uint8_t is_expn_tbl; + uint16_t rule_index; + uint32_t tbl_hdl = (uint32_t) arb_data_ptr; + + struct ipa_nat_indx_tbl_rule* itr_ptr = + (struct ipa_nat_indx_tbl_rule*) record_ptr; + + BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index); + + /* + * By virtue of this function being called back by the walk, this + * record_index is valid. Denote it as such in the map... + */ + if ( ipa_nat_map_add(MAP_NUM_99, record_index, 1) ) + { + IPAERR("ipa_nat_map_add(index(%u)) failed\n", record_index); + return -EINVAL; + } + + if ( itr_ptr->next_index == record_index ) + { + IPAERR("Infinite loop detected in IPv4 index %s table, entry %u\n", + (is_expn_tbl) ? "expansion" : "base", + record_index); + + ipa_nat_dump_ipv4_table(tbl_hdl); + + return -EINVAL; + } + + return 0; +} + +static int index_validity_check( + ipa_table* table_ptr, + uint32_t rule_hdl, + void* record_ptr, + uint16_t record_index, + void* meta_record_ptr, + uint16_t meta_record_index, + void* arb_data_ptr ) +{ + enum ipa3_nat_mem_in nmi; + uint8_t is_expn_tbl; + uint16_t rule_index; + uint16_t index; + + struct ipa_nat_indx_tbl_rule* itr_ptr = + (struct ipa_nat_indx_tbl_rule*) record_ptr; + + BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index); + + index = itr_ptr->next_index; + + if ( index && ipa_nat_map_find(MAP_NUM_99, index, NULL) ) + { + IPAERR("Invalid next index %u found in IPv4 index %s table entry %u\n", + index, + (is_expn_tbl) ? "expansion" : "base", + rule_index); + + return -EINVAL; + } + + if ( is_expn_tbl ) + { + struct ipa_nat_indx_tbl_meta_info* mi_ptr = meta_record_ptr; + + if ( ! mi_ptr ) + { + IPAERR("Missing meta pointer for IPv4 index %s table entry %u\n", + "expansion", + rule_index); + + return -EINVAL; + } + + index = mi_ptr->prev_index; + + if ( index && ipa_nat_map_find(MAP_NUM_99, index, NULL) ) + { + IPAERR("Invalid previous index %u found in IPv4 index %s table entry %u\n", + index, + "expansion", + rule_index); + + return -EINVAL; + } + } + + return 0; +} + +int ipa_nat_validate_ipv4_table( + u32 tbl_hdl ) +{ + int ret; + + /* + * Map MAP_NUM_99 will be used to keep, and to check for, + * record validity. + * + * The first walk will fill it. The second walk will use it... + */ + ipa_nat_map_clear(MAP_NUM_99); + + IPADBG("Checking IPv4 active rules:\n"); + + ret = ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_NAT_TABLE, nat_rule_loop_check, tbl_hdl); + + if ( ret != 0 ) + { + return ret; + } + + ret = ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_NAT_TABLE, nat_rule_validity_check, 0); + + if ( ret != 0 ) + { + return ret; + } + + /* + * Map MAP_NUM_99 will be used to keep, and to check for, + * record validity. + * + * The first walk will fill it. The second walk will use it... 
+ */ + ipa_nat_map_clear(MAP_NUM_99); + + IPADBG("Checking IPv4 index active rules:\n"); + + ret = ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_INDEX_TABLE, index_loop_check, tbl_hdl); + + if ( ret != 0 ) + { + return ret; + } + + ret = ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_INDEX_TABLE, index_validity_check, 0); + + if ( ret != 0 ) + { + return ret; + } + + return 0; +} + +static void +_dispUsage( + const char* progNamePtr ) +{ + printf( + "Usage: %s [-d -r N -i N -e N -m mt]\n" + "Where:\n" + " -d Each test is discrete (create table, add rules, destroy table)\n" + " If not specified, only one table create and destroy for all tests\n" + " -r N Where N is the number of times to run the inotify regression test\n" + " -i N Where N is the number of times (iterations) to run test\n" + " -e N Where N is the number of entries in the NAT\n" + " -m mt Where mt is the type of memory to use for the NAT\n" + " Legal mt's: DDR, SRAM, or HYBRID (ie. use SRAM and DDR)\n" + " -g M-N Run tests M through N only\n", + progNamePtr); + + fflush(stdout); +} + +static NatTests nt_array[] = { + NAT_TEST_ENTRY(ipa_nat_test000, 1, 0), + NAT_TEST_ENTRY(ipa_nat_test001, 1, 0), + NAT_TEST_ENTRY(ipa_nat_test002, 1, 0), + NAT_TEST_ENTRY(ipa_nat_test003, 1, 0), + NAT_TEST_ENTRY(ipa_nat_test004, 1, 0), + NAT_TEST_ENTRY(ipa_nat_test005, 1, 0), + NAT_TEST_ENTRY(ipa_nat_test006, 1, 0), + NAT_TEST_ENTRY(ipa_nat_test007, 1, 0), + NAT_TEST_ENTRY(ipa_nat_test008, 1, 0), + NAT_TEST_ENTRY(ipa_nat_test009, 1, 0), + NAT_TEST_ENTRY(ipa_nat_test010, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test011, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test012, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test013, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test014, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test015, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test016, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test017, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test018, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test019, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test020, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test021, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test022, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test023, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test024, IPA_NAT_TEST_PRE_COND_TE, 0), + NAT_TEST_ENTRY(ipa_nat_test025, IPA_NAT_TEST_PRE_COND_TE, 0), + /* + * Add new tests just above this comment. Keep the following two + * at the end... + */ + NAT_TEST_ENTRY(ipa_nat_test999, 1, 0), + NAT_TEST_ENTRY(ipa_nat_testREG, 1, 0), +}; + +int main( + int argc, + char* argv[] ) +{ + int sep = 0; + int ireg = 0; + uint32_t nt = 1; + int total_ents = 100; + uint32_t ht = 0; + uint32_t start = 0, end = 0; + + char* nat_mem_type = "DDR"; + + uint32_t tbl_hdl = 0; + + uint32_t pub_ip_addr; + + uint32_t i, ub, cnt, exec, pass; + + void* adp; + + time_t t; + + int c, ret; + + IPADBG("Testing user space nat driver\n"); + + while ( (c = getopt(argc, argv, "dr:i:e:m:h:g:?")) != -1 ) + { + switch (c) + { + case 'd': + sep = 1; + break; + case 'r': + ireg = atoi(optarg); + break; + case 'i': + nt = atoi(optarg); + break; + case 'e': + total_ents = atoi(optarg); + break; + case 'm': + if ( ! 
(nat_mem_type = legal_mem_type(optarg)) ) + { + fprintf(stderr, "Illegal: -m %s\n", optarg); + _dispUsage(basename(argv[0])); + exit(0); + } + break; + case 'h': + ht = atoi(optarg); + break; + case 'g': + if ( sscanf(optarg, "%u-%u", &start, &end) != 2 + || + ( start >= end || end >= array_sz(nt_array) - 1 ) ) + { + fprintf(stderr, "Illegal: -f %s\n", optarg); + _dispUsage(basename(argv[0])); + exit(0); + } + break; + case '?': + default: + _dispUsage(basename(argv[0])); + exit(0); + break; + } + } + + srand(time(&t)); + + pub_ip_addr = RAN_ADDR; + + exec = pass = 0; + + for ( cnt = ret = 0; cnt < nt && ret == 0; cnt++ ) + { + IPADBG("ITERATION [%u] OF TESING\n", cnt + 1); + + if ( ireg ) + { + adp = &ireg; + i = array_sz(nt_array) - 1; + ub = array_sz(nt_array); + } + else + { + adp = &tbl_hdl; + i = ( end ) ? start : 0; + ub = ( end ) ? end : array_sz(nt_array) - 1; + + if ( i != 0 && ! sep ) + { + ipa_nat_test000( + nat_mem_type, pub_ip_addr, total_ents, tbl_hdl, 0, adp); + } + } + + for ( ; i < ub && ret == 0; i++ ) + { + if ( total_ents >= nt_array[i].num_ents_trigger ) + { + IPADBG("+------------------------------------------------+\n"); + IPADBG("| Executing test: %s |\n", nt_array[i].func_name); + IPADBG("+------------------------------------------------+\n"); + + ret = nt_array[i].func( + nat_mem_type, pub_ip_addr, total_ents, tbl_hdl, sep, adp); + + exec++; + + if ( ret == 0 ) + { + IPADBG("<<<<< Test %s SUCCEEDED >>>>>\n", nt_array[i].func_name); + + pass++; + + if ( ht || nt_array[i].test_hold_time_in_secs ) + { + ht = (ht) ? ht : nt_array[i].test_hold_time_in_secs; + + sleep(ht); + } + } + else + { + IPAERR("<<<<< Test %s FAILED >>>>>\n", nt_array[i].func_name); + } + } + } + } + + if ( ret && tbl_hdl ) + { + ipa_nat_test999( + nat_mem_type, pub_ip_addr, total_ents, tbl_hdl, 0, &tbl_hdl); + } + + IPADBG("Total NAT Tests Run:%u, Pass:%u, Fail:%u\n", + exec, pass, exec - pass); + + return 0; +} |
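
Note on extending the restored test harness: main.c drives every case through the nt_array table, gating each entry on num_ents_trigger and honoring per-test hold times, and its comment asks that new tests be added just above the ipa_nat_test999/ipa_nat_testREG entries. As a minimal sketch of that pattern (the test number 026 and its body are illustrative and not part of this commit; it only reuses the ipa_nat_testMODEL.c skeleton and the ipa_nat_add_ipv4_tbl / ipa_nat_add_ipv4_rule / ipa_nat_del_ipv4_rule / ipa_nat_del_ipv4_tbl calls already exercised by the tests above):

/*
 * Illustrative only -- not part of this commit. A hypothetical
 * ipa_nat_test026 built from the ipa_nat_testMODEL.c template:
 * create the table when running discretely (sep), add one rule,
 * delete it, then tear the table down.
 */
#include "ipa_nat_test.h"

int ipa_nat_test026(
	const char* nat_mem_type,
	u32 pub_ip_add,
	int total_entries,
	u32 tbl_hdl,
	int sep,
	void* arb_data_ptr)
{
	int* tbl_hdl_ptr = (int*) arb_data_ptr;
	u32 rule_hdl;
	ipa_nat_ipv4_rule ipv4_rule = {0};
	int ret;

	ipv4_rule.target_ip    = RAN_ADDR;
	ipv4_rule.target_port  = RAN_PORT;
	ipv4_rule.private_ip   = RAN_ADDR;
	ipv4_rule.private_port = RAN_PORT;
	ipv4_rule.protocol     = IPPROTO_TCP;
	ipv4_rule.public_port  = RAN_PORT;

	IPADBG("In\n");

	if ( sep )
	{
		/* Discrete mode (-d): each test owns its own table */
		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
	}

	/* Add a single rule, then remove it again */
	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
	CHECK_ERR_TBL_STOP(ret, tbl_hdl);

	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
	CHECK_ERR_TBL_STOP(ret, tbl_hdl);

	if ( sep )
	{
		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
		*tbl_hdl_ptr = 0;
		CHECK_ERR(ret);
	}

	IPADBG("Out\n");

	return 0;
}

Such a case would presumably be registered in nt_array just above the ipa_nat_test999/ipa_nat_testREG entries, for example as NAT_TEST_ENTRY(ipa_nat_test026, IPA_NAT_TEST_PRE_COND_TE, 0), so that the -e entry-count gate and the -g M-N range selection in main() would pick it up without further changes.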