

HAProxy Configuration for Active Directory (AD) Servers

A quick configuration to proxy AD requests through another server. Set the following on the proxy server. Note that the frontend listens on port 443, whereas the backend connects to the AD server on port 389:

# vi /etc/haproxy18/haproxy.cfg
global
    log         127.0.0.1 local0 debug
    stats       socket /var/run/haproxy.sock mode 0600 level admin
    # stats     socket /var/lib/haproxy/stats
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    debug

defaults
    mode                    tcp
    log                     global
    option                  dontlognull
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

frontend ldap-ad-front
    log                         127.0.0.1       local0          debug
    bind                        terra01:443
    mode                        tcp
    option                      tcplog
    default_backend             ldap-ad-back


backend ldap-ad-back
    log        /dev/log local0 debug
    mode       tcp
    balance    roundrobin
    default-server inter 3s rise 2
    server     mds.xyz mds.xyz:389 maxconn 1024 check

    option                tcpka
    timeout server        20s
    timeout connect       10s

    # Below, the LDAP check procedure:
    option tcp-check
    tcp-check connect port 389
    tcp-check send-binary 300c0201                                      # LDAP bind request "" simple
    tcp-check send-binary 01                                            # message ID
    tcp-check send-binary 6007                                          # protocol Op
    tcp-check send-binary 0201                                          # bind request
    tcp-check send-binary 03                                            # LDAP v3
    tcp-check send-binary 04008000                                      # name, simple authentication
    tcp-check expect binary 0a0100                                      # bind response + result code: success
    tcp-check send-binary 30050201034200                                # unbind request

listen stats
    bind *:9000
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
    stats uri /haproxy-stats
    stats auth admin:s0meP@ssw0rd
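
Before restarting the service, the configuration can be syntax-checked. A quick sketch, assuming the haproxy18 package installs the binary as haproxy (adjust the name or path if your build differs):

haproxy -c -f /etc/haproxy18/haproxy.cfg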

Firewall zone file:

# cat /etc/firewalld/zones/public.xml
<?xml version="1.0" encoding="utf-8"?>
<zone>
<short>Public</short>
<description>For use in public areas. You do not trust the other computers on networks to not harm your computer. Only selected incoming connections are accepted.</description>
<service name="ssh"/>
<service name="dhcpv6-client"/>
<port protocol="tcp" port="22"/>
<port protocol="udp" port="22"/>
<port protocol="udp" port="443"/>
<port protocol="tcp" port="443"/>
<port protocol="udp" port="636"/>
<port protocol="tcp" port="636"/>
<port protocol="udp" port="3269"/>
<port protocol="tcp" port="3269"/>
<port protocol="udp" port="3268"/>
<port protocol="tcp" port="3268"/>
<port protocol="udp" port="389"/>
<port protocol="tcp" port="389"/>
<port protocol="udp" port="9000"/>
<port protocol="tcp" port="9000"/>
<port protocol="udp" port="137"/>
<port protocol="udp" port="138"/>
<port protocol="udp" port="2049"/>
</zone>
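
Alternatively, the same ports can be opened with firewall-cmd rather than editing the zone XML by hand; a sketch for a few of the ports above:

firewall-cmd --permanent --add-port=389/tcp
firewall-cmd --permanent --add-port=443/tcp
firewall-cmd --permanent --add-port=636/tcp
firewall-cmd --permanent --add-port=9000/tcp
firewall-cmd --reload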

Test with the following from another host:

ldapsearch -W -b "DC=MDS,DC=XYZ" -h 10.0.0.100:443 -S sAMAccountName -D resu@mds.xyz '(&(objectClass=user)(sAMAccountName=*resu*))'
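
If the -h host:port form is not accepted by your openldap-clients build, the URI form should behave the same; a sketch of the equivalent call:

ldapsearch -W -b "DC=MDS,DC=XYZ" -H ldap://10.0.0.100:443 -S sAMAccountName -D resu@mds.xyz '(&(objectClass=user)(sAMAccountName=*resu*))'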

Command reference:

yum install haproxy18.x86_64
yum install openldap-clients
systemctl restart haproxy18
systemctl status haproxy18 -l
vi /etc/haproxy18/haproxy.cfg
vi /etc/firewalld/zones/public.xml
setsebool -P haproxy_connect_any=1
netstat -pnltu|grep -Ei haproxy
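
Two quick ways to check on the running proxy, assuming socat and curl are installed; the socket path and stats credentials come from the configuration above:

echo "show stat" | socat stdio /var/run/haproxy.sock
curl -u admin:s0meP@ssw0rd http://localhost:9000/haproxy-stats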

Cheers,

klist: Improper format of Kerberos configuration file while initializing krb5

There were extra spaces, or whitespace copied over from an editor in a different encoding, in front of the domain-to-realm mapping entries shown below. Once they were removed, everything worked fine:

root@g73sw01:/etc# klist -kte
klist: Improper format of Kerberos configuration file while initializing krb5
root@g73sw01:/etc# vi /etc/krb5.conf
root@g73sw01:/etc# klist -kte
Keytab name: FILE:/etc/krb5.keytab
KVNO Timestamp Principal
---- ------------------- ------------------------------------------------------
6 11/19/2023 22:34:44 host/g73sw01.nix.mds.xyz@NIX.MDS.XYZ (aes256-cts-hmac-sha1-96)
6 11/19/2023 22:34:44 host/g73sw01.nix.mds.xyz@NIX.MDS.XYZ (aes128-cts-hmac-sha1-96)
6 11/19/2023 22:34:44 host/g73sw01.nix.mds.xyz@NIX.MDS.XYZ (aes256-cts-hmac-sha384-192)
6 11/19/2023 22:34:44 host/g73sw01.nix.mds.xyz@NIX.MDS.XYZ (aes128-cts-hmac-sha256-128)
6 11/19/2023 22:34:44 host/g73sw01.nix.mds.xyz@NIX.MDS.XYZ (DEPRECATED:des3-cbc-sha1)
6 11/19/2023 22:34:44 host/g73sw01.nix.mds.xyz@NIX.MDS.XYZ (DEPRECATED:arcfour-hmac)
root@g73sw01:/etc# cat /etc/krb5.conf
.
.
.
.mds.xyz = MDS.XYZ
mds.xyz = MDS.XYZ
root@g73sw01:/etc#
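
A quick way to spot stray whitespace or non-ASCII characters pasted into the file (assuming GNU cat and grep; the -P option needs PCRE support):

cat -A /etc/krb5.conf                      # tabs show as ^I, line ends as $
grep -nP '[^\x00-\x7F]' /etc/krb5.conf     # list any lines containing non-ASCII bytes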

[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: Hostname mismatch, certificate is not valid for 'idmipa01.nix.mds.xyz'

When joining a new client to the FreeIPA servers:

# ipa-client-install --uninstall; ipa-client-install --force-join -p USER -w "SECRET" --fixed-primary --server=idmipa01.nix.mds.xyz --server=idmipa02.nix.mds.xyz --domain=nix.mds.xyz --realm=NIX.MDS.XYZ -U

the following message is visible:

Connection to https://idmipa01.nix.mds.xyz/ipa/json failed with [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: Hostname mismatch, certificate is not valid for 'idmipa01.nix.mds.xyz'. (_ssl.c:1007)
Connection to https://idmipa02.nix.mds.xyz/ipa/json failed with [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: Hostname mismatch, certificate is not valid for 'idmipa02.nix.mds.xyz'. (_ssl.c:1007)

On the surface this message doesn’t make much sense.  The certificate definitely matches the hostname:

openssl s_client -connect idmipa01.nix.mds.xyz:443
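
A non-interactive way to capture the server certificate straight into a file, sketched with the same openssl client:

echo | openssl s_client -connect idmipa01.nix.mds.xyz:443 2>/dev/null | openssl x509 -outform PEM > freeipa.pem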

Save the certificate to freeipa.pem, either by copying it out of the s_client output by hand or with the one-liner above, then issue:


# openssl x509 -in freeipa.pem -text -noout </dev/null
Certificate:
    Data:
        Version: 3 (0x2)
        Serial Number: 43 (0x2b)
        Signature Algorithm: sha256WithRSAEncryption
        Issuer: O = NIX.MDS.XYZ, CN = Certificate Authority
        Validity
            Not Before: Sep 26 05:16:38 2022 GMT
            Not After : Sep 26 05:16:38 2024 GMT
        Subject: O = NIX.MDS.XYZ, CN = idmipa01.nix.mds.xyz

However, on closer inspection, there is no SAN entry:


            X509v3 Subject Alternative Name:
                othername: UPN::HTTP/idmipa01.nix.mds.xyz@NIX.MDS.XYZ, othername: 1.3.6.1.5.2.2::

To add a SAN entry, issue the following on each IPA server, including any replicas:

idmipa01: getcert list -d "/etc/httpd/alias" -n "Server-Cert"
idmipa01: getcert resubmit -i FROM_ABOVE_COMMAND -D $(hostname)

idmipa02: getcert list -d "/etc/httpd/alias" -n "Server-Cert"
idmipa02: getcert resubmit -i FROM_ABOVE_COMMAND -D $(hostname)

Verify again with openssl from the client that the returned FreeIPA certificates now have a SAN entry:
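
For example, a one-liner that pulls the live certificate and greps for the SAN (adjust the hostname per server):

echo | openssl s_client -connect idmipa01.nix.mds.xyz:443 2>/dev/null | openssl x509 -noout -text | grep -A1 "Subject Alternative Name"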


idmipa01:
            X509v3 Subject Alternative Name:
                DNS:idmipa01.nix.mds.xyz, othername: UPN::HTTP/idmipa01.nix.mds.xyz@NIX.MDS.XYZ, othername: 1.3.6.1.5.2.2::

idmipa02:
            X509v3 Subject Alternative Name:
                DNS:idmipa02.nix.mds.xyz, othername: UPN::HTTP/idmipa02.nix.mds.xyz@NIX.MDS.XYZ, othername: 1.3.6.1.5.2.2::

Hope this helps!

Cheers,

Ping resolves internal DNS entries with external IPs. Nslookup works fine.

Ping resolves internal DNS entries with external IPs:

C:\Windows\system32>ping atlas-c01

Pinging atlas-c01.nix.mds.xyz [3.64.163.50] with 32 bytes of data:
Control-C
^C
C:\Windows\system32>ping atlas-c01

Pinging atlas-c01.nix.mds.xyz [3.64.163.50] with 32 bytes of data:
Control-C
^C
C:\Windows\system32>

Nslookup works great:

C:\Windows\system32>nslookup atlas-c01.nix.mds.xyz
Server: dns.mds.xyz
Address: 192.168.0.224

Non-authoritative answer:
Name: atlas-c01.nix.mds.xyz
Address: 10.0.0.77

C:\Windows\system32>

Even from a locally installed Ubuntu server, resolution returns external IP addresses:

root@g73sw01:~# ping atlas-c01
PING atlas-c01.mds.xyz (3.64.163.50) 56(84) bytes of data.
From _gateway (192.168.0.1) icmp_seq=1 Redirect Network(New nexthop: _gateway (192.168.0.6))

The Linux box is surprising, since it should normally get its IP and DNS settings from netplan:


root@g73sw01:~# cat /etc/netplan/01-network-manager-all.yaml
# Let NetworkManager manage all devices on this system
network:
  version: 2
  renderer: NetworkManager
  ethernets:
    enp5s0:
      dhcp4: no
      addresses: [192.168.0.15/24]
      gateway4: 192.168.0.1
      nameservers:
        addresses: [192.168.0.224,192.168.0.46,192.168.0.51]
root@g73sw01:~#

Still, /etc/resolv.conf has the localhost IP as the nameserver, regardless of what netplan has:

root@g73sw01:~# grep -v "#" /etc/resolv.conf
nameserver 127.0.0.53
options edns0 trust-ad
search mds.xyz
root@g73sw01:~#

And there is a local DNS resolver running on the new Ubuntu install:


root@g73sw01:~# netstat -pnltu|grep -Ei 53
tcp        0      0 127.0.0.53:53           0.0.0.0:*               LISTEN      1750/systemd-resolv
udp        0      0 0.0.0.0:5353            0.0.0.0:*                           1801/avahi-daemon:
udp        0      0 127.0.0.53:53           0.0.0.0:*                           1750/systemd-resolv
udp6       0      0 :::5353                 :::*                                1801/avahi-daemon:
root@g73sw01:~#

It turns out that the local DNS resolver service running on Ubuntu (systemd-resolved) is resolving local domains to external IPs. Turning this off:
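
The service was stopped with the command also listed in the summary further down:

systemctl stop systemd-resolved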

root@g73sw01:~# systemctl status systemd-resolved
? systemd-resolved.service - Network Name Resolution
Loaded: loaded (/lib/systemd/system/systemd-resolved.service; enabled; vendor preset: enabled)
Active: inactive (dead) since Sun 2023-11-19 18:30:48 EST; 2s ago
Docs: man:systemd-resolved.service(8)
man:org.freedesktop.resolve1(5)
https://www.freedesktop.org/wiki/Software/systemd/writing-network-configuration-managers
https://www.freedesktop.org/wiki/Software/systemd/writing-resolver-clients
Process: 1750 ExecStart=/lib/systemd/systemd-resolved (code=exited, status=0/SUCCESS)
Main PID: 1750 (code=exited, status=0/SUCCESS)
Status: "Shutting down..."
CPU: 8.759s

Nov 19 17:23:24 g73sw01.nix.mds.xyz systemd-resolved[1750]: Grace period over, resuming full feature set (UDP>
Nov 19 17:24:05 g73sw01.nix.mds.xyz systemd-resolved[1750]: Using degraded feature set TCP instead of UDP for>
Nov 19 17:27:25 g73sw01.nix.mds.xyz systemd-resolved[1750]: Using degraded feature set UDP instead of TCP for>
Nov 19 17:33:04 g73sw01.nix.mds.xyz systemd-resolved[1750]: Using degraded feature set UDP instead of UDP+EDN>
Nov 19 17:38:19 g73sw01.nix.mds.xyz systemd-resolved[1750]: Using degraded feature set TCP instead of UDP for>
Nov 19 18:03:09 g73sw01.nix.mds.xyz systemd-resolved[1750]: Using degraded feature set TCP instead of UDP for>
Nov 19 18:30:48 g73sw01.nix.mds.xyz systemd[1]: Stopping Network Name Resolution...
Nov 19 18:30:48 g73sw01.nix.mds.xyz systemd[1]: systemd-resolved.service: Deactivated successfully.
Nov 19 18:30:48 g73sw01.nix.mds.xyz systemd[1]: Stopped Network Name Resolution.
Nov 19 18:30:48 g73sw01.nix.mds.xyz systemd[1]: systemd-resolved.service: Consumed 8.759s CPU time.
root@g73sw01:~#

resolves the issue:

C:\Windows\system32>ping atlas-c01.nix.mds.xyz
Pinging atlas-c01.nix.mds.xyz [10.0.0.77] with 32 bytes of data:
Control-C
^C
C:\Windows\system32>ping atlas-c01.nix.mds.xyz

Digging in further to find out how this is configured. In this case we want to disable the resolver entirely, so the Ubuntu server doesn't act as a DNS server for the rest of the network:


root@g73sw01:~# grep -v "#" /etc/systemd/resolved.conf

[Resolve]
root@g73sw01:~# 

root@g73sw01:~# systemctl disable systemd-resolved
Removed /etc/systemd/system/dbus-org.freedesktop.resolve1.service.
Removed /etc/systemd/system/multi-user.target.wants/systemd-resolved.service.
root@g73sw01:~# systemctl disable systemd-resolved
root@g73sw01:~#

root@g73sw01:~# grep -v "#" /etc/resolv.conf
nameserver 127.0.0.53
options edns0 trust-ad
search mds.xyz
root@g73sw01:~#

On Ubuntu, /etc/resolv.conf is managed by the above-mentioned service, so we need to install another package in order to manage the resolv.conf entries:

/etc/resolv.conf -> ../run/systemd/resolve/stub-resolv.conf

apt install resolvconf

systemctl status resolvconf

Next edit the following file:

/etc/resolvconf/resolv.conf.d/head

And add the name servers for your network.  For example:

root@g73sw01:~# grep -v "#" /etc/resolvconf/resolv.conf.d/head
nameserver 192.168.0.224
nameserver 192.168.0.46
nameserver 192.168.0.51
root@g73sw01:~#

In this case we only want to enable and restart the following service:

systemctl restart resolvconf

We do not wish to have our Ubuntu server resolve names for the rest of the network, so the following service will remain disabled:

systemctl disable systemd-resolved

In other environments, the service can be re-enabled as needed. That did not appear to be enough here, however; both services had to be brought up:

root@g73sw01:~# vi /etc/resolv.conf
root@g73sw01:~# vi /etc/resolvconf/resolv.conf.d/head
root@g73sw01:~#
root@g73sw01:~#
root@g73sw01:~#
root@g73sw01:~# systemctl restart systemd-resolved resolvconf
root@g73sw01:~# grep -v "#" /etc/resolvconf/resolv.conf.d/head
nameserver 192.168.0.224
nameserver 192.168.0.46
nameserver 192.168.0.51
domain nix.mds.xyz
search mds.xyz nix.mds.xyz mws.mds.xyz
root@g73sw01:~# cat /etc/resolv.conf
# Dynamic resolv.conf(5) file for glibc resolver(3) generated by resolvconf(8)
# DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN
# 127.0.0.53 is the systemd-resolved stub resolver.
# run "systemd-resolve --status" to see details about the actual nameservers.
nameserver 192.168.0.224
nameserver 192.168.0.46
nameserver 192.168.0.51
nameserver 127.0.0.53
search nix.mds.xyz mds.xyz mws.mds.xyz
root@g73sw01:~#

Yet this didn't work either. Finally, disabling both services altogether resolved it:

root@g73sw01:~# grep -v "#" /etc/resolv.conf
nameserver 192.168.0.224
nameserver 192.168.0.46
nameserver 192.168.0.51
search nix.mds.xyz mds.xyz mws.mds.xyz
root@g73sw01:~# systemctl disable systemd-resolved resolvconf
Synchronizing state of resolvconf.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install disable resolvconf
root@g73sw01:~#

Summary

What did work was removing the resolvconf package and simply updating the DNS entries with nmcli (NetworkManager) commands like this:

# nmcli con add type ethernet con-name ens160 ifname ens160 ipv4.addresses 192.168.0.30/24 ipv4.gateway 192.168.0.1 ipv4.dns "192.168.0.46 192.168.0.51 192.168.0.224" ipv4.method manual ipv4.dns-search "mds.xyz nix.mds.xyz mws.mds.xyz"

Followed by:

nmcli c s ens160                   # show the connection settings
nmcli c u ens160                   # bring the connection up
ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf
unlink /etc/resolv.conf
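
To confirm the DNS settings took effect on the connection, something along these lines can be used (a sketch; the connection name ens160 is from the example above):

nmcli -g ipv4.dns,ipv4.dns-search connection show ens160
cat /etc/resolv.conf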

To summarize the resolvconf-based commands above, which did not work in this case:

apt install resolvconf
systemctl enable resolvconf
systemctl start resolvconf

Disable the previous service:

systemctl stop systemd-resolved
systemctl disable systemd-resolved

Add DNS entries to:

/etc/resolvconf/resolv.conf.d/head

Regenerate the entries using:

sudo resolvconf -u

Problem Solved!

Cheers,

Set Static IP on Ubuntu Server

Via the CLI:

root@tom-G73Sw:~# cd /etc/netplan/
root@tom-G73Sw:/etc/netplan# cat 01-network-manager-all.yaml
# Let NetworkManager manage all devices on this system
network:
  version: 2
  renderer: NetworkManager
  ethernets:
    enp5s0:
      dhcp4: no
      addresses: [192.168.0.15/24]
      gateway4: 192.168.0.1
      nameservers:
        addresses: [192.168.0.224,192.168.0.46,192.168.0.51]
root@tom-G73Sw:/etc/netplan#

Then issue:

netplan try

so the changes take effect. Via the UI, go to Activities, then Settings, and select Network or Wi-Fi, depending on which interface you wish to set the IP on. Typically, leaving the Wi-Fi interface on DHCP makes some sense for redundancy. Your choice:

Ubuntu Static IP Configuration via GUI

Cheers,

BitDefender Parental Control

BitDefender updates resulted in these messages being printed:

“we could not verify the certificate: reason = wrongHost”

“we could not verify the certificate: reason = untrusted”

On further inspection, it is revealed that BitDefender is indeed the culprit, via its Parental Control feature:

we could not verify the certificate: reason = wrongHost

To fix this, I uninstalled BitDefender Parental Control in Windows, as it didn't have any apparent configuration to refine the blocking of the internal DNS site. I then reinstalled just BitDefender Total Security to address the problem, taking care not to install BitDefender Parental Control along the way. Hope this helps!

Cheers,
Tk

VMware: Add user and group access to only a single ESXi host

Follow these steps to grant a specific user and group access to a single ESXi host:

  1. Configure a single group, be it AD (remote) or Local.
  2. Add user to the above group.
  3. Login to the VCSA (vSphere Client).
  4. Select the ESXi host.
  5. Click on the Permissions tab.
  6. Click on the + icon.  You will be in the Add Permission | host01.domain.abc popup.
  7. Select the domain.
  8. Select the group.  Start typing so the filter can locate your group.
  9. Select the role to apply. (Roles can be defined by the Administrator.)
  10. Propagate to all children, if you wish to do so.
  11. Test!

HTH,

VMWare: Enable Management network: Error – Setting ip/ipv6 configuration failed

Getting a rather cryptic ESXi error message when trying to set a new IPv4 IP:

Enable Management Network: Error

Setting ip/ipv6 configuration failed:

For example, when trying to set 10.3.0.12, this is what is seen:

https://i0.wp.com/www.microdevsys.com/WordPressImages/ESXi-Enable-Management-Network-Error.PNG?ssl=1

It doesn't really say what the real reason behind the error is. Taking a dive into the network configuration of the ESXi host reveals why:

[root@mdsesxi-p04:~] esxcli network ip interface ipv4 get
Name  IPv4 Address  IPv4 Netmask   IPv4 Broadcast  Address Type  Gateway   DHCP DNS
----  ------------  -------------  --------------  ------------  --------  --------
vmk0  10.3.0.11     255.255.255.0  10.3.0.255      STATIC        10.3.0.1     false
vmk1  10.3.0.12     255.255.255.0  10.3.0.255      STATIC        10.3.0.1     false
vmk2  10.0.0.11     255.255.255.0  10.0.0.255      STATIC        0.0.0.0      false
[root@mdsesxi-p04:~]

In the UI there's no indication that the IP 10.3.0.12 is already taken by the vmkernel interface vmk1. Instead, setting it to 10.3.0.13, which is free:

[root@mdsesxi-p04:~] esxcli network ip interface ipv4 get
Name  IPv4 Address  IPv4 Netmask   IPv4 Broadcast  Address Type  Gateway   DHCP DNS
----  ------------  -------------  --------------  ------------  --------  --------
vmk0  10.3.0.13     255.255.255.0  10.3.0.255      STATIC        10.3.0.1     false
vmk1  10.3.0.12     255.255.255.0  10.3.0.255      STATIC        10.3.0.1     false
vmk2  10.0.0.11     255.255.255.0  10.0.0.255      STATIC        0.0.0.0      false
[root@mdsesxi-p04:~]

Works perfectly well! With the new IP, the host can now be added to the vSphere Client / Server (VCSA). Additional handy ESXi commands follow; a shell-based way of setting the IP is sketched after the list:

esxcli network nic list
esxcli network ip netstack list
esxcli network vswitch standard list
esxcli network vswitch standard portgroup list
esxcli network ip dns search list
esxcli network ip interface list
esxcli network ip interface ipv4 get
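
As a side note, the management IP can also be changed from the ESXi shell instead of the DCUI, along these lines (a sketch; double-check which vmk interface you are changing before running it):

esxcli network ip interface ipv4 set -i vmk0 -I 10.3.0.13 -N 255.255.255.0 -t static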

See the full command list here:

https://developer.vmware.com/docs/11743/esxi-7-0-esxcli-command-reference

HTH,

DD-WRT: Fixing DNS Resolution through Networking Tab

In case the below error is seen:

C:\Users\tom>nslookup josh-vm01.nix.mds.xyz 10.5.0.1
Server:  UnKnown
Address:  10.5.0.1

*** No internal type for both IPv4 and IPv6 Addresses (A+AAAA) records available for josh-vm01.nix.mds.xyz

The fix for this is to enable Setup -> Networking -> Optional DNS Target

then fill in the target DNS server, in this case 192.168.0.100 for our internal DNS server.  Test again:

C:\Users\tom>nslookup josh-vm01.nix.mds.xyz 10.5.0.1
Server:  UnKnown
Address:  10.5.0.1

Non-authoritative answer:
Name:    josh-vm01.nix.mds.xyz
Address:  10.0.0.101
C:\Users\tom>

HTH,
Tom K

OpenVPN: Can't ping public or internet IPs, but can ping local or internal IPs

When establishing an OpenVPN connection, the internal IPs can be pinged yet the external IPs cannot; the issue might be with packet NAT from the tun to the vlan2 interface. Note below that there are NO replies:

root@DD-WRT-KHUFU:/jffs/etc/openvpn# tcpdump -na -s0 -i tun2 icmp
tcpdump: verbose output suppressed, use -v[v]… for full protocol decode
listening on tun2, link-type RAW (Raw IP), snapshot length 262144 bytes
10:49:55.636673 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 9093, length 40
10:50:00.028370 IP 10.1.1.2 > 192.168.0.46: ICMP 10.1.1.2 udp port 52858 unreachable, length 535
10:50:00.661006 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 9094, length 40
10:50:05.666028 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 9095, length 40
10:50:10.661477 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 9096, length 40
10:50:11.204349 IP 10.1.1.2 > 192.168.0.51: ICMP 10.1.1.2 udp port 65235 unreachable, length 479

NOTE: there are no reply packets above.  Looking at the interfaces and rules:

# ------------------------------------------------------------------
# VPN: Required to be able to ping local on-prem or Azure VLANs
# ------------------------------------------------------------------
iptables -I FORWARD -i br0 -o tun2 -j ACCEPT
iptables -I FORWARD -i tun2 -o br0 -j ACCEPT
iptables -I INPUT -i tun2 -j logdrop
iptables -t nat -A POSTROUTING -o tun2 -j MASQUERADE

iptables -I FORWARD -i br0 -o tun1 -j ACCEPT
iptables -I FORWARD -i tun1 -o br0 -j ACCEPT
iptables -I INPUT -i tun1 -j logdrop
iptables -t nat -A POSTROUTING -o tun1 -j MASQUERADE

iptables -I FORWARD -i br0 -o tun0 -j ACCEPT
iptables -I FORWARD -i tun0 -o br0 -j ACCEPT
iptables -I INPUT -i tun0 -j logdrop
iptables -t nat -A POSTROUTING -o tun0 -j MASQUERADE

and interfaces:

6: vlan2@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1452 qdisc noqueue state UP qlen 1000
    link/ether 2c:fd:a1:35:60:51 brd ff:ff:ff:ff:ff:ff
    inet 100.100.100.100/27 brd 108.168.115.31 scope global vlan2
       valid_lft forever preferred_lft forever
    inet6 fe80::2efd:a1ff:fe35:6051/64 scope link
       valid_lft forever preferred_lft forever

 

11: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP qlen 1000
    link/ether 2c:fd:a1:35:60:50 brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.6/24 brd 192.168.0.255 scope global br0
       valid_lft forever preferred_lft forever
    inet6 fe80::2efd:a1ff:fe35:6050/64 scope link
       valid_lft forever preferred_lft forever

14: tun2: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UNKNOWN qlen 500
    link/[65534]
    inet 10.1.1.1/24 scope global tun2
       valid_lft forever preferred_lft forever

 

Forwarding to br0, which is the local network, works very well:

C:\Users\tom>ping josh-vm01.nix.mds.xyz

Pinging josh-vm01.nix.mds.xyz [10.0.0.101] with 32 bytes of data:
Reply from 10.0.0.101: bytes=32 time=5ms TTL=62
Reply from 10.0.0.101: bytes=32 time=5ms TTL=62

But ping to the outside does not:

C:\Users\tom>ping microdevsys.com

Pinging microdevsys.com [74.208.236.205] with 32 bytes of data:
Control-C
^C
C:\Users\tom>

The rules responsible for the above local forwarding, which works, were:

# ------------------------------------------------------------------
# VPN: Required to be able to ping local on-prem or Azure VLANs
# ------------------------------------------------------------------
iptables -I FORWARD -i br0 -o tun2 -j ACCEPT
iptables -I FORWARD -i tun2 -o br0 -j ACCEPT
iptables -I INPUT -i tun2 -j logdrop
iptables -t nat -A POSTROUTING -o tun2 -j MASQUERADE

iptables -I FORWARD -i br0 -o tun1 -j ACCEPT
iptables -I FORWARD -i tun1 -o br0 -j ACCEPT
iptables -I INPUT -i tun1 -j logdrop
iptables -t nat -A POSTROUTING -o tun1 -j MASQUERADE

iptables -I FORWARD -i br0 -o tun0 -j ACCEPT
iptables -I FORWARD -i tun0 -o br0 -j ACCEPT
iptables -I INPUT -i tun0 -j logdrop
iptables -t nat -A POSTROUTING -o tun0 -j MASQUERADE

However, there was nothing for vlan2 above, which is the internet-facing network. The following rules, added in, forward traffic between the tun (tunnel) interfaces and the outside world, allowing external pings to work:

# ------------------------------------------------------------------
# VPN: Allow web traffic: tunX to internet.
# ------------------------------------------------------------------
iptables -I FORWARD -i vlan2 -o tun2 -j ACCEPT
iptables -I FORWARD -i tun2 -o vlan2 -j ACCEPT

# iptables -I INPUT -i tun2 -j logdrop
# iptables -t nat -A POSTROUTING -o tun2 -j MASQUERADE

iptables -I FORWARD -i vlan2 -o tun1 -j ACCEPT
iptables -I FORWARD -i tun1 -o vlan2 -j ACCEPT

# iptables -I INPUT -i tun1 -j logdrop
# iptables -t nat -A POSTROUTING -o tun1 -j MASQUERADE

iptables -I FORWARD -i vlan2 -o tun0 -j ACCEPT
iptables -I FORWARD -i tun0 -o vlan2 -j ACCEPT

# iptables -I INPUT -i tun0 -j logdrop
# iptables -t nat -A POSTROUTING -o tun0 -j MASQUERADE

Two additional stanzas were added for any other future tun interfaces. The result is:

root@DD-WRT-KHUFU:~# uptime
 17:22:08 up 7 min,  load average: 0.06, 0.18, 0.13
root@DD-WRT-KHUFU:~# tcpdump -na -s 0 -i tun2 icmp
tcpdump: verbose output suppressed, use -v[v]… for full protocol decode
listening on tun2, link-type RAW (Raw IP), snapshot length 262144 bytes
17:22:30.628458 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 32132, length 40
17:22:30.664892 IP 74.208.236.205 > 10.1.1.2: ICMP echo reply, id 1, seq 32132, length 40
17:22:31.648842 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 32133, length 40
17:22:31.684757 IP 74.208.236.205 > 10.1.1.2: ICMP echo reply, id 1, seq 32133, length 40
17:22:32.661280 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 32134, length 40
17:22:32.699882 IP 74.208.236.205 > 10.1.1.2: ICMP echo reply, id 1, seq 32134, length 40
17:22:33.668026 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 32135, length 40
17:22:33.705033 IP 74.208.236.205 > 10.1.1.2: ICMP echo reply, id 1, seq 32135, length 40
17:22:38.492608 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 32136, length 40
17:22:38.530251 IP 74.208.236.205 > 10.1.1.2: ICMP echo reply, id 1, seq 32136, length 40
17:22:39.504142 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 32137, length 40
17:22:39.541025 IP 74.208.236.205 > 10.1.1.2: ICMP echo reply, id 1, seq 32137, length 40
17:22:40.515175 IP 10.1.1.2 > 74.208.236.205: ICMP echo request, id 1, seq 32138, length 40
17:22:40.554968 IP 74.208.236.205 > 10.1.1.2: ICMP echo reply, id 1, seq 32138, length 40
17:22:46.839749 IP 10.1.1.2 > 10.0.0.101: ICMP echo request, id 1, seq 32139, length 40
17:22:46.841578 IP 10.0.0.101 > 10.1.1.2: ICMP echo reply, id 1, seq 32139, length 40
17:22:47.855641 IP 10.1.1.2 > 10.0.0.101: ICMP echo request, id 1, seq 32140, length 40
17:22:47.857412 IP 10.0.0.101 > 10.1.1.2: ICMP echo reply, id 1, seq 32140, length 40
^C
18 packets captured
18 packets received by filter
0 packets dropped by kernel
root@DD-WRT-KHUFU:~#
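
To double-check that the new rules are the ones matching traffic, the per-rule packet and byte counters can be inspected:

iptables -L FORWARD -v -n --line-numbers
iptables -t nat -L POSTROUTING -v -n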

The full set of rules is:

root@DD-WRT-KHUFU:~# vi /jffs/firewall/DD-WRT-KHUFU-firewall.run
# ------------------------------------------------------------------
# VPN: Required to be able to ping local on-prem or Azure VLANs
# ------------------------------------------------------------------
iptables -I FORWARD -i br0 -o tun2 -j ACCEPT
iptables -I FORWARD -i tun2 -o br0 -j ACCEPT
iptables -I INPUT -i tun2 -j logdrop
iptables -t nat -A POSTROUTING -o tun2 -j MASQUERADE

iptables -I FORWARD -i br0 -o tun1 -j ACCEPT
iptables -I FORWARD -i tun1 -o br0 -j ACCEPT
iptables -I INPUT -i tun1 -j logdrop
iptables -t nat -A POSTROUTING -o tun1 -j MASQUERADE

iptables -I FORWARD -i br0 -o tun0 -j ACCEPT
iptables -I FORWARD -i tun0 -o br0 -j ACCEPT
iptables -I INPUT -i tun0 -j logdrop
iptables -t nat -A POSTROUTING -o tun0 -j MASQUERADE


# ------------------------------------------------------------------
# VPN: Allow web traffic: tunX to internet.
# ------------------------------------------------------------------
iptables -I FORWARD -i vlan2 -o tun2 -j ACCEPT
iptables -I FORWARD -i tun2 -o vlan2 -j ACCEPT
# iptables -I INPUT -i tun2 -j logdrop
# iptables -t nat -A POSTROUTING -o tun2 -j MASQUERADE

iptables -I FORWARD -i vlan2 -o tun1 -j ACCEPT
iptables -I FORWARD -i tun1 -o vlan2 -j ACCEPT
# iptables -I INPUT -i tun1 -j logdrop
# iptables -t nat -A POSTROUTING -o tun1 -j MASQUERADE

iptables -I FORWARD -i vlan2 -o tun0 -j ACCEPT
iptables -I FORWARD -i tun0 -o vlan2 -j ACCEPT
# iptables -I INPUT -i tun0 -j logdrop
# iptables -t nat -A POSTROUTING -o tun0 -j MASQUERADE


# ----------------------
#  VPN
# ----------------------
# iptables -I INPUT 1 -p tcp --dport 47888 -j ACCEPT
# iptables -A INPUT -d 10.1.1.0/24 -j ACCEPT
# iptables -A INPUT -s 10.1.1.0/24 -j ACCEPT
# iptables -I FORWARD 1 --source 10.1.1.0/24 -j ACCEPT
# iptables -t nat -A POSTROUTING -s 10.1.1.0/24 -j MASQUERADE

Now that both sets of rules are in, traffic to both internal and external resources works!

HTH,
Tom K


     