"""
Load Balancer as a Service
==========================
You can create, modify and delete TCP load balancers.
"""

import pytest
from time import sleep
from util import build_http_url
from util import get_backends_for_request
from util import in_parallel
from util import RESOURCE_NAME_PREFIX
from util import retry_for
from util import setup_lbaas_http_test_server
from util import start_persistent_download
from util import unique
from util import wait_for_load_balancer_ready
from util import wait_for_url_ready


def test_simple_tcp_load_balancer(prober, create_load_balancer_scenario):
""" Create a simple TCP load balancer with one backend. """
# Create a load balancer setup with one backend on a private network
load_balancer, listener, pool, backends, private_network = \
create_load_balancer_scenario(
num_backends=1,
algorithm='round_robin',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
# Test if the load balancer works on IPv4
content = prober.http_get(load_balancer.build_url(addr_family=4))
assert 'Backend server running on' in content
# Test if the load balancer works on IPv6
content = prober.http_get(load_balancer.build_url(addr_family=6))
assert 'Backend server running on' in content


def test_load_balancer_end_to_end(prober, create_load_balancer_scenario):
""" Multi backend load balancer end-to-end test scenario.
* Load balancer on a public network with multiple backend servers on a
private network
* Send TCP traffic through the LB and verify backend answers
"""
# Create a load balancer setup with two backends on a private network
load_balancer, listener, pool, backends, private_network = \
create_load_balancer_scenario(
num_backends=2,
algorithm='round_robin',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
# Issue 10 requests on IPv4 and IPv6 to the load balancer
for i in range(10):
content = prober.http_get(load_balancer.build_url(addr_family=4))
assert 'Backend server running on' in content
content = prober.http_get(load_balancer.build_url(addr_family=6))
assert 'Backend server running on' in content
# Assert logs on both backend servers show they received traffic
assert unique(backends) == unique(get_backends_for_request(backends))


def test_multiple_listeners(prober, create_load_balancer_scenario):
""" Two load balancer listeners connected to the same pool.
"""
# Create a load balancer setup with one backend on a private network
load_balancer, listener1, pool, (backend, ), private_network = \
create_load_balancer_scenario(
num_backends=1,
algorithm='round_robin',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
# Add an additional listener on port 81
load_balancer.add_listener(pool, 81, name='listener-81')
# Assert the LB still works on port 80
assert prober.http_get(f'http://{load_balancer.vip(4)}/hostname') \
== backend.name
# Wait for the new listener on port 81 to be operational
wait_for_load_balancer_ready(load_balancer, prober, port=81, timeout=30)
# Assert backend is also reachable on port 81
assert prober.http_get(f'http://{load_balancer.vip(4)}:81/hostname') \
== backend.name


def test_multiple_listeners_multiple_pools(
prober, create_backend_server, image,
create_private_network,
create_load_balancer_scenario,
):
""" Two listeners connected to their own pool of member servers each.
"""
    # Create a load balancer setup with one backend on a private network
load_balancer, listener1, pool1, (backend1, ), private_network1 = \
create_load_balancer_scenario(
num_backends=1,
algorithm='round_robin',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
# Assert backend1 is reachable on port 80
assert prober.http_get(f'http://{load_balancer.vip(4)}/hostname') \
== backend1.name
# Create an additional backend network
private_network2 = create_private_network(auto_create_ipv4_subnet=True)
    # Create an additional backend server
backend2 = create_backend_server(
name='backend2',
private_network=private_network2,
)
# Create a second pool and add the additional backend to it
    pool2 = load_balancer.add_pool('lb-pool-2', 'round_robin', 'tcp')
load_balancer.add_pool_member(pool2, backend2, private_network2)
    # Add an additional listener on port 81 for the second pool
load_balancer.add_listener(pool2, 81, name='listener-81')
# Assert backend1 is still reachable on port 80
assert prober.http_get(f'http://{load_balancer.vip(4)}/hostname') \
== backend1.name
# Wait for the new backend to be operational
wait_for_load_balancer_ready(load_balancer, prober, port=81, timeout=30)
# Assert backend2 is reachable on port 81
assert prober.http_get(f'http://{load_balancer.vip(4)}:81/hostname') \
== backend2.name


def test_balancing_algorithm_round_robin(
prober, create_load_balancer_scenario,
):
""" The round_robin balancing algorithm schedules connections in turn among
pool members.
"""
# Create a load balancer setup with 3 backends on a private network
num_backends = 3
load_balancer, listener, pool, backends, private_network = \
create_load_balancer_scenario(
num_backends=num_backends,
algorithm='round_robin',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
# Issue a request to each backend to get the round robin order
backend_order = [prober.http_get(load_balancer.build_url(url='/hostname'))
for i in range(num_backends)]
# Assert all backends got a request
assert len(unique(backend_order)) == num_backends
# Issue 10 requests to each backend and verify round robin distribution
for n in range(10 * num_backends):
# Assert the correct backend received the request
hit_backend_name = prober.http_get(
load_balancer.build_url(url='/hostname')
)
assert hit_backend_name == backend_order[n % num_backends]


def test_balancing_algorithm_source_ip(
prober, create_load_balancer_scenario, create_server, image,
):
""" The source_ip balancing algorithm always schedules connections from the
same source to the same pool member.
This test creates a load balancer with 4 backends and 4 clients with
different source IP addresses and verifies that connections from the same
client always go to the same backend.
"""
# Create a load balancer setup with 4 backends on a private network
load_balancer, listener, pool, backends, private_network = \
create_load_balancer_scenario(
num_backends=4,
algorithm='source_ip',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
# Create 4 client VMs
clients = in_parallel(
create_server,
instances=({
'name': f'client{i+1}',
'image': image,
} for i in range(4))
)
# Build map of which client is directed to which backend server
backend_per_client = {}
for client in clients:
# Issue the first request from this client to the LB
backend_per_client[client.name] = client.http_get(
load_balancer.build_url(url='/hostname'),
)
# Issue 10 requests and verify the correct backend receives the request
for n in range(10):
for client in clients:
hit_backend_name = client.http_get(
load_balancer.build_url(url='/hostname')
)
assert backend_per_client[client.name] == hit_backend_name


def test_balancing_algorithm_least_connections(
server, create_load_balancer_scenario,
):
""" The least_connections balancing algorithm schedules connections to the
backend with the least amount of active connections.
Note: The current connection count is reset on any configuration changes to
the load balancer. Still running connections initiated before the
configuration change are not considered for selecting the backend.
"""
    # This test uses a function-scoped prober because we are going to start
    # long-running downloads which should not impact other tests.
prober = server
# Create a load balancer setup with 2 backends on a private network
load_balancer, listener, pool, backends, private_network = \
create_load_balancer_scenario(
num_backends=2,
algorithm='least_connections',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
    # Start a persistent endless download of random data to "block" one backend
blocked_backend = start_persistent_download(prober, load_balancer,
backends)
    # Verify requests go to the other backend as it has fewer active
    # connections
for i in range(10):
hit_backend_name = prober.http_get(
load_balancer.build_url(url='/hostname')
)
assert hit_backend_name != blocked_backend.name


@pytest.mark.parametrize('health_monitor_type',
['ping', 'tcp', 'http', 'https', 'tls-hello'])
def test_backend_health_monitors(
prober, create_load_balancer_scenario, health_monitor_type,
):
""" Different health monitoring methods can be used to verify pool member
availability:
* ICMP ping health monitors check pool member availability by sending ICMP
echo requests to the pool member IP address
    * TCP health monitors check the availability of a TCP port on a pool member
    * HTTP(S) health monitors check the response code of an HTTP(S) URL
    * TLS_HELLO health monitors check that a pool member answers when a TLS
      handshake is initiated on a TCP port
"""
# Additional http parameter configuration for health monitor types
# which require additional configuration.
health_monitor_http_config = {
'http': {'host': 'www.example.com'},
'https': {'version': '1.0'},
}
# Configure SSL backend for SSL health checks
    ssl = health_monitor_type in ('https', 'tls-hello')
# Create a load balancer setup with 1 backend on a private network
load_balancer, listener, pool, (backend, ), private_network = \
create_load_balancer_scenario(
num_backends=1,
algorithm='least_connections',
port=80,
pool_protocol='tcp',
allowed_cidrs=None,
health_monitor_type=health_monitor_type,
health_monitor_http_config=health_monitor_http_config.get(
health_monitor_type),
ssl=ssl,
)
# Test function to assert the desired load balancer and health monitor
# status
def assert_status(load_balancer_status, monitor_status):
load_balancer.refresh()
assert load_balancer.status == load_balancer_status
assert load_balancer.pool_members[0]['monitor_status'] \
== monitor_status
# Verify the health monitor reports the backend as up
retry_for(seconds=20).or_fail(
assert_status,
msg='Health monitor does not report "up" status after 20s',
load_balancer_status='running',
monitor_status='up',
)
# Shutdown the backend server and verify the health monitor goes down
backend.stop()
retry_for(seconds=20).or_fail(
assert_status,
msg='Health monitor does not report "down" status after 20s',
# As all pool members are down the LB is in status "error"
load_balancer_status='error',
monitor_status='down',
)
    # Start the backend again and verify the health monitor goes up
# The test web server on the backend has to be started as well. It's not
# configured as a persistent systemd unit.
backend.start()
setup_lbaas_http_test_server(backend, ssl)
retry_for(seconds=20).or_fail(
assert_status,
msg='Health monitor does not report "up" status after 20s',
load_balancer_status='running',
monitor_status='up',
)


@pytest.mark.parametrize('action', ('disable-enable', 'remove-add'))
def test_pool_member_change(server, create_load_balancer_scenario,
action):
""" A pool member can be removed and added back or disabled and reenabled
to a load balancer pool without any disruption to connections not
terminated on this pool member.
"""
    # This test uses a function-scoped prober because we are going to start
    # long-running downloads which should not impact other tests.
prober = server
# Create a load balancer setup with 2 backends on a private network
load_balancer, listener, pool, backends, private_network = \
create_load_balancer_scenario(
num_backends=2,
algorithm='round_robin',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
    # Download an endless file to create "persistent" connections
backend_first = start_persistent_download(prober, load_balancer, backends,
'wget-first')
    # Second download to create a "persistent" connection to the other backend
backend_second = start_persistent_download(prober, load_balancer, backends,
'wget-second')
# Assert both backends got a request
assert set(backends) == {backend_first, backend_second}
# Get pool member serving the first download
member_first_name = \
f'{RESOURCE_NAME_PREFIX}-pool-member-{backend_first.name}'
member_first = next(x for x in load_balancer.pool_members
if x['name'] == member_first_name)
# Remove/Disable member_first from the pool
if action == 'remove-add':
load_balancer.remove_pool_member(pool, member_first)
elif action == 'disable-enable':
load_balancer.toggle_pool_member(pool, member_first, enabled=False)
# Assert the persistent download to backend_second is still active
assert prober.output_of(
'systemctl --user is-active wget-second') == 'active'
# Assert the persistent download to backend_first is also still active.
# Already active connections are not affected by the configuration change.
assert prober.output_of(
'systemctl --user is-active wget-first') == 'active'
# Assert requests only go to backend_second after some time
retry_for(seconds=20).or_fail(
load_balancer.verify_backend,
msg=f'Backend {backend_first.name} not removed from the pool '
f'within 20s.',
prober=prober,
backend=backend_second,
# If 5 consecutive requests go to backend_second we assume
# backend_first no longer receives traffic
count=5,
)
# Add the backend back to the pool
if action == 'remove-add':
load_balancer.add_pool_member(pool, backend_first, private_network)
elif action == 'disable-enable':
load_balancer.toggle_pool_member(pool, member_first, enabled=True)
# Assert the other backend is added back to the pool and starts to serve
# requests again
retry_for(seconds=10).or_fail(
load_balancer.verify_backend,
msg=f'Backend {backend_first.name} not added back to the pool '
f'within 10s.',
prober=prober,
backend=backend_first,
)
# Assert the persistent downloads are still active
assert prober.output_of(
'systemctl --user is-active wget-first') == 'active'
assert prober.output_of(
'systemctl --user is-active wget-second') == 'active'


def test_private_load_balancer_frontend(
create_server, image, create_load_balancer_scenario, private_network,
):
""" A load balancer can use a private network as it's frontend (VIP)
network to receive connections.
"""
    # Create a function-scoped prober as we are going to attach this server
    # to the private frontend network
prober = create_server(image=image['slug'], use_private_network=True)
# Add a subnet to the private frontend network
frontend_subnet = private_network.add_subnet(cidr='192.168.100.0/24')
# Attach the frontend network to the server used as an LB client
prober.update(
interfaces=[
{'network': 'public'},
{'network': prober.interfaces[1]['network']['uuid']},
{'network': private_network.info['uuid']},
],
)
prober.enable_dhcp_in_networkd(prober.interfaces[-1])
# Create a private load balancer setup with 1 backend on a private network
load_balancer, listener, pool, (backend, ), backend_network = \
create_load_balancer_scenario(
num_backends=1,
algorithm='least_connections',
frontend_subnet=frontend_subnet,
prober=prober,
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
# Assert the backend is reachable from the prober over the load balancer
load_balancer.verify_backend(prober, backend)


def test_floating_ip(prober, create_load_balancer_scenario, floating_ip):
""" A Floating IP can be assigned to a load balancer and used to receive
client connections.
"""
# Create a load balancer setup with one backend on a private network
load_balancer, listener, pool, (backend, ), private_network = \
create_load_balancer_scenario(
num_backends=1,
algorithm='round_robin',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
# Assign Floating IP to load balancer
floating_ip.assign(load_balancer=load_balancer)
# Assert load balancer is reachable on the Floating IP after at most 20s
wait_for_url_ready(
build_http_url(floating_ip.address),
prober,
timeout=20,
)
# Assert the load balancer is serving content on the Floating IP
assert prober.http_get(
url=build_http_url(floating_ip.address, path='/hostname')
) == backend.name


def test_floating_ip_reassign(prober, create_load_balancer_scenario,
floating_ipv4, server):
""" Test if a Floating IP can be reassigned from a server to a load
balancer, to another load balancer and back to a server.
"""
def check_content(url, content):
assert prober.http_get(url) == content
# Create two load balancer setups with one backend each
((load_balancer1, listener1, pool1, (backend1, ), private_network1),
     (load_balancer2, listener2, pool2, (backend2, ), private_network2)) = \
in_parallel(create_load_balancer_scenario,
[{'name': 'lb1',
'num_backends': 1,
'algorithm': 'round_robin',
'port': 80,
'pool_protocol': 'tcp',
'ssl': False,
'health_monitor_type': None,
'allowed_cidrs': None,
},
{'name': 'lb2',
'num_backends': 1,
'algorithm': 'round_robin',
'port': 80,
'pool_protocol': 'tcp',
'ssl': False,
'health_monitor_type': None,
'allowed_cidrs': None,
}])
# Assign Floating IP to the server
floating_ipv4.assign(server=server)
# Configure the Floating IP on the server
server.configure_floating_ip(floating_ipv4)
# Check if the Floating IP is reachable (wait up to 15 seconds)
prober.ping(floating_ipv4, count=1, tries=15)
# Assign Floating IP to the first load balancer
floating_ipv4.assign(load_balancer=load_balancer1)
# Wait for up to 20s for the Floating IP to become ready
wait_for_url_ready(
f'http://{floating_ipv4.address}/hostname',
prober,
content=backend1.name,
timeout=20,
)
# Check if backend1 (via load_balancer1) receives requests on the
# Floating IP
assert prober.http_get(f'http://{floating_ipv4.address}/hostname') \
== backend1.name
# Assign Floating IP to the second load balancer
floating_ipv4.assign(load_balancer=load_balancer2)
# Wait for up to 20s for the Floating IP to become ready
wait_for_url_ready(
f'http://{floating_ipv4.address}/hostname',
prober,
content=backend2.name,
timeout=20,
)
# Check if backend2 (via load_balancer2) receives requests on the
# Floating IP
assert prober.http_get(f'http://{floating_ipv4.address}/hostname') \
== backend2.name
# Assign Floating IP back to the server
floating_ipv4.assign(server=server)
# Check if the Floating IP is reachable (wait up to 15 seconds)
prober.ping(floating_ipv4, count=1, tries=15)


def test_frontend_allowed_cidr(prober, create_load_balancer_scenario):
""" Frontend connection source IPs can be restricted by CIDRs. This works
for IPv4 and IPv6. The access restrictions can be updated on existing
load balancers.
"""
# Create a load balancer setup with one backend on a private network
# Restrict access to the prober
load_balancer, listener, pool, (backend, ), private_network = \
create_load_balancer_scenario(
num_backends=1,
algorithm='round_robin',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=[
f'{prober.ip("public", 4)}/32',
f'{prober.ip("public", 6)}/128',
],
)
# Assert the load balancer works on IPv4 and IPv6
prober.http_get(f'http://{load_balancer.vip(4)}/')
prober.http_get(f'http://[{load_balancer.vip(6)}]/')
# Restrict access to only the IPv4 address of the prober
load_balancer.update_listener(
listener,
allowed_cidrs=[f'{prober.ip("public", 4)}/32'],
)
    # Wait some time for the configuration to be applied. Unfortunately the
    # API does not report when the change has taken effect
sleep(15)
# Assert the load balancer works on IPv4 and DOES NOT work on IPv6
prober.http_get(f'http://{load_balancer.vip(4)}/')
with pytest.raises(AssertionError):
prober.http_get(f'http://[{load_balancer.vip(6)}]/')
# Restrict access to only the IPv6 address of the prober
load_balancer.update_listener(
listener,
allowed_cidrs=[f'{prober.ip("public", 6)}/128'],
)
    # Wait some time for the configuration to be applied. Unfortunately the
    # API does not report when the change has taken effect
sleep(15)
# Assert the load balancer works on IPv6 and DOES NOT work on IPv4
prober.http_get(f'http://[{load_balancer.vip(6)}]/')
with pytest.raises(AssertionError):
prober.http_get(f'http://{load_balancer.vip(4)}/')
# Restrict access to documentation IPv4 and IPv6 networks
# The prober should no longer have access
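    # (192.0.2.0/24 and 2001:db8::/32 are the RFC 5737 / RFC 3849
    # documentation ranges, so no real client source address matches them.)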
load_balancer.update_listener(
listener,
allowed_cidrs=['192.0.2.0/24', '2001:db8::/32'],
)
    # Wait some time for the configuration to be applied. Unfortunately the
    # API does not report when the change has taken effect
sleep(15)
# Assert the load balancer does not work on IPv4 and IPv6
with pytest.raises(AssertionError):
prober.http_get(f'http://{load_balancer.vip(4)}/')
with pytest.raises(AssertionError):
prober.http_get(f'http://[{load_balancer.vip(6)}]/')


@pytest.mark.parametrize('proxy_protocol', ('proxy', 'proxyv2'))
def test_proxy_protocol(prober, create_load_balancer_scenario, proxy_protocol):
""" The load balancer can be configured to pass source IP information to
the backend server via the Proxy Protocol. Version 1 and 2 of the Proxy
Protocol are supported.
"""
# Create a load balancer setup with one backend on a private network
load_balancer, listener, pool, (backend, ), private_network = \
create_load_balancer_scenario(
num_backends=1,
algorithm='round_robin',
port=80,
pool_protocol=proxy_protocol,
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
# Assert the PROXY protocol header gets logged for IPv4
expected_log_line = {
'proxy': f'PROXY V1 header received: '
f'TCP4 {prober.ip("public", 4)} {load_balancer.vip(4)}',
'proxyv2': f'PROXY V2 header received: PROXY '
f'TCP4 {prober.ip("public", 4)} {load_balancer.vip(4)}'
}
prober.http_get(load_balancer.build_url(addr_family=4))
logs = backend.output_of('journalctl --user-unit lbaas-http-test-server')
assert expected_log_line[proxy_protocol] in logs
# Assert the PROXY protocol header gets logged for IPv6
expected_log_line = {
'proxy': f'PROXY V1 header received: '
f'TCP6 {prober.ip("public", 6)} {load_balancer.vip(6)}',
'proxyv2': f'PROXY V2 header received: PROXY '
f'TCP6 {prober.ip("public", 6)} {load_balancer.vip(6)}'
}
prober.http_get(load_balancer.build_url(addr_family=6))
logs = backend.output_of('journalctl --user-unit lbaas-http-test-server')
assert expected_log_line[proxy_protocol] in logs


def test_ping(prober, create_load_balancer_scenario, floating_ipv4,
floating_ipv6):
""" The load balancer answers to ICMP echo requests (ping) on all VIP
addresses and assigned Floating IPs.
"""
# Create simple load balancer setup with public VIP
load_balancer, listener, pool, backends, private_network = \
create_load_balancer_scenario(
num_backends=1,
algorithm='round_robin',
port=80,
pool_protocol='tcp',
ssl=False,
health_monitor_type=None,
allowed_cidrs=None,
)
# Verify the load balancer is pingable on IPv4 and IPv6 VIP
prober.ping(load_balancer.vip(4), count=1, tries=15)
prober.ping(load_balancer.vip(6), count=1, tries=15)
# Assign Floating IPs to load balancer
floating_ipv4.assign(load_balancer=load_balancer)
floating_ipv6.assign(load_balancer=load_balancer)
# Verify the load balancer is pingable on the Floating IPs
prober.ping(floating_ipv4.address, count=1, tries=15)
prober.ping(floating_ipv6.address, count=1, tries=15)