basic ECS spawner

This commit is contained in:
2026-01-15 15:27:48 +01:00
parent 24a781f36a
commit eb737b469c
860 changed files with 58621 additions and 32 deletions

View File

@@ -0,0 +1,56 @@
## Simple performance timing helpers for GECS
## Records results to JSONL files (one JSON per line, one file per test)
class_name PerfHelpers


## Time a callable and return elapsed wall-clock time in milliseconds.
static func time_it(callable: Callable) -> float:
	var start_time = Time.get_ticks_usec()
	callable.call()
	var end_time = Time.get_ticks_usec()
	return (end_time - start_time) / 1000.0  # usec -> ms


## Record performance result to test-specific JSONL file
## (res://reports/perf/<test_name>.jsonl, one JSON object per line).
static func record_result(test_name: String, scale: int, time_ms: float) -> void:
	var result = {
		"timestamp": Time.get_datetime_string_from_system(),
		"test": test_name,
		"scale": scale,
		"time_ms": time_ms,
		"godot_version": Engine.get_version_info().string
	}
	# Ensure perf directory exists. make_dir_recursive creates both
	# "reports" and "reports/perf" in one call (the original created them
	# one level at a time), and a failed DirAccess.open is now reported
	# instead of being silently ignored.
	var dir = DirAccess.open("res://")
	if dir:
		if not dir.dir_exists("reports/perf"):
			dir.make_dir_recursive("reports/perf")
	else:
		push_error("Failed to open res:// to create perf report directory (Error: %s)" % error_string(DirAccess.get_open_error()))
	# Append to test-specific JSONL file (one JSON per line)
	var filepath = "res://reports/perf/%s.jsonl" % test_name
	# Check if file exists, if not create it with WRITE, otherwise open with READ_WRITE
	var file_exists = FileAccess.file_exists(filepath)
	var file = FileAccess.open(filepath, FileAccess.READ_WRITE if file_exists else FileAccess.WRITE)
	if file:
		if file_exists:
			file.seek_end()  # append, don't overwrite earlier results
		file.store_line(JSON.stringify(result))
		file.close()
	else:
		push_error("Failed to open performance log file: %s (Error: %s)" % [filepath, error_string(FileAccess.get_open_error())])
	# Print result for console visibility
	prints("📊 %s (scale=%d): %.2f ms" % [test_name, scale, time_ms])


## Optional: Assert performance threshold (simple version)
static func assert_threshold(time_ms: float, max_ms: float, message: String = "") -> void:
	if time_ms > max_ms:
		var error = "Performance threshold exceeded: %.2f ms > %.2f ms" % [time_ms, max_ms]
		if not message.is_empty():
			error = "%s - %s" % [message, error]
		assert(false, error)

View File

@@ -0,0 +1 @@
uid://bw7545nfp8er2

View File

@@ -0,0 +1 @@
uid://5hhcxik6cv30

View File

@@ -0,0 +1 @@
uid://cfa7qhlpk01qk

View File

@@ -0,0 +1 @@
uid://p46sqv2vhhyj

View File

@@ -0,0 +1 @@
uid://bxoj2kyydasxw

View File

@@ -0,0 +1 @@
uid://2l6fp4kfjsc0

View File

@@ -0,0 +1 @@
uid://8l8i83qy6ng7

View File

@@ -0,0 +1 @@
uid://b2d37fkunmia3

View File

@@ -0,0 +1 @@
uid://cxpn28q0c7wr

View File

@@ -0,0 +1 @@
uid://ik4sfttwvm74

View File

@@ -0,0 +1 @@
uid://x2vtacyhjyp7

View File

@@ -0,0 +1,36 @@
extends GdUnitTestSuite

var runner: GdUnitSceneRunner
var world: World


func before():
	runner = scene_runner("res://addons/gecs/tests/test_scene.tscn")
	world = runner.get_property("world")
	ECS.world = world


func after_test():
	if world:
		world.purge(false)


## Test to debug cache behavior
func test_cache_hits_with_repeated_queries():
	# Populate the world with 100 entities carrying varied component mixes:
	# every 2nd gets C_TestA, every 3rd gets C_TestB.
	for idx in 100:
		var subject = Entity.new()
		subject.name = "Entity_%d" % idx
		if idx % 2 == 0:
			subject.add_component(C_TestA.new())
		if idx % 3 == 0:
			subject.add_component(C_TestB.new())
		world.add_entity(subject)
	# Run the identical query ten times and dump the cache stats each pass.
	for run in 10:
		var matched = world.query.with_all([C_TestA, C_TestB]).execute()
		var stats = world.get_cache_stats()
		print("Query %d: found %d entities | Cache hits=%d misses=%d" % [
			run + 1,
			matched.size(),
			stats.cache_hits,
			stats.cache_misses
		])

View File

@@ -0,0 +1 @@
uid://bdh450526m2jk

View File

@@ -0,0 +1,172 @@
## Cache Key Generation Performance Tests
## Tests the performance of cache key generation with different query complexities
extends GdUnitTestSuite

var runner: GdUnitSceneRunner
var world: World


func before():
	runner = scene_runner("res://addons/gecs/tests/test_scene.tscn")
	world = runner.get_property("world")
	ECS.world = world


func after_test():
	if world:
		world.purge(false)


## Shared setup used by every scaled test in this suite: spawn `count`
## entities where every 2nd gets C_TestA and every 3rd gets C_TestB
## (so 1 in 6 matches a with_all([A, B]) query).
func _setup_ab_entities(count: int) -> void:
	for i in count:
		var entity = Entity.new()
		entity.name = "Entity_%d" % i
		if i % 2 == 0:
			entity.add_component(C_TestA.new())
		if i % 3 == 0:
			entity.add_component(C_TestB.new())
		world.add_entity(entity, null, false)


## Test cache key generation with varying numbers of components
## This tests the raw cache key generation performance
func test_cache_key_generation(num_components: int, test_parameters := [[1], [5], [10], [20]]):
	# Build arrays of component types for the test
	var all_components = []
	var any_components = []
	var exclude_components = []
	# Use available test components, cycling if num_components exceeds them
	var available_components = [C_TestA, C_TestB, C_TestC, C_TestD, C_TestE, C_TestF, C_TestG, C_TestH]
	# Distribute components across all/any/exclude (round-robin by i % 3)
	for i in num_components:
		var comp = available_components[i % available_components.size()]
		if i % 3 == 0:
			all_components.append(comp)
		elif i % 3 == 1:
			any_components.append(comp)
		else:
			exclude_components.append(comp)
	# Time generating cache keys 10000 times
	var time_ms = PerfHelpers.time_it(func():
		for i in 10000:
			var key = QueryCacheKey.build(all_components, any_components, exclude_components)
	)
	PerfHelpers.record_result("cache_key_generation", num_components, time_ms)


## Test cache hit performance with varying world sizes
## This measures the complete cached query execution time
func test_cache_hit_performance(scale: int, test_parameters := [[100], [1000], [10000]]):
	_setup_ab_entities(scale)
	# Execute query once to populate cache
	var __ = world.query.with_all([C_TestA, C_TestB]).execute()
	# Time 1000 cache hit queries
	var time_ms = PerfHelpers.time_it(func():
		for i in 1000:
			var entities = world.query.with_all([C_TestA, C_TestB]).execute()
	)
	PerfHelpers.record_result("cache_hit_performance", scale, time_ms)


## Test cache miss vs cache hit comparison
## This shows the performance difference between cache miss and hit
func test_cache_miss_vs_hit(scale: int, test_parameters := [[100], [1000], [10000]]):
	_setup_ab_entities(scale)
	# Measure cache miss (first query)
	var miss_time_ms = PerfHelpers.time_it(func():
		var entities = world.query.with_all([C_TestA, C_TestB]).execute()
	)
	# Measure cache hit (subsequent query)
	var hit_time_ms = PerfHelpers.time_it(func():
		var entities = world.query.with_all([C_TestA, C_TestB]).execute()
	)
	PerfHelpers.record_result("cache_miss", scale, miss_time_ms)
	PerfHelpers.record_result("cache_hit", scale, hit_time_ms)
	# Print comparison (guard against a 0 ms hit time from timer resolution)
	var speedup = miss_time_ms / hit_time_ms if hit_time_ms > 0 else 0
	print("  Cache speedup at scale %d: %.1fx (miss=%.3fms, hit=%.3fms)" % [
		scale, speedup, miss_time_ms, hit_time_ms
	])


## Test cache key stability across query builder instances
## Ensures the same query produces the same cache key
func test_cache_key_stability():
	_setup_ab_entities(100)
	# Execute same query 100 times and collect cache stats
	var initial_stats = world.get_cache_stats()
	for i in 100:
		var entities = world.query.with_all([C_TestA, C_TestB]).execute()
	var final_stats = world.get_cache_stats()
	var hits = final_stats.cache_hits - initial_stats.cache_hits
	var misses = final_stats.cache_misses - initial_stats.cache_misses
	print("  Cache key stability: %d hits, %d misses (%.1f%% hit rate)" % [
		hits, misses, (hits * 100.0 / (hits + misses)) if (hits + misses) > 0 else 0
	])
	# We expect 1 miss (first query) and 99 hits (all subsequent queries)
	assert_int(misses).is_equal(1)
	assert_int(hits).is_equal(99)


## Test cache invalidation frequency impact
## Measures performance when cache is frequently invalidated
func test_cache_invalidation_impact(scale: int, test_parameters := [[100], [1000], [10000]]):
	_setup_ab_entities(scale)
	# Time queries with cache invalidation after each query
	var with_invalidation_ms = PerfHelpers.time_it(func():
		for i in 100:
			var entities = world.query.with_all([C_TestA, C_TestB]).execute()
			world._query_archetype_cache.clear() # Force cache miss on next query
	)
	# Time queries without invalidation (all cache hits after first)
	var without_invalidation_ms = PerfHelpers.time_it(func():
		for i in 100:
			var entities = world.query.with_all([C_TestA, C_TestB]).execute()
	)
	PerfHelpers.record_result("cache_invalidation_impact_with", scale, with_invalidation_ms)
	PerfHelpers.record_result("cache_invalidation_impact_without", scale, without_invalidation_ms)
	# FIX: guard the division — usec timer resolution can legitimately report
	# 0 ms here, which previously produced inf/NaN overhead. Mirrors the
	# speedup guard in test_cache_miss_vs_hit.
	var overhead = 0.0
	if without_invalidation_ms > 0:
		overhead = (with_invalidation_ms - without_invalidation_ms) / without_invalidation_ms * 100
	print("  Cache invalidation overhead at scale %d: %.1f%% (with=%.2fms, without=%.2fms)" % [
		scale, overhead, with_invalidation_ms, without_invalidation_ms
	])

View File

@@ -0,0 +1 @@
uid://dm6141dihwven

View File

@@ -0,0 +1,120 @@
## Component Performance Tests
## Tests component addition, removal, and lookup operations
extends GdUnitTestSuite

var runner: GdUnitSceneRunner
var world: World


func before():
	runner = scene_runner("res://addons/gecs/tests/test_scene.tscn")
	world = runner.get_property("world")
	ECS.world = world


## Test adding components to entities
func test_component_addition(scale: int, test_parameters := [[100], [1000], [10000]]):
	# Pre-create bare entities so only add_component is inside the timer.
	var pool = []
	for idx in scale:
		var subject = Entity.new()
		pool.append(subject)
		world.add_entity(subject, null, false)
	# Time component addition
	var time_ms = PerfHelpers.time_it(func():
		for subject in pool:
			subject.add_component(C_TestA.new())
	)
	PerfHelpers.record_result("component_addition", scale, time_ms)
	world.purge(false)


## Test adding multiple components to entities
func test_multiple_component_addition(scale: int, test_parameters := [[100], [1000]]):
	# Pre-create bare entities outside the timed region.
	var pool = []
	for idx in scale:
		var subject = Entity.new()
		pool.append(subject)
		world.add_entity(subject, null, false)
	# Time adding three components to each entity
	var time_ms = PerfHelpers.time_it(func():
		for subject in pool:
			subject.add_component(C_TestA.new())
			subject.add_component(C_TestB.new())
			subject.add_component(C_TestC.new())
	)
	PerfHelpers.record_result("multiple_component_addition", scale, time_ms)
	world.purge(false)


## Test removing components from entities
func test_component_removal(scale: int, test_parameters := [[100], [1000]]):
	# Setup: entities start with two components; we time removing one.
	var pool = []
	for idx in scale:
		var subject = Entity.new()
		subject.add_component(C_TestA.new())
		subject.add_component(C_TestB.new())
		pool.append(subject)
		world.add_entity(subject, null, false)
	# Time component removal
	var time_ms = PerfHelpers.time_it(func():
		for subject in pool:
			subject.remove_component(C_TestA)
	)
	PerfHelpers.record_result("component_removal", scale, time_ms)
	world.purge(false)


## Test component lookup (has_component)
func test_component_lookup(scale: int, test_parameters := [[100], [1000], [10000]]):
	# Setup: only every 2nd entity has C_TestA, so both hit and miss
	# lookups are exercised.
	var pool = []
	for idx in scale:
		var subject = Entity.new()
		if idx % 2 == 0:
			subject.add_component(C_TestA.new())
		subject.add_component(C_TestB.new())
		pool.append(subject)
		world.add_entity(subject, null, false)
	# Time two has_component checks per entity
	var time_ms = PerfHelpers.time_it(func():
		for subject in pool:
			var has_a = subject.has_component(C_TestA)
			var has_b = subject.has_component(C_TestB)
	)
	PerfHelpers.record_result("component_lookup", scale, time_ms)
	world.purge(false)


## Test getting component from entity
func test_component_get(scale: int, test_parameters := [[100], [1000]]):
	# Setup: every entity carries both components being retrieved.
	var pool = []
	for idx in scale:
		var subject = Entity.new()
		subject.add_component(C_TestA.new())
		subject.add_component(C_TestB.new())
		pool.append(subject)
		world.add_entity(subject, null, false)
	# Time two get_component calls per entity
	var time_ms = PerfHelpers.time_it(func():
		for subject in pool:
			var comp_a = subject.get_component(C_TestA)
			var comp_b = subject.get_component(C_TestB)
	)
	PerfHelpers.record_result("component_get", scale, time_ms)
	world.purge(false)

View File

@@ -0,0 +1 @@
uid://bsuu6ftcww6gy

View File

@@ -0,0 +1,103 @@
## Entity Performance Tests
## Tests entity creation, addition, removal, and operations
extends GdUnitTestSuite

var runner: GdUnitSceneRunner
var world: World


func before():
	runner = scene_runner("res://addons/gecs/tests/test_scene.tscn")
	world = runner.get_property("world")
	ECS.world = world


## Test entity creation performance at different scales
func test_entity_creation(scale: int, test_parameters := [[100], [1000], [10000]]):
	var created = []
	var time_ms = PerfHelpers.time_it(func():
		for idx in scale:
			var subject = auto_free(Entity.new())
			subject.name = "PerfEntity_%d" % idx
			created.append(subject)
	)
	PerfHelpers.record_result("entity_creation", scale, time_ms)


## Test entity creation with multiple components
func test_entity_with_components(scale: int, test_parameters := [[100], [1000], [10000]]):
	var created = []
	var time_ms = PerfHelpers.time_it(func():
		for idx in scale:
			var subject = auto_free(Entity.new())
			subject.name = "PerfEntity_%d" % idx
			subject.add_component(C_TestA.new())
			subject.add_component(C_TestB.new())
			if idx % 2 == 0:
				subject.add_component(C_TestC.new())
			created.append(subject)
	)
	PerfHelpers.record_result("entity_with_components", scale, time_ms)
	world.purge(false)


## Test adding entities to world
func test_entity_world_addition(scale: int, test_parameters := [[100], [1000], [10000]]):
	# Pre-create entities so only world.add_entity is timed.
	var created = []
	for idx in scale:
		var subject = Entity.new()
		subject.name = "PerfEntity_%d" % idx
		created.append(subject)
	# Time just the world addition
	var time_ms = PerfHelpers.time_it(func():
		for subject in created:
			world.add_entity(subject, null, false)
	)
	PerfHelpers.record_result("entity_world_addition", scale, time_ms)
	world.purge(false)


## Test removing entities from world
func test_entity_removal(scale: int, test_parameters := [[100], [1000], [10000]]):
	# Setup: create and add all entities first.
	var created = []
	for idx in scale:
		var subject = Entity.new()
		subject.name = "PerfEntity_%d" % idx
		created.append(subject)
		world.add_entity(subject, null, false)
	# Time removal of half the entities
	var time_ms = PerfHelpers.time_it(func():
		var to_remove = created.slice(0, scale / 2)
		for subject in to_remove:
			world.remove_entity(subject)
	)
	PerfHelpers.record_result("entity_removal", scale, time_ms)
	world.purge(false)


## Test bulk entity operations
func test_bulk_entity_operations(scale: int, test_parameters := [[100], [1000], [10000]]):
	# Build the batch outside the timer.
	var batch = []
	for idx in scale:
		var subject = Entity.new()
		subject.name = "BatchEntity_%d" % idx
		batch.append(subject)
	# Time bulk addition to world via add_entities
	var time_ms = PerfHelpers.time_it(func():
		world.add_entities(batch)
	)
	PerfHelpers.record_result("bulk_entity_operations", scale, time_ms)
	world.purge(false)

View File

@@ -0,0 +1 @@
uid://dytkj0pp6t3sk

View File

@@ -0,0 +1,157 @@
## System Processing Hotpath Breakdown Tests
## Detailed profiling of where time is spent during system processing
extends GdUnitTestSuite

var runner: GdUnitSceneRunner
var world: World


func before():
	runner = scene_runner("res://addons/gecs/tests/test_scene.tscn")
	world = runner.get_property("world")
	ECS.world = world


func after_test():
	if world:
		world.purge(false)


## Setup entities with velocity components (like real example)
func setup_velocity_entities(count: int) -> void:
	for idx in count:
		var mover = Entity.new()
		mover.name = "Entity_%d" % idx
		mover.add_component(C_Velocity.new(Vector3(randf(), randf(), randf())))
		world.add_entity(mover, null, false)


## Test 1: Pure query execution (no processing)
func test_query_execution_only(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_velocity_entities(scale)
	var time_ms = PerfHelpers.time_it(func():
		var _result = world.query.with_all([C_Velocity]).execute()
	)
	PerfHelpers.record_result("hotpath_query_execution", scale, time_ms)
	world.purge(false)


## Test 2: Query + component access (no actual work)
func test_component_access(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_velocity_entities(scale)
	var matched = world.query.with_all([C_Velocity]).execute()
	var c_velocity_path = C_Velocity.resource_path
	var time_ms = PerfHelpers.time_it(func():
		for mover in matched:
			var _component = mover.components.get(c_velocity_path, null) as C_Velocity
	)
	PerfHelpers.record_result("hotpath_component_access", scale, time_ms)
	world.purge(false)


## Test 3: Query + component access + data read
func test_component_data_read(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_velocity_entities(scale)
	var matched = world.query.with_all([C_Velocity]).execute()
	var c_velocity_path = C_Velocity.resource_path
	var time_ms = PerfHelpers.time_it(func():
		for mover in matched:
			var component = mover.components.get(c_velocity_path, null) as C_Velocity
			if component:
				# Read the velocity data
				var _vel = component.velocity
	)
	PerfHelpers.record_result("hotpath_data_read", scale, time_ms)
	world.purge(false)


## Test 4: Simulate full system processing loop (manual)
func test_simulated_system_loop(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_velocity_entities(scale)
	var c_velocity_path = C_Velocity.resource_path
	var delta = 0.016
	var time_ms = PerfHelpers.time_it(func():
		# Simulate what a system does: query + iterate + component access + work
		var matched = world.query.with_all([C_Velocity]).execute()
		for mover in matched:
			var component = mover.components.get(c_velocity_path, null) as C_Velocity
			if component:
				# Simulate typical work (reading velocity, calculating new position)
				var _new_pos = component.velocity * delta
	)
	PerfHelpers.record_result("hotpath_simulated_system", scale, time_ms)
	world.purge(false)


## Test 5: Using actual PerformanceTestSystem (available in tests)
func test_actual_system_processing(scale: int, test_parameters := [[100], [1000], [10000]]):
	# Use C_TestA instead since PerformanceTestSystem uses it
	for idx in scale:
		var subject = Entity.new()
		subject.name = "Entity_%d" % idx
		subject.add_component(C_TestA.new())
		world.add_entity(subject, null, false)
	var test_system = PerformanceTestSystem.new()
	world.add_system(test_system)
	var time_ms = PerfHelpers.time_it(func():
		world.process(0.016)
	)
	PerfHelpers.record_result("hotpath_actual_system", scale, time_ms)
	world.purge(false)


## Test 6: Multiple query executions per frame (simulating multiple systems)
func test_multiple_queries_per_frame(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_velocity_entities(scale)
	# Add multiple components to entities so each query has work to do
	for subject in world.entities:
		subject.add_component(C_TestA.new())
		subject.add_component(C_TestB.new())
	var time_ms = PerfHelpers.time_it(func():
		var _r1 = world.query.with_all([C_Velocity]).execute()
		var _r2 = world.query.with_all([C_TestA]).execute()
		var _r3 = world.query.with_all([C_TestB]).execute()
	)
	PerfHelpers.record_result("hotpath_multiple_queries", scale, time_ms)
	world.purge(false)


## Test 7: Component access patterns - dictionary vs cached path
func test_component_access_patterns(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_velocity_entities(scale)
	var matched = world.query.with_all([C_Velocity]).execute()
	# Test with cached path (current best practice)
	var c_velocity_path = C_Velocity.resource_path
	var time_cached = PerfHelpers.time_it(func():
		for mover in matched:
			var _component = mover.components.get(c_velocity_path, null) as C_Velocity
	)
	# Test with get_component() helper
	var time_helper = PerfHelpers.time_it(func():
		for mover in matched:
			var _component = mover.get_component(C_Velocity)
	)
	PerfHelpers.record_result("hotpath_component_access_cached", scale, time_cached)
	PerfHelpers.record_result("hotpath_component_access_helper", scale, time_helper)
	world.purge(false)

View File

@@ -0,0 +1 @@
uid://cj5f3xbcsymot

View File

@@ -0,0 +1,179 @@
## Component Indexing Performance Tests
## Compares performance of using Script objects vs String paths as dictionary keys
extends GdUnitTestSuite

var runner: GdUnitSceneRunner
var world: World


func before():
	runner = scene_runner("res://addons/gecs/tests/test_scene.tscn")
	world = runner.get_property("world")
	ECS.world = world


func after_test():
	if world:
		world.purge(false)


## Test dictionary lookup performance with String keys (current implementation)
func test_string_key_lookup(scale: int, test_parameters := [[1000], [10000], [100000]]):
	# Build a dictionary keyed by resource_path strings, scale/4 items each.
	var string_dict: Dictionary = {}
	var component_types = [C_TestA, C_TestB, C_TestC, C_TestD]
	for comp_type in component_types:
		var path = comp_type.resource_path
		string_dict[path] = []
		for item in scale / 4:
			string_dict[path].append(item)
	# Time 10000 lookups cycling through the four keys
	var time_ms = PerfHelpers.time_it(func():
		for i in 10000: # Many lookups
			var comp_type = component_types[i % 4]
			var _result = string_dict.get(comp_type.resource_path, [])
	)
	PerfHelpers.record_result("string_key_lookup", scale, time_ms)


## Test dictionary lookup performance with Script object keys
func test_script_key_lookup(scale: int, test_parameters := [[1000], [10000], [100000]]):
	# Build a dictionary keyed directly by the component Script objects.
	var script_dict: Dictionary = {}
	var component_types = [C_TestA, C_TestB, C_TestC, C_TestD]
	for comp_type in component_types:
		script_dict[comp_type] = []
		for item in scale / 4:
			script_dict[comp_type].append(item)
	# Time 10000 lookups cycling through the four keys
	var time_ms = PerfHelpers.time_it(func():
		for i in 10000: # Many lookups
			var comp_type = component_types[i % 4]
			var _result = script_dict.get(comp_type, [])
	)
	PerfHelpers.record_result("script_key_lookup", scale, time_ms)


## Test dictionary insertion performance with String keys
func test_string_key_insertion(scale: int, test_parameters := [[1000], [10000], [100000]]):
	var component_types = [C_TestA, C_TestB, C_TestC, C_TestD]
	var time_ms = PerfHelpers.time_it(func():
		var string_dict: Dictionary = {}
		for i in scale:
			var comp_type = component_types[i % 4]
			var path = comp_type.resource_path
			if not string_dict.has(path):
				string_dict[path] = []
			string_dict[path].append(i)
	)
	PerfHelpers.record_result("string_key_insertion", scale, time_ms)


## Test dictionary insertion performance with Script object keys
func test_script_key_insertion(scale: int, test_parameters := [[1000], [10000], [100000]]):
	var component_types = [C_TestA, C_TestB, C_TestC, C_TestD]
	var time_ms = PerfHelpers.time_it(func():
		var script_dict: Dictionary = {}
		for i in scale:
			var comp_type = component_types[i % 4]
			if not script_dict.has(comp_type):
				script_dict[comp_type] = []
			script_dict[comp_type].append(i)
	)
	PerfHelpers.record_result("script_key_insertion", scale, time_ms)


## Test hash computation overhead - String path generation
func test_get_resource_path_overhead(scale: int, test_parameters := [[10000], [100000], [1000000]]):
	var component_types = [C_TestA, C_TestB, C_TestC, C_TestD]
	var time_ms = PerfHelpers.time_it(func():
		for i in scale:
			var comp_type = component_types[i % 4]
			var _path = comp_type.resource_path
	)
	PerfHelpers.record_result("get_resource_path_overhead", scale, time_ms)


## Test dictionary lookup performance with Integer keys
func test_integer_key_lookup(scale: int, test_parameters := [[1000], [10000], [100000]]):
	# Build a dictionary keyed by ints (simulating instance IDs or hashes).
	var int_dict: Dictionary = {}
	var component_types = [C_TestA, C_TestB, C_TestC, C_TestD]
	for key in range(4):
		int_dict[key] = []
		for item in scale / 4:
			int_dict[key].append(item)
	# Time 10000 lookups cycling through the four keys
	var time_ms = PerfHelpers.time_it(func():
		for i in 10000: # Many lookups
			var key = i % 4
			var _result = int_dict.get(key, [])
	)
	PerfHelpers.record_result("integer_key_lookup", scale, time_ms)


## Test dictionary insertion performance with Integer keys
func test_integer_key_insertion(scale: int, test_parameters := [[1000], [10000], [100000]]):
	var time_ms = PerfHelpers.time_it(func():
		var int_dict: Dictionary = {}
		for i in scale:
			var key = i % 4
			if not int_dict.has(key):
				int_dict[key] = []
			int_dict[key].append(i)
	)
	PerfHelpers.record_result("integer_key_insertion", scale, time_ms)


## Test Script.get_instance_id() overhead
func test_get_instance_id_overhead(scale: int, test_parameters := [[10000], [100000], [1000000]]):
	var component_types = [C_TestA, C_TestB, C_TestC, C_TestD]
	var time_ms = PerfHelpers.time_it(func():
		for i in scale:
			var comp_type = component_types[i % 4]
			var _id = comp_type.get_instance_id()
	)
	PerfHelpers.record_result("get_instance_id_overhead", scale, time_ms)


## Test realistic query performance with String keys (current implementation)
func test_realistic_query_with_strings(scale: int, test_parameters := [[100], [1000], [10000]]):
	# Setup entities: every 2nd gets C_TestA, every 3rd gets C_TestB.
	for idx in scale:
		var subject = Entity.new()
		subject.name = "Entity_%d" % idx
		if idx % 2 == 0:
			subject.add_component(C_TestA.new())
		if idx % 3 == 0:
			subject.add_component(C_TestB.new())
		world.add_entity(subject, null, false)
	# Time queries (current string-based approach)
	var time_ms = PerfHelpers.time_it(func():
		for i in 100: # Execute query 100 times
			var _entities = world.query.with_all([C_TestA]).execute()
	)
	PerfHelpers.record_result("realistic_query_with_strings", scale, time_ms)
	world.purge(false)

View File

@@ -0,0 +1 @@
uid://5218hr3x4ron

View File

@@ -0,0 +1,328 @@
## Observer Performance Tests
## Compares observers vs traditional systems for different use cases
extends GdUnitTestSuite
var runner: GdUnitSceneRunner
var world: World
func before():
	# Load the shared test scene and make its world the active ECS world.
	runner = scene_runner("res://addons/gecs/tests/test_scene.tscn")
	world = runner.get_property("world")
	ECS.world = world
func after_test():
	# Drop all entities/systems between tests so runs stay independent.
	if world:
		world.purge(false)
## Setup entities with position and velocity components for movement tests
func setup_velocity_entities(count: int) -> void:
	for idx in count:
		var mover = Entity.new()
		mover.name = "VelocityEntity_%d" % idx
		mover.add_component(C_TestPosition.new(Vector3(idx, 0, 0)))
		mover.add_component(C_TestVelocity.new(Vector3(randf() * 10, randf() * 10, randf() * 10)))
		world.add_entity(mover, null, false)
## Setup entities for observer add/remove tests
func setup_observer_test_entities(count: int) -> void:
	for idx in count:
		var subject = Entity.new()
		subject.name = "ObserverTestEntity_%d" % idx
		subject.add_component(C_ObserverTest.new(idx))
		world.add_entity(subject, null, false)
## Test traditional system approach for continuous processing (like velocity)
## This is the IDEAL use case for systems - they excel at continuous per-frame processing
func test_system_continuous_processing(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_velocity_entities(scale)
	var system = S_VelocitySystem.new()
	world.add_system(system)
	var time_ms = PerfHelpers.time_it(func():
		# Simulate 60 frames of processing
		for frame in range(60):
			world.process(0.016)
	)
	PerfHelpers.record_result("system_continuous_velocity", scale, time_ms)
	prints("System processed %d entities across 60 frames" % system.process_count)
	world.purge(false)
## Test observer detecting component additions
## This is an IDEAL use case for observers - they excel at reacting to state changes
func test_observer_component_additions(scale: int, test_parameters := [[100], [1000], [10000]]):
	var observer = O_PerformanceTest.new()
	world.add_observer(observer)
	var time_ms = PerfHelpers.time_it(func():
		# Add components to entities (observers react to additions)
		for idx in range(scale):
			var subject = Entity.new()
			subject.add_component(C_ObserverTest.new(idx))
			world.add_entity(subject, null, false)
	)
	PerfHelpers.record_result("observer_component_additions", scale, time_ms)
	prints("Observer detected %d additions" % observer.added_count)
	# Every addition must have fired the observer exactly once.
	assert_int(observer.added_count).is_equal(scale)
	world.purge(false)
## Test observer detecting component removals
## Another IDEAL use case for observers - reacting to cleanup/removal events
func test_observer_component_removals(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_observer_test_entities(scale)
	var observer = O_PerformanceTest.new()
	world.add_observer(observer)
	var targets = world.query.with_all([C_ObserverTest]).execute()
	var time_ms = PerfHelpers.time_it(func():
		# Remove components (observers react to removals)
		for subject in targets:
			subject.remove_component(C_ObserverTest)
	)
	PerfHelpers.record_result("observer_component_removals", scale, time_ms)
	prints("Observer detected %d removals" % observer.removed_count)
	# Every removal must have fired the observer exactly once.
	assert_int(observer.removed_count).is_equal(scale)
	world.purge(false)
## Test observer detecting property changes
## Good use case for observers - reacting to specific property changes
func test_observer_property_changes(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_observer_test_entities(scale)
	var observer = O_PerformanceTest.new()
	world.add_observer(observer)
	# Ignore any events generated during setup.
	observer.reset_counts()
	var targets = world.query.with_all([C_ObserverTest]).execute()
	var time_ms = PerfHelpers.time_it(func():
		# Change properties (observers react to changes)
		for subject in targets:
			var comp = subject.get_component(C_ObserverTest)
			comp.value = comp.value + 1 # Triggers property_changed signal
	)
	PerfHelpers.record_result("observer_property_changes", scale, time_ms)
	prints("Observer detected %d property changes" % observer.changed_count)
	assert_int(observer.changed_count).is_equal(scale)
	world.purge(false)
## Test system approach for batch property reads
## Systems are better for batch operations without individual reactions
func test_system_batch_property_reads(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_observer_test_entities(scale)
	var system = PerformanceTestSystem.new()
	world.add_system(system)
	var time_ms = PerfHelpers.time_it(func():
		# Single process call reads all entities
		world.process(0.016)
	)
	PerfHelpers.record_result("system_batch_property_reads", scale, time_ms)
	prints("System processed %d entities in batch" % system.process_count)
	world.purge(false)
## Test observer overhead with multiple property changes per entity
## Shows cost of observers when entities change frequently
func test_observer_frequent_changes(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_observer_test_entities(scale)
	var observer = O_PerformanceTest.new()
	world.add_observer(observer)
	# Ignore any events generated during setup.
	observer.reset_counts()
	var targets = world.query.with_all([C_ObserverTest]).execute()
	var time_ms = PerfHelpers.time_it(func():
		# Each entity changes multiple times
		for subject in targets:
			var comp = subject.get_component(C_ObserverTest)
			for change in range(10): # 10 changes per entity
				comp.value = comp.value + 1
	)
	PerfHelpers.record_result("observer_frequent_changes", scale, time_ms)
	prints("Observer detected %d property changes (%d entities × 10 changes)" % [observer.changed_count, scale])
	# One notification per mutation: scale entities x 10 changes each.
	assert_int(observer.changed_count).is_equal(scale * 10)
	world.purge(false)
## Test system processing the same frequent changes scenario
## Compares continuous polling vs reactive observation
func test_system_simulating_frequent_changes(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_observer_test_entities(scale)
	var system = PerformanceTestSystem.new()
	world.add_system(system)
	var targets = world.query.with_all([C_ObserverTest]).execute()
	var time_ms = PerfHelpers.time_it(func():
		# Make the changes
		for subject in targets:
			var comp = subject.get_component(C_ObserverTest)
			for change in range(10):
				# Direct property change without signal
				comp.value = comp.value + 1
		# System processes once (doesn't know about individual changes)
		world.process(0.016)
	)
	PerfHelpers.record_result("system_simulating_frequent_changes", scale, time_ms)
	prints("System processed %d entities once after changes" % system.process_count)
	world.purge(false)
## Test multiple observers watching the same component
## Shows overhead of multiple reactive systems
func test_multiple_observers_same_component(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_observer_test_entities(scale)
	var observer1 = O_PerformanceTest.new()
	var observer2 = O_PerformanceTest.new()
	var observer3 = O_PerformanceTest.new()
	world.add_observers([observer1, observer2, observer3])
	# Ignore any events generated during setup.
	observer1.reset_counts()
	observer2.reset_counts()
	observer3.reset_counts()
	var targets = world.query.with_all([C_ObserverTest]).execute()
	var time_ms = PerfHelpers.time_it(func():
		# Change properties (all 3 observers react)
		for subject in targets:
			var comp = subject.get_component(C_ObserverTest)
			comp.value = comp.value + 1
	)
	PerfHelpers.record_result("multiple_observers_same_component", scale, time_ms)
	prints("3 observers each detected %d changes" % observer1.changed_count)
	# Every observer must have seen every change independently.
	assert_int(observer1.changed_count).is_equal(scale)
	assert_int(observer2.changed_count).is_equal(scale)
	assert_int(observer3.changed_count).is_equal(scale)
	world.purge(false)
## Cost of query evaluation for an observer that requires two components.
func test_observer_with_complex_query(scale: int, test_parameters := [[100], [1000], [10000]]):
	# Only every second entity also carries C_ObserverHealth, so only those
	# can match the observer's two-component query.
	for i in scale:
		var entity = Entity.new()
		entity.add_component(C_ObserverTest.new(i))
		if i % 2 == 0:
			entity.add_component(C_ObserverHealth.new())
		world.add_entity(entity, null, false)
	var health_observer = O_HealthObserver.new()
	world.add_observer(health_observer)
	health_observer.reset()
	var matching = world.query.with_all([C_ObserverTest, C_ObserverHealth]).execute()
	var elapsed = PerfHelpers.time_it(func():
		# Mutate health only on the entities the observer's query matches.
		for entity in matching:
			var health = entity.get_component(C_ObserverHealth)
			health.health -= 1
	)
	PerfHelpers.record_result("observer_complex_query", scale, elapsed)
	prints("Observer with complex query detected %d changes (out of %d total entities)" % [health_observer.health_changed_count, scale])
	world.purge(false)
## Test baseline: cost of having a single observer attached while every
## entity's watched property changes exactly once.
## NOTE(review): the original comments and log line claimed the writes bypass
## signal emission, but the assertion below requires the observer to fire once
## per entity — the comments and message are corrected to match the asserted
## behavior.
func test_observer_baseline_overhead(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_observer_test_entities(scale)
	# Attach one observer; every property write below is expected to notify it.
	var observer = O_PerformanceTest.new()
	world.add_observer(observer)
	observer.reset_counts()  # start from zero, like the sibling observer tests
	var entities = world.query.with_all([C_ObserverTest]).execute()
	var time_ms = PerfHelpers.time_it(func():
		# One property change per entity (each triggers the observer).
		for entity in entities:
			var comp = entity.get_component(C_ObserverTest)
			comp.value = comp.value + 1
	)
	PerfHelpers.record_result("observer_baseline_overhead", scale, time_ms)
	prints("Made %d changes with one observer attached" % scale)
	assert_int(observer.changed_count).is_equal(scale)
	world.purge(false)
## Observer vs. system when only ~10% of entities change per frame,
## simulated over 60 frames. Records one result per approach.
func test_observer_vs_system_sporadic_changes(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_observer_test_entities(scale)
	var sporadic_observer = O_PerformanceTest.new()
	world.add_observer(sporadic_observer)
	sporadic_observer.reset_counts()
	var pool = world.query.with_all([C_ObserverTest]).execute()
	var changes_per_frame = max(1, scale / 10)  # ~10% of entities per frame
	# Phase 1: reactive observer — pays only per actual change.
	var observer_elapsed = PerfHelpers.time_it(func():
		for _frame in 60:
			for i in changes_per_frame:
				var comp_ref = pool[i % scale].get_component(C_ObserverTest)
				comp_ref.value += 1
	)
	PerfHelpers.record_result("observer_sporadic_changes", scale, observer_elapsed)
	prints("Observer detected %d sporadic changes over 60 frames" % sporadic_observer.changed_count)
	# Phase 2: rebuild the world and apply the same mutations, but with a
	# polling system that walks every entity every frame regardless.
	world.purge(false)
	setup_observer_test_entities(scale)
	var poll_system = PerformanceTestSystem.new()
	world.add_system(poll_system)
	pool = world.query.with_all([C_ObserverTest]).execute()
	var system_elapsed = PerfHelpers.time_it(func():
		for _frame in 60:
			for i in changes_per_frame:
				var comp_ref = pool[i % scale].get_component(C_ObserverTest)
				comp_ref.value += 1
			world.process(0.016)
	)
	PerfHelpers.record_result("system_sporadic_changes", scale, system_elapsed)
	prints("System processed %d total entities over 60 frames (even though only 10%% changed)" % poll_system.process_count)
	world.purge(false)

View File

@@ -0,0 +1 @@
uid://bmhys5pytv1x8

View File

@@ -0,0 +1,225 @@
## Query Performance Tests
## Tests query building and execution performance
extends GdUnitTestSuite

# Scene runner hosting the shared test scene for this suite.
var runner: GdUnitSceneRunner
# World under test, pulled out of the test scene.
var world: World

## Suite setup: load the test scene and publish its World as the ECS global.
func before():
	runner = scene_runner("res://addons/gecs/tests/test_scene.tscn")
	world = runner.get_property("world")
	ECS.world = world

## Per-test teardown: reset world state via purge(false).
## NOTE(review): flag semantics defined by World.purge — confirm `false`
## means "do not free the world node itself".
func after_test():
	if world:
		world.purge(false)
## Populate the world with `count` entities whose component mix varies:
## multiples of 2 get C_TestA, of 3 C_TestB, of 5 C_TestC, of 7 C_TestD.
func setup_diverse_entities(count: int) -> void:
	# Map divisor -> component script so the mix is declared in one place.
	# (Godot Dictionaries preserve insertion order, so components are added
	# in the same order as the original if/if/if/if chain.)
	var mix = {2: C_TestA, 3: C_TestB, 5: C_TestC, 7: C_TestD}
	for i in range(count):
		var entity = Entity.new()
		entity.name = "QueryEntity_%d" % i
		for divisor in mix:
			if i % divisor == 0:
				entity.add_component(mix[divisor].new())
		world.add_entity(entity, null, false)
## Measure a single with_all query over one component type.
func test_query_with_all(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_diverse_entities(scale)
	var elapsed = PerfHelpers.time_it(func():
		var _matched = world.query.with_all([C_TestA]).execute()
	)
	PerfHelpers.record_result("query_with_all", scale, elapsed)
	world.purge(false)
## Measure a with_any query over three component types.
func test_query_with_any(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_diverse_entities(scale)
	var elapsed = PerfHelpers.time_it(func():
		var _matched = world.query.with_any([C_TestA, C_TestB, C_TestC]).execute()
	)
	PerfHelpers.record_result("query_with_any", scale, elapsed)
	world.purge(false)
## Measure an exclusion (with_none) query over one component type.
func test_query_with_none(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_diverse_entities(scale)
	var elapsed = PerfHelpers.time_it(func():
		var _matched = world.query.with_none([C_TestD]).execute()
	)
	PerfHelpers.record_result("query_with_none", scale, elapsed)
	world.purge(false)
## Measure a combined with_all + with_any + with_none query.
func test_query_complex(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_diverse_entities(scale)
	var elapsed = PerfHelpers.time_it(func():
		var builder = world.query.with_all([C_TestA]).with_any([C_TestB, C_TestC]).with_none([C_TestD])
		var _matched = builder.execute()
	)
	PerfHelpers.record_result("query_complex", scale, elapsed)
	world.purge(false)
## Measure a query that also filters on a component property value.
func test_query_with_component_query(scale: int, test_parameters := [[100], [1000], [10000]]):
	# Entities carry C_TestA with value == index, so roughly half pass the
	# _gte filter below.
	for i in range(scale):
		var entity = Entity.new()
		var comp = C_TestA.new()
		comp.value = i
		entity.add_component(comp)
		world.add_entity(entity, null, false)
	var elapsed = PerfHelpers.time_it(func():
		var _matched = world.query.with_all([{C_TestA: {'value': {"_gte": scale / 2}}}]).execute()
	)
	PerfHelpers.record_result("query_with_component_query", scale, elapsed)
	world.purge(false)
## Measure 100 executions of an identical query (cache-hit path).
func test_query_caching(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_diverse_entities(scale)
	var elapsed = PerfHelpers.time_it(func():
		for _run in 100:
			var _matched = world.query.with_all([C_TestA, C_TestB]).execute()
	)
	PerfHelpers.record_result("query_caching", scale, elapsed)
	world.purge(false)
## Measure query cost against a world with no entities at all;
## `scale` is repurposed as the number of query repetitions.
func test_query_empty_world(scale: int, test_parameters := [[100], [1000], [10000]]):
	var elapsed = PerfHelpers.time_it(func():
		for _run in scale:
			var _matched = world.query.with_all([C_TestA]).execute()
	)
	PerfHelpers.record_result("query_empty_world", scale, elapsed)
	world.purge(false)
## Verify disabled entities add no cost to an enabled() query:
## (scale - 10) disabled entities plus 10 enabled ones should query in
## roughly the same time as a 10-entity world (see the baseline test).
func test_query_disabled_entities_no_impact(scale: int, test_parameters := [[100], [1000], [10000]]):
	var enabled_count = 10  # fixed regardless of scale
	# Bulk of the population is disabled.
	for i in range(scale - enabled_count):
		var entity = Entity.new()
		entity.name = "DisabledEntity_%d" % i
		entity.enabled = false
		entity.add_component(C_TestA.new())
		world.add_entity(entity, null, false)
	# A handful of enabled entities on top.
	for i in range(enabled_count):
		var entity = Entity.new()
		entity.name = "EnabledEntity_%d" % i
		entity.enabled = true
		entity.add_component(C_TestA.new())
		world.add_entity(entity, null, false)
	var elapsed = PerfHelpers.time_it(func():
		var _matched = world.query.with_all([C_TestA]).enabled().execute()
	)
	PerfHelpers.record_result("query_disabled_entities_no_impact", scale, elapsed)
	world.purge(false)
## Baseline for the disabled-entities test: the same 10 enabled entities
## with no disabled ones present; timings should be comparable.
func test_query_only_enabled_baseline(scale: int, test_parameters := [[100], [1000], [10000]]):
	var enabled_count = 10  # matches test_query_disabled_entities_no_impact
	for i in range(enabled_count):
		var entity = Entity.new()
		entity.name = "EnabledEntity_%d" % i
		entity.enabled = true
		entity.add_component(C_TestA.new())
		world.add_entity(entity, null, false)
	var elapsed = PerfHelpers.time_it(func():
		var _matched = world.query.with_all([C_TestA]).enabled().execute()
	)
	PerfHelpers.record_result("query_only_enabled_baseline", scale, elapsed)
	world.purge(false)
## Measure group-only queries, which ride on Godot's native group index.
func test_query_with_group(scale: int, test_parameters := [[100], [1000], [10000]]):
	for i in range(scale):
		var entity = Entity.new()
		entity.name = "GroupEntity_%d" % i
		entity.add_component(C_TestA.new())
		# Entities must be in the scene tree before joining a group.
		world.add_entity(entity, null, true)
		entity.add_to_group("test_group")
	var elapsed = PerfHelpers.time_it(func():
		var _matched = world.query.with_group(["test_group"]).execute()
	)
	PerfHelpers.record_result("query_with_group", scale, elapsed)
	world.purge(false)
## Measure the common case of filtering by group AND component membership.
func test_query_group_with_components(scale: int, test_parameters := [[100], [1000], [10000]]):
	for i in range(scale):
		var entity = Entity.new()
		entity.name = "GroupEntity_%d" % i
		# Diverse component mix so the with_all filter actually prunes.
		if i % 2 == 0:
			entity.add_component(C_TestA.new())
		if i % 3 == 0:
			entity.add_component(C_TestB.new())
		# Entities must be in the scene tree before joining a group.
		world.add_entity(entity, null, true)
		entity.add_to_group("test_group")
	var elapsed = PerfHelpers.time_it(func():
		var _matched = world.query.with_group(["test_group"]).with_all([C_TestA]).execute()
	)
	PerfHelpers.record_result("query_group_with_components", scale, elapsed)
	world.purge(false)

View File

@@ -0,0 +1 @@
uid://dq82hrc6evh3t

View File

@@ -0,0 +1,181 @@
## Set and Array Performance Tests
## Tests Set operations and ArrayExtensions performance
extends GdUnitTestSuite

# Scene runner hosting the shared test scene for this suite.
var runner: GdUnitSceneRunner
# World under test, pulled out of the test scene.
var world: World

## Suite setup: load the test scene and publish its World as the ECS global.
func before():
	runner = scene_runner("res://addons/gecs/tests/test_scene.tscn")
	world = runner.get_property("world")
	ECS.world = world

## Per-test teardown: reset world state via purge(false).
func after_test():
	if world:
		world.purge(false)
## Build two String arrays for set/array benchmarks.
## `array1` holds size1 unique ids; `array2` holds size2 ids of which
## roughly overlap_percent (0.0..1.0) are shared with array1 and the rest
## are unique to array2. Returns [array1, array2].
func create_test_arrays(size1: int, size2: int, overlap_percent: float = 0.5) -> Array:
	var array1: Array = []
	var array2: Array = []
	# First array: ids 0 .. size1-1.
	for i in size1:
		array1.append("Entity_%d" % i)
	var overlap_count = int(size2 * overlap_percent)
	var unique_count = size2 - overlap_count
	# Overlapping elements, copied from array1.
	for i in overlap_count:
		if i < size1:
			array2.append(array1[i])
		else:
			# Fix: when more overlap is requested than array1 can supply,
			# pad with fresh ids (disjoint from both id ranges) so array2
			# still ends up with size2 elements. Previously these
			# iterations appended nothing, silently shortening array2.
			array2.append("Entity_%d" % (size1 + size2 + i))
	# Elements unique to array2: ids size1 .. size1+unique_count-1.
	for i in unique_count:
		array2.append("Entity_%d" % (size1 + i))
	return [array1, array2]
## Measure Set.intersect() on two half-overlapping sets.
func test_set_intersect(scale: int, test_parameters := [[100], [1000], [10000]]):
	var pair = create_test_arrays(scale, scale, 0.5)
	var left = Set.new(pair[0])
	var right = Set.new(pair[1])
	var elapsed = PerfHelpers.time_it(func():
		var _result = left.intersect(right)
	)
	PerfHelpers.record_result("set_intersect", scale, elapsed)
## Measure Set.union() on two half-overlapping sets.
func test_set_union(scale: int, test_parameters := [[100], [1000], [10000]]):
	var pair = create_test_arrays(scale, scale, 0.5)
	var left = Set.new(pair[0])
	var right = Set.new(pair[1])
	var elapsed = PerfHelpers.time_it(func():
		var _result = left.union(right)
	)
	PerfHelpers.record_result("set_union", scale, elapsed)
## Measure Set.difference() on two half-overlapping sets.
func test_set_difference(scale: int, test_parameters := [[100], [1000], [10000]]):
	var pair = create_test_arrays(scale, scale, 0.5)
	var left = Set.new(pair[0])
	var right = Set.new(pair[1])
	var elapsed = PerfHelpers.time_it(func():
		var _result = left.difference(right)
	)
	PerfHelpers.record_result("set_difference", scale, elapsed)
## Measure ArrayExtensions.intersect() on two half-overlapping arrays.
func test_array_intersect(scale: int, test_parameters := [[100], [1000], [10000]]):
	var pair = create_test_arrays(scale, scale, 0.5)
	var left = pair[0]
	var right = pair[1]
	var elapsed = PerfHelpers.time_it(func():
		var _result = ArrayExtensions.intersect(left, right)
	)
	PerfHelpers.record_result("array_intersect", scale, elapsed)
## Measure ArrayExtensions.union() on two half-overlapping arrays.
func test_array_union(scale: int, test_parameters := [[100], [1000], [10000]]):
	var pair = create_test_arrays(scale, scale, 0.5)
	var left = pair[0]
	var right = pair[1]
	var elapsed = PerfHelpers.time_it(func():
		var _result = ArrayExtensions.union(left, right)
	)
	PerfHelpers.record_result("array_union", scale, elapsed)
## Measure ArrayExtensions.difference() on two half-overlapping arrays.
func test_array_difference(scale: int, test_parameters := [[100], [1000], [10000]]):
	var pair = create_test_arrays(scale, scale, 0.5)
	var left = pair[0]
	var right = pair[1]
	var elapsed = PerfHelpers.time_it(func():
		var _result = ArrayExtensions.difference(left, right)
	)
	PerfHelpers.record_result("array_difference", scale, elapsed)
## Measure erasing half of a Set's elements one by one.
func test_set_erase(scale: int, test_parameters := [[100], [1000], [10000]]):
	var source: Array = []
	for i in range(scale):
		source.append("Entity_%d" % i)
	var test_set := Set.new(source)
	var half = scale / 2  # integer division, matches original loop bound
	var elapsed = PerfHelpers.time_it(func():
		for i in half:
			test_set.erase("Entity_%d" % i)
	)
	PerfHelpers.record_result("set_erase", scale, elapsed)
## Compare array vs Set intersection when the two inputs share nothing.
func test_set_vs_array_no_overlap(scale: int, test_parameters := [[100], [1000]]):
	var pair = create_test_arrays(scale, scale, 0.0)  # zero overlap
	var left_arr = pair[0]
	var right_arr = pair[1]
	var left_set = Set.new(left_arr)
	var right_set = Set.new(right_arr)
	var array_elapsed = PerfHelpers.time_it(func():
		var _result = ArrayExtensions.intersect(left_arr, right_arr)
	)
	var set_elapsed = PerfHelpers.time_it(func():
		var _result = left_set.intersect(right_set)
	)
	PerfHelpers.record_result("array_intersect_no_overlap", scale, array_elapsed)
	PerfHelpers.record_result("set_intersect_no_overlap", scale, set_elapsed)
## Compare array vs Set intersection when the two inputs are identical.
func test_set_vs_array_complete_overlap(scale: int, test_parameters := [[100], [1000]]):
	var pair = create_test_arrays(scale, scale, 1.0)  # full overlap
	var left_arr = pair[0]
	var right_arr = pair[1]
	var left_set = Set.new(left_arr)
	var right_set = Set.new(right_arr)
	var array_elapsed = PerfHelpers.time_it(func():
		var _result = ArrayExtensions.intersect(left_arr, right_arr)
	)
	var set_elapsed = PerfHelpers.time_it(func():
		var _result = left_set.intersect(right_set)
	)
	PerfHelpers.record_result("array_intersect_complete_overlap", scale, array_elapsed)
	PerfHelpers.record_result("set_intersect_complete_overlap", scale, set_elapsed)

View File

@@ -0,0 +1 @@
uid://brjw81pwtpsml

View File

@@ -0,0 +1,123 @@
## System Performance Tests
## Tests system processing and entity iteration performance
extends GdUnitTestSuite

# Scene runner hosting the shared test scene for this suite.
var runner: GdUnitSceneRunner
# World under test, pulled out of the test scene.
var world: World

## Suite setup: load the test scene and publish its World as the ECS global.
func before():
	runner = scene_runner("res://addons/gecs/tests/test_scene.tscn")
	world = runner.get_property("world")
	ECS.world = world

## Per-test teardown: reset world state via purge(false).
func after_test():
	if world:
		world.purge(false)
## Populate `count` entities for system benchmarks: all get C_TestA,
## every 2nd also C_TestB, every 4th also C_TestC.
func setup_entities_for_systems(count: int) -> void:
	# Map divisor -> component script (1 => every entity). Godot
	# Dictionaries iterate in insertion order, preserving add order.
	var mix = {1: C_TestA, 2: C_TestB, 4: C_TestC}
	for i in range(count):
		var entity = Entity.new()
		entity.name = "SystemEntity_%d" % i
		for divisor in mix:
			if i % divisor == 0:
				entity.add_component(mix[divisor].new())
		world.add_entity(entity, null, false)
## Measure one world.process() pass with a single simple system.
func test_system_processing(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_entities_for_systems(scale)
	world.add_system(PerformanceTestSystem.new())
	var elapsed = PerfHelpers.time_it(func():
		world.process(0.016)  # one 60 FPS frame
	)
	PerfHelpers.record_result("system_processing", scale, elapsed)
	world.purge(false)
## Measure one frame with two different systems registered.
func test_multiple_systems(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_entities_for_systems(scale)
	world.add_systems([PerformanceTestSystem.new(), ComplexPerformanceTestSystem.new()])
	var elapsed = PerfHelpers.time_it(func():
		world.process(0.016)
	)
	PerfHelpers.record_result("multiple_systems", scale, elapsed)
	world.purge(false)
## Measure a frame where the registered system matches zero entities.
func test_system_no_matches(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_entities_for_systems(scale)
	world.add_system(PerformanceTestSystem.new())
	# Strip C_TestA everywhere so the system's query comes up empty.
	for entity in world.entities:
		entity.remove_component(C_TestA)
	var elapsed = PerfHelpers.time_it(func():
		world.process(0.016)
	)
	PerfHelpers.record_result("system_no_matches", scale, elapsed)
	world.purge(false)
## Measure processing two systems assigned to separate groups,
## driven one group at a time.
func test_system_groups(scale: int, test_parameters := [[100], [1000], [10000]]):
	setup_entities_for_systems(scale)
	var physics_sys = PerformanceTestSystem.new()
	physics_sys.group = "physics"
	var render_sys = PerformanceTestSystem.new()
	render_sys.group = "render"
	world.add_systems([physics_sys, render_sys])
	var elapsed = PerfHelpers.time_it(func():
		world.process(0.016, "physics")
		world.process(0.016, "render")
	)
	PerfHelpers.record_result("system_groups", scale, elapsed)
	world.purge(false)
## Measure processing while the entity population grows between frames.
func test_system_dynamic_entities(scale: int, test_parameters := [[100], [1000], [10000]]):
	# Start with half the population (integer division, as in the original).
	setup_entities_for_systems(scale / 2)
	world.add_system(PerformanceTestSystem.new())
	var elapsed = PerfHelpers.time_it(func():
		world.process(0.016)
		# Grow the world to full size mid-benchmark...
		for i in range(scale / 2, scale):
			var entity = Entity.new()
			entity.add_component(C_TestA.new())
			world.add_entity(entity, null, false)
		# ...then process again with the larger population.
		world.process(0.016)
	)
	PerfHelpers.record_result("system_dynamic_entities", scale, elapsed)
	world.purge(false)

View File

@@ -0,0 +1 @@
uid://d1md8ied574c0