Mirror of https://github.com/Imp0ssibl33z/BA-translator.git (synced 2025-12-10 13:29:41 +05:00)
Updated for new versions
BAtranslator.py
@@ -268,6 +268,41 @@ def extract_strings(output_file, filter_str=None, update_from=None):
         filter_str (str): Optional filter (e.g., 'is_ascii', 'table_name:TableName')
         update_from (str): Path to existing JSON file to merge translations from
     """
+    # Auto-setup: Create all required files if they don't exist
+    setup_required = False
+
+    # Check if repository_map.json exists
+    if not os.path.exists(REPO_MAP_FILE):
+        print(f"Repository map not found. Auto-generating from types.cs...")
+        setup_required = True
+
+        # Check if schema exists
+        if not os.path.exists('generated_schema.fbs'):
+            print("FlatBuffer schema not found. Generating from types.cs...")
+            if not os.path.exists('types.cs'):
+                print("ERROR: types.cs not found. Cannot auto-generate files.")
+                print("Please place types.cs in the project directory.")
+                return
+
+            # Generate schema
+            setup_schema_from_csharp('types.cs', 'generated_schema.fbs')
+
+            # Preprocess schema to fix reserved keywords
+            print("Preprocessing schema to fix Python reserved keywords...")
+            preprocess_flatbuffer_schema('generated_schema.fbs')
+
+            # Generate Python modules
+            print("Generating Python modules from schema...")
+            generate_flatbuffer_python('generated_schema.fbs', 'flatc.exe', '.')
+            print()
+
+        # Generate repository mapping
+        setup_repository_mapping('types.cs', REPO_MAP_FILE)
+        print()
+
+    if setup_required:
+        print("✓ Auto-setup completed! Proceeding with extraction...\n")
+
     # Validate required files
     if not validate_required_files(REPO_MAP_FILE, DB_FILE):
         return
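Note: validate_required_files() is called here and throughout the rest of the diff, but its body is not part of this commit. A minimal sketch consistent with its call sites (variadic paths, truthy on success); the message text is an assumption:

    import os

    def validate_required_files(*paths):
        # Hypothetical sketch, not the repository's actual implementation.
        missing = [p for p in paths if not os.path.exists(p)]
        for p in missing:
            print(f"ERROR: Required file not found: '{p}'")
        return not missing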
@@ -346,6 +381,15 @@ def _process_table(cursor, repo_info, filter_type, filter_value, translation_mem
         schema_class = getattr(schema_module, blob_schema_name)
         get_root_method = getattr(schema_class, f"GetRootAs{blob_schema_name}")

+        # Check if table exists in database
+        cursor.execute(
+            "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
+            (table_name,)
+        )
+        if not cursor.fetchone():
+            # Table doesn't exist, skip it silently
+            return None
+
         # Process table rows
         cursor.execute(f'SELECT rowid, "{BLOB_COLUMN}" FROM "{table_name}"')
         table_translations = OrderedDict()
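The getattr chain above resolves the flatc-generated entry point at runtime from names stored in repository_map.json. Spelled out for a single blob, with every name below ('ExcelDB.db', 'Bytes', 'Dialog', 'MX.Dialog') a hypothetical stand-in for values the script actually reads from its constants and the mapping file:

    import importlib
    import sqlite3

    conn = sqlite3.connect('ExcelDB.db')  # assumed database file name
    row = conn.execute('SELECT "Bytes" FROM "Dialog" WHERE rowid = 1').fetchone()  # assumed column/table

    module = importlib.import_module('MX.Dialog')        # hypothetical generated module
    schema_class = getattr(module, 'Dialog')
    get_root = getattr(schema_class, 'GetRootAsDialog')  # classmethod generated by flatc
    fbs_obj = get_root(row[0], 0)                        # (buffer, offset)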
@@ -372,6 +416,9 @@ def _process_table(cursor, repo_info, filter_type, filter_value, translation_mem
     except (ImportError, AttributeError):
         # Skip tables that can't be processed
         return None
+    except sqlite3.OperationalError:
+        # Handle database errors (e.g., table doesn't exist) silently
+        return None

 def _extract_string_fields(data_dict, filter_type, filter_value, translation_memory):
     """Extract and filter string fields from FlatBuffer data."""
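Only the signature and docstring of _extract_string_fields() fall inside this hunk. Judging from how its output is consumed later (per-field dicts with 'original' and 'translation' keys), a plausible core loop, assuming translation_memory maps original text to previously saved translations, would be:

    def _extract_string_fields(data_dict, filter_type, filter_value, translation_memory):
        """Sketch only -- the real body lies outside the visible hunks."""
        extracted = {}
        for field, value in data_dict.items():
            if isinstance(value, str) and _passes_filter(value, filter_type, filter_value):
                extracted[field] = {
                    'original': value,
                    'translation': translation_memory.get(value, ''),
                }
        return extracted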
@@ -409,13 +456,13 @@ def _passes_filter(text, filter_type, filter_value):
     elif filter_type == 'contains_text':
         return filter_value in text
     return True

+def patch_database(input_file):
+    """Apply translations from JSON file to the database.
+
+    Args:
+        input_file (str): Path to JSON file containing translations
+    """
+    # Validate files
+    if not validate_required_files(REPO_MAP_FILE, input_file, DB_FILE):
+        return
+
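The hunk shows only the tail of _passes_filter(). Given the filters named in the extract_strings docstring, the full function plausibly reads as follows; the is_ascii branch is an assumption (the table_name filter is presumably handled at table level rather than per string):

    def _passes_filter(text, filter_type, filter_value):
        # Sketch: only the 'contains_text' branch is visible in the diff.
        if filter_type is None:
            return True
        if filter_type == 'is_ascii':
            return text.isascii()
        elif filter_type == 'contains_text':
            return filter_value in text
        return True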
@@ -438,8 +485,7 @@ def patch_database(input_file):
     with open(input_file, 'r', encoding='utf-8') as f:
         translations = json.load(f)

-    # Process changes
-    print("Analyzing and applying translations...")
+    # Analyze translation changes
+    changes_to_apply = _analyze_translation_changes(translations)

     if not changes_to_apply:
@@ -464,36 +510,23 @@ def patch_database(input_file):
     conn.close()


-def patch_database(input_file):
-    """Apply translations from JSON file to the database."""
-    if not validate_required_files(REPO_MAP_FILE, input_file, DB_FILE):
-        return
-
-    print(f"--- PATCHING MODE: '{input_file}' -> '{DB_FILE}' ---")
-
-    # Confirm operation
-    response = input("Are you sure? A backup will be created. (yes/no): ").lower()
-    if response not in ['yes', 'y']:
-        print("Operation cancelled.")
-        return
-
-    # Create backup
-    print(f"Creating backup '{DB_BACKUP_FILE}'...")
-    shutil.copyfile(DB_FILE, DB_BACKUP_FILE)
-
-    # Load data
-    with open(REPO_MAP_FILE, 'r', encoding='utf-8') as f:
-        repo_map = {v['table_name']: v for v in json.load(f).values()}
-
-    with open(input_file, 'r', encoding='utf-8') as f:
-        translations = json.load(f)
-
-    # Find changes to apply
+def _analyze_translation_changes(translations):
+    """Analyze translation JSON and extract changes to apply.
+
+    Args:
+        translations (dict): Translation data from JSON file
+
+    Returns:
+        list: List of changes to apply, each containing table, row_id, and fields
+    """
+    changes_to_apply = []
+
+    for table_name, table_data in translations.items():
+        for row_id_str, fields in table_data.items():
+            changed_fields = {}
+
+            for field, content in fields.items():
+                # Check if field has translation that differs from original
+                if (isinstance(content, dict) and 'original' in content and
+                        content.get('translation') and
+                        content['translation'] != (content['original'] if isinstance(content['original'], str)
@@ -511,66 +544,397 @@ def patch_database(input_file)
+                    'fields': changed_fields
+                })
+
+    if not changes_to_apply:
+        print("No changes found to apply.")
+    return changes_to_apply
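Concretely, the JSON that _analyze_translation_changes() walks is keyed table -> rowid -> field, and an entry survives into changes_to_apply only if its 'translation' is non-empty and differs from 'original'. With hypothetical names:

    translations = {
        "Dialog": {                     # table name (illustrative)
            "42": {                     # SQLite rowid, serialized as a string key
                "TextJp": {"original": "...", "translation": ""},       # dropped: empty
                "TextKr": {"original": "...", "translation": "Hello"},  # kept: differs
            }
        }
    }
    # Assuming changed_fields maps field -> translation, the result would be
    # [{'table': 'Dialog', 'row_id': 42, 'fields': {'TextKr': 'Hello'}}];
    # the int() conversion of the "42" key happens in lines elided between hunks.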
+def _apply_database_changes(cursor, repo_map, changes_to_apply):
+    """Apply translation changes to database.
+
+    Args:
+        cursor: SQLite cursor
+        repo_map (dict): Repository mapping information
+        changes_to_apply (list): List of changes to apply
+
+    Returns:
+        int: Number of successfully updated entries
+    """
+    updated_count = 0
+    skipped_tables = set()
+
+    for change in tqdm(changes_to_apply, desc="Applying changes"):
+        table_name = change['table']
+        row_id = change['row_id']
+        fields = change['fields']
+
+        # Skip if table not in repository map
+        if table_name not in repo_map:
+            if table_name not in skipped_tables:
+                print(f"\nWARNING: Table '{table_name}' not found in repository map. Skipping...")
+                skipped_tables.add(table_name)
+            continue
+
+        try:
+            repo_info = repo_map[table_name]
+
+            # Get schema class
+            module_path = SCHEMA_LOCATION_MAP.get(repo_info['blob_schema_class'])
+            if not module_path:
+                if table_name not in skipped_tables:
+                    print(f"\nWARNING: Schema class '{repo_info['blob_schema_class']}' not found. Skipping table '{table_name}'...")
+                    skipped_tables.add(table_name)
+                continue
+
+            schema_module = importlib.import_module(module_path)
+            schema_class = getattr(schema_module, repo_info['blob_schema_class'])
+            get_root_method = getattr(schema_class, f"GetRootAs{repo_info['blob_schema_class']}")
+
+            # Check if table exists in database
+            cursor.execute(
+                "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
+                (table_name,)
+            )
+            if not cursor.fetchone():
+                if table_name not in skipped_tables:
+                    print(f"\nWARNING: Table '{table_name}' does not exist in database. Skipping...")
+                    skipped_tables.add(table_name)
+                continue
+
+            # Get and update data
+            cursor.execute(f'SELECT "{BLOB_COLUMN}" FROM "{table_name}" WHERE rowid = ?', (row_id,))
+            result = cursor.fetchone()
+
+            if not result or not result[0]:
+                continue
+
+            # Parse FlatBuffer data
+            fbs_obj = get_root_method(result[0], 0)
+            data_dict = flatbuffer_to_dict(fbs_obj)
+            data_dict.update(fields)
+
+            # Rebuild and save
+            builder = flatbuffers.Builder(1024)
+            new_offset = dict_to_flatbuffer(builder, data_dict, schema_class)
+            builder.Finish(new_offset)
+
+            cursor.execute(
+                f'UPDATE "{table_name}" SET "{BLOB_COLUMN}" = ? WHERE rowid = ?',
+                (bytes(builder.Output()), row_id)
+            )
+            updated_count += 1
+
+        except sqlite3.OperationalError as e:
+            # Handle SQL errors (e.g., table doesn't exist)
+            if table_name not in skipped_tables:
+                print(f"\nWARNING: Database error for table '{table_name}': {e}. Skipping...")
+                skipped_tables.add(table_name)
+            continue
+        except Exception as e:
+            # Handle other errors silently or log them
+            continue
+
+    return updated_count
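_apply_database_changes() leans on flatbuffer_to_dict() and dict_to_flatbuffer(), neither of which appears in this diff. A generic read-side sketch over the accessors flatc generates (the skip rules and the bytes-to-str decode are assumptions):

    def flatbuffer_to_dict(fbs_obj):
        # Sketch only: call every public zero-argument accessor on the object.
        data = {}
        for name in dir(fbs_obj):
            if name.startswith(('_', 'GetRootAs')) or name == 'Init':
                continue
            attr = getattr(fbs_obj, name)
            if not callable(attr):
                continue
            try:
                value = attr()
            except TypeError:
                continue  # vector accessors require an index argument
            if isinstance(value, bytes):
                value = value.decode('utf-8')  # flatc's Python API returns strings as bytes
            data[name] = value
        return data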
-    print(f"Found {len(changes_to_apply)} records to update.")
-
-    # Apply changes
-    conn = sqlite3.connect(DB_FILE)
-    cursor = conn.cursor()
-    updated_count = 0
-
-    try:
-        for change in tqdm(changes_to_apply, desc="Applying changes"):
-            table_name = change['table']
-            row_id = change['row_id']
-            fields = change['fields']
-
-            if table_name not in repo_map:
-                continue
-
-            try:
-                repo_info = repo_map[table_name]
-                module_path = SCHEMA_LOCATION_MAP.get(repo_info['blob_schema_class'])
-                if not module_path:
-                    continue
-
-                schema_module = importlib.import_module(module_path)
-                schema_class = getattr(schema_module, repo_info['blob_schema_class'])
-                get_root_method = getattr(schema_class, f"GetRootAs{repo_info['blob_schema_class']}")
-
-                # Get and update data
-                cursor.execute(f'SELECT "{BLOB_COLUMN}" FROM "{table_name}" WHERE rowid = ?', (row_id,))
-                result = cursor.fetchone()
-                if not result or not result[0]:
-                    continue
-
-                fbs_obj = get_root_method(result[0], 0)
-                data_dict = flatbuffer_to_dict(fbs_obj)
-                data_dict.update(fields)
-
-                # Rebuild and save
-                builder = flatbuffers.Builder(1024)
-                new_offset = dict_to_flatbuffer(builder, data_dict, schema_class)
-                builder.Finish(new_offset)
-
-                cursor.execute(f'UPDATE "{table_name}" SET "{BLOB_COLUMN}" = ? WHERE rowid = ?',
-                               (bytes(builder.Output()), row_id))
-                updated_count += 1
-
-            except Exception:
-                continue
-
-        conn.commit()
-        print(f"\nSuccess! Updated {updated_count} database entries.")
-
-    except Exception as e:
-        conn.rollback()
-        print(f"ERROR during patching: {e}")
-    finally:
-        conn.close()
+# ============================================================================
+# SETUP AND UTILITY FUNCTIONS
+# ============================================================================
+
+def setup_schema_from_csharp(csharp_file='types.cs', output_fbs='generated_schema.fbs'):
+    """Parse C# files and generate FlatBuffers schema.
+
+    Args:
+        csharp_file (str): Path to C# file with type definitions
+        output_fbs (str): Output .fbs schema file path
+    """
+    if not validate_required_files(csharp_file):
+        return
+
+    from unidecode import unidecode
+
+    print(f"Parsing C# file: {csharp_file}")
+    print("This may take a while for large files...")
+
+    # Type mapping
+    type_map = {
+        'long': 'long', 'ulong': 'ulong', 'int': 'int', 'uint': 'uint',
+        'short': 'short', 'ushort': 'ushort', 'float': 'float', 'double': 'double',
+        'bool': 'bool', 'string': 'string', 'byte': 'ubyte', 'sbyte': 'byte'
+    }
+
+    def sanitize(name):
+        return re.sub(r'[^A-Za-z0-9_.]', '_', unidecode(name))
+
+    def to_snake_case(name):
+        name = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', name)
+        name = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', name)
+        return name.lower().replace('-', '_')
+
+    # Parse C# file
+    with open(csharp_file, 'r', encoding='utf-8') as f:
+        content = f.read()
+
+    # Extract namespace
+    ns_match = re.search(r'namespace\s+([\w.]+)', content)
+    namespace = ns_match.group(1) if ns_match else 'FlatData'
+
+    # Parse tables and enums
+    tables = {}
+    enums = {}
+
+    # Find all class/table definitions
+    table_pattern = re.compile(r'public\s+(?:sealed\s+)?class\s+(\w+)\s*{([^}]+)}', re.DOTALL)
+    for match in table_pattern.finditer(content):
+        name = match.group(1)
+        body = match.group(2)
+
+        # Skip non-table classes
+        if 'BaseExcelRepository' in body or 'BaseDBSchema' in body:
+            continue
+
+        fields = []
+        prop_pattern = re.compile(r'public\s+([\w.<>\[\]?]+)\s+(\w+)\s*{\s*get;\s*set;\s*}')
+        for prop_match in prop_pattern.finditer(body):
+            field_type = prop_match.group(1).replace('?', '')
+            field_name = to_snake_case(prop_match.group(2))
+
+            # Convert type
+            if field_type in type_map:
+                fbs_type = type_map[field_type]
+            elif field_type.startswith('List<'):
+                inner = field_type[5:-1].replace('?', '')
+                fbs_type = f"[{type_map.get(inner, sanitize(inner))}]"
+            else:
+                fbs_type = sanitize(field_type)
+
+            fields.append((field_name, fbs_type))
+
+        if fields:
+            tables[name] = fields
+
+    # Find enums
+    enum_pattern = re.compile(r'public\s+enum\s+(\w+)\s*{([^}]+)}', re.DOTALL)
+    for match in enum_pattern.finditer(content):
+        name = match.group(1)
+        body = match.group(2)
+        values = []
+
+        for line in body.split(','):
+            line = line.strip().split('=')[0].strip()
+            if line and not line.startswith('//'):
+                values.append(to_snake_case(line))
+
+        if values:
+            enums[name] = values
+
+    # Generate .fbs file
+    print(f"Generating schema file: {output_fbs}")
+    with open(output_fbs, 'w', encoding='utf-8') as f:
+        f.write(f"namespace {namespace};\n\n")
+
+        # Write enums
+        for enum_name, values in sorted(enums.items()):
+            f.write(f"enum {enum_name} : int {{\n")
+            for value in values:
+                f.write(f"  {value},\n")
+            f.write("}\n\n")
+
+        # Write tables
+        for table_name, fields in sorted(tables.items()):
+            f.write(f"table {table_name} {{\n")
+            for field_name, field_type in fields:
+                f.write(f"  {field_name}:{field_type};\n")
+            f.write("}\n\n")
+
+    print(f"Success! Generated {len(tables)} tables and {len(enums)} enums.")
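To make the transformation concrete: a plain C# data class such as the hypothetical

    public class DialogData
    {
        public long GroupId { get; set; }
        public string TextKr { get; set; }
        public List<int> Tags { get; set; }
    }

comes out of setup_schema_from_csharp() as

    table DialogData {
      group_id:long;
      text_kr:string;
      tags:[int];
    }

with to_snake_case() lowercasing the property names and List<T> mapping to the [T] vector syntax.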

+def setup_repository_mapping(csharp_file='types.cs', output_json='repository_map.json'):
+    """Create repository mapping from C# file.
+
+    Args:
+        csharp_file (str): Path to C# file
+        output_json (str): Output JSON mapping file
+    """
+    if not validate_required_files(csharp_file):
+        return
+
+    print(f"Analyzing '{csharp_file}' to create repository mapping...")
+
+    # Parse patterns
+    repo_pattern = re.compile(
+        r'public class (\w+)\s*:\s*BaseExcelRepository<[^,]+,\s*([^,]+),\s*([^>]+)>'
+    )
+    db_schema_pattern = re.compile(r'public class (\w+)\s*:\s*BaseDBSchema')
+    prop_pattern = re.compile(r'public\s+([\w.<>\[\]?]+)\s+(\w+)\s*{\s*get;\s*set;\s*}')
+
+    repositories = OrderedDict()
+    db_schemas = OrderedDict()
+    current_schema = None
+
+    with open(csharp_file, 'r', encoding='utf-8') as f:
+        for line in f:
+            line = line.strip().split('//')[0]
+
+            # Repository definition
+            repo_match = repo_pattern.search(line)
+            if repo_match:
+                repo_name = repo_match.group(1)
+                table_class = repo_match.group(2).strip()
+                schema_class = repo_match.group(3).strip()
+                repositories[repo_name] = {
+                    'table_class': table_class,
+                    'schema_class': schema_class
+                }
+                continue
+
+            # DB Schema definition
+            schema_match = db_schema_pattern.search(line)
+            if schema_match:
+                current_schema = schema_match.group(1)
+                db_schemas[current_schema] = {'properties': []}
+                continue
+
+            # Properties
+            if current_schema:
+                prop_match = prop_pattern.search(line)
+                if prop_match:
+                    prop_type = prop_match.group(1)
+                    prop_name = prop_match.group(2)
+                    db_schemas[current_schema]['properties'].append({
+                        'name': prop_name,
+                        'type': prop_type
+                    })
+
+    # Match repositories with schemas
+    mapping = OrderedDict()
+    for repo_name, repo_info in repositories.items():
+        table_class = repo_info['table_class']
+        schema_class = repo_info['schema_class']
+
+        if schema_class in db_schemas:
+            mapping[repo_name] = {
+                'table_name': table_class.replace('Excel', ''),
+                'blob_schema_class': schema_class,
+                'properties': db_schemas[schema_class]['properties']
+            }
+
+    # Save mapping
+    with open(output_json, 'w', encoding='utf-8') as f:
+        json.dump(mapping, f, indent=2, ensure_ascii=False)
+
+    print(f"Success! Created mapping with {len(mapping)} repositories.")
+    print(f"Mapping saved to: {output_json}")
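The resulting repository_map.json is keyed by repository class name; one entry, with hypothetical names, would look like:

    {
      "DialogExcelRepository": {
        "table_name": "Dialog",
        "blob_schema_class": "DialogDBSchema",
        "properties": [
          {"name": "GroupId", "type": "long"},
          {"name": "Bytes", "type": "byte[]"}
        ]
      }
    }

Note the 'Excel' suffix stripped from the table class by table_class.replace('Excel', '').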

+def preprocess_flatbuffer_schema(input_fbs, output_fbs=None):
+    """Preprocess FlatBuffer schema to rename Python reserved keywords.
+
+    Args:
+        input_fbs (str): Input schema file
+        output_fbs (str): Output schema file (if None, modifies in place)
+    """
+    if not validate_required_files(input_fbs):
+        return
+
+    reserved = [
+        'self', 'class', 'def', 'return', 'import', 'from', 'as',
+        'if', 'elif', 'else', 'while', 'for', 'in', 'is', 'not',
+        'and', 'or', 'True', 'False', 'None', 'pass', 'break',
+        'continue', 'try', 'except', 'finally', 'raise', 'with',
+        'yield', 'lambda', 'global', 'nonlocal'
+    ]
+
+    with open(input_fbs, 'r', encoding='utf-8') as f:
+        content = f.read()
+
+    modified = False
+    for keyword in reserved:
+        pattern = rf'\b({keyword})(\s*:\s*\w+)'
+        if re.search(pattern, content):
+            content = re.sub(pattern, rf'\1_\2', content)
+            modified = True
+            print(f"  Renamed '{keyword}' -> '{keyword}_'")
+
+    output_file = output_fbs or input_fbs
+    with open(output_file, 'w', encoding='utf-8') as f:
+        f.write(content)
+
+    if modified:
+        print(f"Preprocessed schema saved to: {output_file}")
+    else:
+        print("No reserved keywords found in schema.")
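The rename only fires on field declarations (an identifier followed by a colon and a type), so a schema line like `from:string;` becomes `from_:string;` while type names on the right-hand side are untouched. The substitution in isolation:

    import re

    line = "  from:string;"
    print(re.sub(r'\b(from)(\s*:\s*\w+)', r'\1_\2', line))  # prints "  from_:string;"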

+def generate_flatbuffer_python(fbs_file, flatc_exe='flatc.exe', output_dir='.'):
+    """Generate Python modules from FlatBuffer schema.
+
+    Args:
+        fbs_file (str): FlatBuffer schema file (.fbs)
+        flatc_exe (str): Path to flatc compiler
+        output_dir (str): Output directory for generated Python files
+    """
+    if not validate_required_files(fbs_file, flatc_exe):
+        return
+
+    print(f"Generating Python modules from: {fbs_file}")
+
+    # Run flatc compiler
+    cmd = [
+        flatc_exe,
+        '--python',
+        '--gen-object-api',
+        '-o', output_dir,
+        fbs_file
+    ]
+
+    result = os.system(' '.join(cmd))
+
+    if result == 0:
+        print("Success! Python modules generated.")
+    else:
+        print(f"ERROR: flatc failed with code {result}")
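os.system(' '.join(cmd)) breaks if any path contains spaces; a quoting-safe equivalent, offered only as a sketch of the same invocation, is subprocess.run with the list passed directly:

    import subprocess

    cmd = ['flatc.exe', '--python', '--gen-object-api', '-o', '.', 'generated_schema.fbs']
    result = subprocess.run(cmd)  # list form, so no shell quoting is involved
    print("Success!" if result.returncode == 0 else f"flatc failed with code {result.returncode}")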

+def fix_flatbuffer_reserved_names(directory='MX'):
+    """Fix Python reserved keywords in generated FlatBuffer files.
+
+    Args:
+        directory (str): Directory containing generated Python files
+    """
+    from pathlib import Path
+
+    if not os.path.exists(directory):
+        print(f"ERROR: Directory '{directory}' not found")
+        return
+
+    print(f"Scanning {directory} for reserved keyword issues...")
+
+    reserved_map = {'self': 'self_', 'class': 'class_', 'import': 'import_'}
+    fixed_count = 0
+
+    for py_file in Path(directory).rglob('*.py'):
+        try:
+            with open(py_file, 'r', encoding='utf-8') as f:
+                content = f.read()
+
+            original = content
+
+            for reserved, new_name in reserved_map.items():
+                # Fix parameter names
+                pattern = rf'(def __init__\([^)]*\n\s+self,\n(?:[^)]*\n)*?\s+){reserved}(\s*=)'
+                if re.search(pattern, content):
+                    content = re.sub(pattern, rf'\1{new_name}\2', content)
+                    content = content.replace(f'self.{reserved} = {reserved}', f'self.{new_name} = {new_name}')
+                    print(f"  Fixed: {py_file.name}")
+
+            if content != original:
+                with open(py_file, 'w', encoding='utf-8') as f:
+                    f.write(content)
+                fixed_count += 1
+        except Exception as e:
+            print(f"  ERROR in {py_file}: {e}")
+
+    print(f"\nFixed {fixed_count} file(s).")

 # ============================================================================
@@ -863,6 +1227,77 @@ def main():
         help='CSV file to validate (default: translations.csv)'
     )

+    # Setup schema command - generate FlatBuffer schema from C#
+    parser_setup_schema = subparsers.add_parser(
+        'setup_schema',
+        help='Parse C# files and generate FlatBuffer schema (.fbs file).'
+    )
+    parser_setup_schema.add_argument(
+        '--csharp',
+        type=str,
+        default='types.cs',
+        help='Input C# file with type definitions (default: types.cs)'
+    )
+    parser_setup_schema.add_argument(
+        '--output',
+        type=str,
+        default='generated_schema.fbs',
+        help='Output .fbs schema file (default: generated_schema.fbs)'
+    )
+
+    # Setup mapping command - create repository mapping
+    parser_setup_mapping = subparsers.add_parser(
+        'setup_mapping',
+        help='Create repository mapping from C# files.'
+    )
+    parser_setup_mapping.add_argument(
+        '--csharp',
+        type=str,
+        default='types.cs',
+        help='Input C# file (default: types.cs)'
+    )
+    parser_setup_mapping.add_argument(
+        '--output',
+        type=str,
+        default='repository_map.json',
+        help='Output mapping JSON file (default: repository_map.json)'
+    )
+
+    # Generate FlatBuffers command - generate Python modules
+    parser_gen_fb = subparsers.add_parser(
+        'generate_flatbuffers',
+        help='Generate Python modules from FlatBuffer schema with preprocessing.'
+    )
+    parser_gen_fb.add_argument(
+        '--schema',
+        type=str,
+        default='generated_schema.fbs',
+        help='Input .fbs schema file (default: generated_schema.fbs)'
+    )
+    parser_gen_fb.add_argument(
+        '--flatc',
+        type=str,
+        default='flatc.exe',
+        help='Path to flatc compiler (default: flatc.exe)'
+    )
+    parser_gen_fb.add_argument(
+        '--no-preprocess',
+        action='store_true',
+        help='Skip preprocessing (fixing reserved keywords)'
+    )
+
+    # Fix reserved names command - fix generated Python files
+    parser_fix_names = subparsers.add_parser(
+        'fix_reserved_names',
+        help='Fix Python reserved keywords in generated FlatBuffer files.'
+    )
+    parser_fix_names.add_argument(
+        '--directory',
+        type=str,
+        default='MX',
+        help='Directory with generated Python files (default: MX)'
+    )
+
     # Parse arguments and execute appropriate command
     args = parser.parse_args()
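Taken together, the four new subcommands cover the same setup pipeline that extract_strings now also runs automatically. Assuming the script is invoked as BAtranslator.py, typical calls would be:

    python BAtranslator.py setup_schema --csharp types.cs --output generated_schema.fbs
    python BAtranslator.py setup_mapping --csharp types.cs --output repository_map.json
    python BAtranslator.py generate_flatbuffers --schema generated_schema.fbs --flatc flatc.exe
    python BAtranslator.py fix_reserved_names --directory MX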
@@ -877,6 +1312,17 @@ def main():
         import_from_csv(args.input, args.output, args.original)
     elif args.command == 'validate_csv':
         validate_csv(args.input)
+    elif args.command == 'setup_schema':
+        setup_schema_from_csharp(args.csharp, args.output)
+    elif args.command == 'setup_mapping':
+        setup_repository_mapping(args.csharp, args.output)
+    elif args.command == 'generate_flatbuffers':
+        if not args.no_preprocess:
+            print("Preprocessing schema to fix reserved keywords...")
+            preprocess_flatbuffer_schema(args.schema)
+        generate_flatbuffer_python(args.schema, args.flatc)
+    elif args.command == 'fix_reserved_names':
+        fix_flatbuffer_reserved_names(args.directory)
     else:
         print(f"ERROR: Unknown command '{args.command}'")
         parser.print_help()