🔨 Clean up and improve some Python scripts (#27752)

Andrew 2025-08-13 13:33:09 -04:00 committed by GitHub
parent 081458a3c8
commit 4fb984e960
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 377 additions and 313 deletions


@@ -1,6 +1,7 @@
#!/usr/bin/python3
# Written By Marcio Teixeira 2018 - Aleph Objects, Inc.
# Edited By Andrew 2025 - ClassicRocker883
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -16,13 +17,13 @@
# location: <https://www.gnu.org/licenses/>.
from __future__ import print_function
import argparse, re, sys
import argparse, re, sys, os
from html.parser import HTMLParser
usage = '''
This program extracts line segments from a SVG file and writes
them as coordinates in a C array. The x and y values will be
USAGE = """
This program extracts line segments from an SVG file and writes
them out as coordinates in a C array. The X and Y values will be
scaled from 0x0000 to 0xFFFE. 0xFFFF is used as path separator.
This program can only interpret straight segments, not curves.
@@ -33,19 +34,22 @@ SVG file into the proper format, use the following procedure:
- Convert all Objects to Paths (Path -> Object to Path)
- Convert all Strokes to Paths (Path -> Stroke to Path)
- Combine all paths into one (Path -> Combine) [1]
- Convert all curves into short line segments
(Extensions -> Modify Paths -> Flatten Beziers...)
- Convert all curves into short line segments [2]
(Extensions -> Modify Paths -> Approximate Curves by Straight Lines...)
- Save as new SVG
- Convert into a header file using this utility
- To give paths individual names, break apart paths and
use the XML Editor to set the "id" attributes.
[1] Combining paths is necessary to remove transforms. You
could also use inkscape-applytransforms Inkscape extension.
could also use the Inkscape extension inkscape-applytransforms.
'''
[2] "Approximate Curves by Straight Lines..." has replaced
"Flatten Beziers".
header = '''
"""
header = """
/****************************************************************************
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
@@ -70,249 +74,258 @@ header = '''
*/
#pragma once
'''
"""
class ComputeBoundingBox:
def reset(self):
self.x_min = float(" inf")
self.y_min = float(" inf")
self.x_max = float("-inf")
self.y_max = float("-inf")
self.n_points = 0
self.n_paths = 0
def command(self, type, x, y):
self.x_min = min(self.x_min, x)
self.x_max = max(self.x_max, x)
self.y_min = min(self.y_min, y)
self.y_max = max(self.y_max, y)
if type == "M":
self.n_paths += 1
self.n_points += 1
def scale(self, x, y):
x -= self.x_min
y -= self.y_min
x /= self.x_max - self.x_min
y /= self.y_max - self.y_min
#y = 1 - y # Flip upside down
return (x, y)
def path_finished(self, id):
pass
def write(self):
print("constexpr float x_min = %f;" % self.x_min)
print("constexpr float x_max = %f;" % self.x_max)
print("constexpr float y_min = %f;" % self.y_min)
print("constexpr float y_max = %f;" % self.y_max)
print()
def from_svg_view_box(self, viewbox):
m = re.search('([0-9-.]+) ([0-9-.]+) ([0-9-.]+) ([0-9-.]+)', viewbox)
if m:
self.x_min = float(m[1])
self.y_min = float(m[2])
self.x_max = float(m[3])
self.y_max = float(m[4])
return True
return False
class WriteDataStructure:
def __init__(self, bounding_box):
self.bounds = bounding_box
def __init__(self, bounding_box, input_filename):
self.bounds = bounding_box
self.reset()
self.input_filename = input_filename
def reset(self, ):
self.hex_words = []
def reset(self):
self.hex_words = []
def push(self, value):
self.hex_words.append("0x%04X" % (0xFFFF & int(value)))
def command(self, type, x, y):
if type == "M":
self.push(0xFFFF)
x, y = self.bounds.scale(x,y)
self.push(x * 0xFFFE)
self.push(y * 0xFFFE)
def path_finished(self, id):
if self.hex_words and self.hex_words[0] == "0xFFFF":
self.hex_words.pop(0)
print("const PROGMEM uint16_t", id + "[] = {" + ", ".join (self.hex_words) + "};")
self.hex_words = []
def path_finished(self, id):
if self.hex_words and self.hex_words[0] == "0xFFFF":
self.hex_words.pop(0)
self.write_to_file(id)
self.hex_words = []
def write_to_file(self, id):
base_filename = os.path.splitext(self.input_filename)[0] + '.h'
with open(base_filename, "w") as outfile:
outfile.write(header)
outfile.write("const PROGMEM uint16_t " + id + "[] = {" + ", ".join(self.hex_words) + "};\n")
class SVGParser(HTMLParser):
def __init__(self, args):
super().__init__()
self.args = args
self.tags = []
self.groups = []
self.op = None
self.restart()
def set_consumer(self, op):
self.op = op
if self.op:
self.op.reset()
def restart(self):
self.last_x = 0
self.last_y = 0
self.initial_x = 0
self.initial_y = 0
def process_svg_path_L_or_M(self, cmd, x, y):
if self.op:
self.op.command(cmd, x, y)
self.last_x = x
self.last_y = y
if cmd == "M":
self.initial_x = x
self.initial_y = y
def process_svg_path_data_cmd(self, id, cmd, a, b):
"""Converts the various types of moves into L or M commands
and dispatches to process_svg_path_L_or_M for further processing."""
if cmd == "Z" or cmd == "z":
self.process_svg_path_L_or_M("L", self.initial_x, self.initial_y)
elif cmd == "H":
self.process_svg_path_L_or_M("L", a, self.last_y)
elif cmd == "V":
self.process_svg_path_L_or_M("L", self.last_x, a)
elif cmd == "h":
self.process_svg_path_L_or_M("L", self.last_x + a, self.last_y)
elif cmd == "v":
self.process_svg_path_L_or_M("L", self.last_x, self.last_y + a)
elif cmd == "L":
self.process_svg_path_L_or_M("L", a, b)
elif cmd == "l":
self.process_svg_path_L_or_M("L", self.last_x + a, self.last_y + b)
elif cmd == "M":
self.process_svg_path_L_or_M("M", a, b)
elif cmd == "m":
self.process_svg_path_L_or_M("M", self.last_x + a, self.last_y + b)
else:
print("Unsupported path data command:", cmd, "in path", id, "\n", file=sys.stderr)
quit()
def eat_token(self, regex):
"""Looks for a token at the start of self.d.
If found, the token is removed."""
self.m = re.match(regex,self.d)
if self.m:
self.d = self.d[self.m.end():]
return self.m
def process_svg_path_data(self, id, d):
"""Breaks up the "d" attribute into individual commands
and calls "process_svg_path_data_cmd" for each"""
self.d = d
while (self.d):
if self.eat_token('\s+'):
pass # Just eat the spaces
elif self.eat_token('([LMHVZlmhvz])'):
cmd = self.m[1]
# The following commands take no arguments
if cmd == "Z" or cmd == "z":
self.process_svg_path_data_cmd(id, cmd, 0, 0)
elif self.eat_token('([CScsQqTtAa])'):
print("Unsupported path data command:", self.m[1], "in path", id, "\n", file=sys.stderr)
quit()
elif self.eat_token('([ ,]*[-0-9e.]+)+'):
# Process list of coordinates following command
coords = re.split('[ ,]+', self.m[0])
# The following commands take two arguments
if cmd == "L" or cmd == "l":
while coords:
self.process_svg_path_data_cmd(id, cmd, float(coords.pop(0)), float(coords.pop(0)))
elif cmd == "M":
while coords:
self.process_svg_path_data_cmd(id, cmd, float(coords.pop(0)), float(coords.pop(0)))
# If a MOVETO has multiple points, the subsequent ones are assumed to be LINETO
cmd = "L"
elif cmd == "m":
while coords:
self.process_svg_path_data_cmd(id, cmd, float(coords.pop(0)), float(coords.pop(0)))
# If a MOVETO has multiple points, the subsequent ones are assumed to be LINETO
cmd = "l"
# Assume all other commands are single argument
else:
while coords:
self.process_svg_path_data_cmd(id, cmd, float(coords.pop(0)), 0)
else:
print("Syntax error:", d, "in path", id, "\n", file=sys.stderr)
quit()
def find_attr(attrs, what):
for attr, value in attrs:
if attr == what:
return value
def layer_matches(self):
""" Are we in the correct layer?"""
if not self.args.layer:
return True
for l in self.groups:
if l and l.find(self.args.layer) != -1:
return True
return False
def handle_starttag(self, tag, attrs):
self.tags.append(tag)
if tag == 'svg':
self.viewbox = SVGParser.find_attr(attrs, 'viewbox')
if tag == 'g':
label = SVGParser.find_attr(attrs, 'inkscape:label')
self.groups.append(label)
if label and self.layer_matches():
print("Reading layer:", label, file=sys.stderr)
if tag == 'path' and self.layer_matches():
id = SVGParser.find_attr(attrs, 'id')
transform = SVGParser.find_attr(attrs, 'transform')
if transform:
print("Found transform in path", id, "! Cannot process file!", file=sys.stderr)
quit()
d = SVGParser.find_attr(attrs, 'd')
if d:
self.process_svg_path_data(id, d)
if self.op:
self.op.path_finished(id)
def process_svg_path_data(self, id, d):
"""Breaks up the "d" attribute into individual commands
and calls "process_svg_path_data_cmd" for each"""
self.d = d
while (self.d):
if self.eat_token(r'\s+'):
pass # Just eat the spaces
elif self.eat_token('([LMHVZlmhvz])'):
cmd = self.m[1]
# The following commands take no arguments
if cmd == "Z" or cmd == "z":
self.process_svg_path_data_cmd(id, cmd, 0, 0)
elif self.eat_token('([CScsQqTtAa])'):
print("Unsupported path data command:", self.m[1], "in path", id, "\n", file=sys.stderr)
quit()
elif self.eat_token('([ ,]*[-0-9e.]+)+'):
# Process list of coordinates following command
coords = re.split('[ ,]+', self.m[0])
# The following commands take two arguments
if cmd == "L" or cmd == "l":
while coords:
self.process_svg_path_data_cmd(id, cmd, float(coords.pop(0)), float(coords.pop(0)))
elif cmd == "M":
while coords:
self.process_svg_path_data_cmd(id, cmd, float(coords.pop(0)), float(coords.pop(0)))
# If a MOVETO has multiple points, the subsequent ones are assumed to be LINETO
cmd = "L"
elif cmd == "m":
while coords:
self.process_svg_path_data_cmd(id, cmd, float(coords.pop(0)), float(coords.pop(0)))
# If a MOVETO has multiple points, the subsequent ones are assumed to be LINETO
cmd = "l"
# Assume all other commands are single argument
else:
while coords:
self.process_svg_path_data_cmd(id, cmd, float(coords.pop(0)), 0)
else:
print("Syntax error:", d, "in path", id, "\n", file=sys.stderr)
quit()
def find_attr(self, attrs, what):
for attr, value in attrs:
if attr == what:
return value
def handle_starttag(self, tag, attrs):
self.tags.append(tag)
if tag == 'svg':
self.viewbox = self.find_attr(attrs, 'viewbox')
if tag == 'g':
label = self.find_attr(attrs, 'inkscape:label')
self.groups.append(label)
if label and self.layer_matches():
print("Reading layer:", label, file=sys.stderr)
if tag == 'path' and self.layer_matches():
id = self.find_attr(attrs, 'id')
transform = self.find_attr(attrs, 'transform')
if transform:
print("Found transform in path", id, "! Cannot process file!", file=sys.stderr)
quit()
d = self.find_attr(attrs, 'd')
if d:
self.process_svg_path_data(id, d)
if self.op:
self.op.path_finished(id)
self.restart()
def handle_endtag(self, tag):
if tag == 'g':
self.groups.pop()
if tag != self.tags.pop():
print("Error popping tag off list")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("filename")
parser.add_argument('--layer', help='only include layers which have this string in their names')
args = parser.parse_args()
parser = argparse.ArgumentParser(description=USAGE, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("filename")
parser.add_argument('--layer', help='Only include layers which have this string in their names')
args = parser.parse_args()
f = open(args.filename, "r", encoding='utf-8')
data = f.read()
f = open(args.filename, "r", encoding="utf-8")
data = f.read()
# First pass to grab viewbox
p = SVGParser(args)
p.feed(data)
w = WriteDataStructure(b)
b = ComputeBoundingBox()
if not b.from_svg_view_box(p.viewbox):
# Can't find the view box, so use the bounding box of the elements themselves.
p = SVGParser(args)
p.set_consumer(b)
p.feed(data)
b.write()
# Last pass to process paths
w = WriteDataStructure(b, args.filename)
p = SVGParser(args)
p.set_consumer(w)
p.feed(data)
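
To make the output format described in the usage text concrete, here is a minimal sketch of the same quantization idea, not taken from the script itself: X and Y are normalized into 0x0000–0xFFFE and 0xFFFF is emitted between paths. The function name and sample coordinates are illustrative only.

# Illustrative sketch only -- mirrors the scaling described in the usage text.
def quantize_paths(paths, x_min, x_max, y_min, y_max):
    """paths is a list of point lists; returns a flat list of 16-bit words."""
    words = []
    for n, path in enumerate(paths):
        if n:
            words.append(0xFFFF)                # 0xFFFF separates paths
        for x, y in path:
            xs = (x - x_min) / (x_max - x_min)  # normalize to 0..1
            ys = (y - y_min) / (y_max - y_min)
            words += [int(xs * 0xFFFE), int(ys * 0xFFFE)]
    return words

# Two short example paths quantized into one C-style array body
print(", ".join("0x%04X" % w for w in quantize_paths([[(0, 0), (10, 5)], [(3, 3), (7, 9)]], 0, 10, 0, 10)))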


@@ -14,7 +14,7 @@
# The fallback branch is bugfix-2.1.x.
#
import os, subprocess, sys, urllib.request
import os, subprocess, sys, urllib.request, urllib.error
from pathlib import Path
DEBUGGING = False
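
The added urllib.error import is presumably there so download failures can be handled explicitly rather than crashing the script. A hedged sketch of that pattern (the helper below is illustrative, not the script's actual code):

import urllib.request, urllib.error

def fetch_text(url):
    # Return the decoded response body, or None if the request fails.
    try:
        with urllib.request.urlopen(url, timeout=10) as r:
            return r.read().decode('utf-8')
    except (urllib.error.HTTPError, urllib.error.URLError) as e:
        print(f"Fetch failed for {url}: {e}")
        return None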


@@ -38,6 +38,6 @@ if pioutil.is_pio_build():
else:
# The following almost works, but __start__ (from wirish/start.S) is not seen by common.inc
board.update("build.variants_dir", source_root_str);
board.update("build.variants_dir", source_root_str)
src = str(source_dir)
env.Append(BUILD_FLAGS=[f"-I{src}", f"-L{src}/ld"]) # Add include path for variant


@@ -22,7 +22,7 @@ if pioutil.is_pio_build():
test_suites = collect_test_suites()
for path in test_suites:
name = re.sub(r'^\d+-|\.ini$', '', path.name)
targets += [name];
targets += [name]
env.AddCustomTarget(
name = f"marlin_{name}",


@@ -3,7 +3,7 @@
# configuration.py
# Apply options from config.ini to the existing Configuration headers
#
import re, shutil, configparser, datetime
import re, os, shutil, configparser, datetime
from pathlib import Path
verbose = 0
@@ -145,8 +145,6 @@ def fetch_example(url):
blab("Couldn't find curl or wget", -1)
return False
import os
# Reset configurations to default
os.system("git checkout HEAD Marlin/*.h")


@@ -136,7 +136,7 @@ if pioutil.is_pio_build():
#
mixedin = []
p = project_dir / "Marlin/src/lcd/dogm"
for f in [ "ultralcd_DOGM.cpp", "ultralcd_DOGM.h","u8g_dev_ssd1306_sh1106_128x64_I2C.cpp", "u8g_dev_ssd1309_12864.cpp", "u8g_dev_st7565_64128n_HAL.cpp", "u8g_dev_st7920_128x64_HAL.cpp", "u8g_dev_tft_upscale_from_128x64.cpp", "u8g_dev_uc1701_mini12864_HAL.cpp", "ultralcd_st7920_u8glib_rrd_AVR.cpp" ]:
for f in [ "ultralcd_DOGM.cpp", "ultralcd_DOGM.h", "u8g_dev_ssd1306_sh1106_128x64_I2C.cpp", "u8g_dev_ssd1309_12864.cpp", "u8g_dev_st7565_64128n_HAL.cpp", "u8g_dev_st7920_128x64_HAL.cpp", "u8g_dev_tft_upscale_from_128x64.cpp", "u8g_dev_uc1701_mini12864_HAL.cpp", "ultralcd_st7920_u8glib_rrd_AVR.cpp" ]:
if (p / f).is_file():
mixedin += [ f ]
p = project_dir / "Marlin/src/feature/bedlevel/abl"


@@ -1,16 +1,34 @@
#!/usr/bin/env python3
#
# schema.py
#
# Used by signature.py via common-dependencies.py to generate a schema file during the PlatformIO build
# when CONFIG_EXPORT is defined in the configuration.
#
# This script can also be run standalone from within the Marlin repo to generate JSON and YAML schema files.
#
# This script is a companion to abm/js/schema.js in the MarlinFirmware/AutoBuildMarlin project, which has
# been extended to evaluate conditions and can determine what options are actually enabled, not just which
# options are uncommented. That will be migrated to this script for standalone migration.
#
"""
schema.py
Extract firmware configuration into structured JSON or YAML schema format.
Used by signature.py via common-dependencies.py to generate a schema file during the
PlatformIO build when CONFIG_EXPORT is defined in the configuration.
This script can also be run standalone from within the Marlin repo, and is a companion to
abm/js/schema.js in the MarlinFirmware/AutoBuildMarlin project, which has been extended to
evaluate conditions and can determine what options are actually enabled, not just which
options are uncommented. That will be migrated to this script for standalone use.
Usage: schema.py [-h] [some|json|jsons|group|yml|yaml]
Process Marlin firmware configuration files (Configuration.h and Configuration_adv.h)
to produce structured output suitable for documentation, tooling, or automated processing.
Positional arguments:
some Generate both JSON and YAML output (schema.json and schema.yml)
json Generate JSON output (schema.json)
jsons Generate grouped JSON output with wildcard options (schema.json and schema_grouped.json)
group Generate grouped JSON output only (schema_grouped.json)
yml Generate YAML output (schema.yml)
yaml Same as 'yml'
Optional arguments:
-h, --help Show this help message and exit
"""
import re, json
from pathlib import Path
@@ -475,10 +493,10 @@ def main():
def inargs(c): return len(set(args) & set(c)) > 0
# Help / Unknown option
unk = not inargs(['some','json','jsons','group','yml','yaml'])
unk = not inargs(['some','json','jsons','group','yml','yaml', '-h', '--help'])
if (unk): print(f"Unknown option: '{args[0]}'")
if inargs(['-h', '--help']) or unk:
print("Usage: schema.py [some|json|jsons|group|yml|yaml]...")
print("Usage: schema.py [-h] [some|json|jsons|group|yml|yaml]")
print(" some = json + yml")
print(" jsons = json + group")
return
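
For reference, a hedged sketch of consuming the exported schema: the file name schema.json comes from the usage text above, but the entry structure is not documented here, so the loop only reports top-level keys.

import json
from pathlib import Path

schema_path = Path('schema.json')   # produced by: schema.py json
if schema_path.is_file():
    schema = json.loads(schema_path.read_text(encoding='utf-8'))
    print(f"{len(schema)} top-level entries")
    for key in list(schema)[:10]:
        print(' -', key)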


@@ -281,7 +281,7 @@ def compute_build_signature(env):
for line in sec_lines[1:]: sec_list += '\n' + ext_fmt.format('', line)
config_ini = build_path / 'config.ini'
with config_ini.open('w') as outfile:
with config_ini.open('w', encoding='utf-8') as outfile:
filegrp = { 'Configuration.h':'config:basic', 'Configuration_adv.h':'config:advanced' }
vers = build_defines["CONFIGURATION_H_VERSION"]
dt_string = datetime.now().strftime("%Y-%m-%d at %H:%M:%S")


@@ -47,6 +47,11 @@ def marlin_font_hzk():
with open(f[2], 'rb') as file:
print(f'{f[0]}x{f[1]}')
font = bdflib.reader.read_bdf(file)
if font is None:
print(f'Failed to read font from {f[2]}')
continue # Skip this font and move to the next one
for glyph in range(128):
bits = glyph_bits(f[0], f[1], font, glyph)
glyph_bytes = math.ceil(f[0]/8)
@@ -58,6 +63,7 @@ def marlin_font_hzk():
except OverflowError:
print('Overflow')
print(f'{glyph}')
print(font[glyph])
if font and glyph in font: print(font[glyph])
else: print(f'Glyph {glyph} not found in the font or font is None')
for b in bits: print(f'{b:0{f[0]}b}')
return
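
A small sketch of the byte-packing arithmetic used above, with made-up row values: each glyph row of width w occupies math.ceil(w / 8) bytes, and rows print as fixed-width bit strings much like the debug output.

import math

width = 12                                    # hypothetical glyph width in pixels
glyph_bytes = math.ceil(width / 8)            # 2 bytes per row
for row in (0b111100001111, 0b000011110000):  # made-up row bitmaps
    print(f'{row:0{width}b}', '->', row.to_bytes(glyph_bytes, 'big').hex())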


@@ -11,27 +11,27 @@ from __future__ import print_function
folder = './'
my_file = 'test.gcode'
# this is the minimum of G1 instructions which should be between 2 different heights
min_g1 = 3
# The minimum number of G1 instructions that should be between 2 different heights
min_g = 3
# maximum number of lines to parse, I don't want to parse the complete file
# only the first plane is we are interested in
max_g1 = 100000000
# Maximum number of lines to parse. We don't want to parse the
# whole file since we're only interested in the first plane.
max_g = 100000000
# g29 keyword
g29_keyword = 'g29'
g29_keyword = g29_keyword.upper()
# G29 keyword
g29_keyword = 'G29'
# output filename
# Output filename
output_file = folder + 'g29_' + my_file
# input filename
# Input filename
input_file = folder + my_file
# minimum scan size
# Minimum scan size
min_size = 40
probing_points = 3 # points x points
max_lines = 1500
# other stuff
# Other stuff
min_x = 500
min_y = min_x
max_x = -500
@@ -43,13 +43,19 @@ lines_of_g1 = 0
gcode = []
g29_found = False
g28_found = False
# return only g1-lines
def has_g1(line):
return line[:2].upper() == "G1"
YELLOW = '\033[33m'
GREEN = '\033[32m'
RED = '\033[31m'
RESET = '\033[0m'
# Return only G0-G1 lines
def has_g_move(line):
return line[:2].upper() in ("G0", "G1")
# find position in g1 (x,y,z)
# Find position in G move (x,y,z)
def find_axis(line, axis):
found = False
number = ""
@@ -73,7 +79,7 @@ def find_axis(line, axis):
return None
# save the min or max-values for each axis
# Save the min or max values for each axis
def set_mima(line):
global min_x, max_x, min_y, max_y, last_z
@@ -90,7 +96,7 @@ def set_mima(line):
return min_x, max_x, min_y, max_y
# find z in the code and return it
# Find z in the code and return it
def find_z(gcode, start_at_line=0):
for i in range(start_at_line, len(gcode)):
my_z = find_axis(gcode[i], 'Z')
@@ -103,65 +109,74 @@ def z_parse(gcode, start_at_line=0, end_at_line=0):
all_z = []
line_between_z = []
z_at_line = []
# last_z = 0
#last_z = 0
last_i = -1
while len(gcode) > i:
try:
z, i = find_z(gcode, i + 1)
except TypeError:
break
result = find_z(gcode, i + 1)
if result is None:
raise ValueError(f'{RED}Unable to determine Z height.{RESET}')
z, i = result
all_z.append(z)
z_at_line.append(i)
temp_line = i - last_i -1
line_between_z.append(i - last_i - 1)
# last_z = z
#last_z = z
last_i = i
if 0 < end_at_line <= i or temp_line >= min_g1:
# print('break at line {} at height {}'.format(i, z))
if 0 < end_at_line <= i or temp_line >= min_g:
#print('break at line {} at height {}'.format(i, z))
break
line_between_z = line_between_z[1:]
return all_z, line_between_z, z_at_line
# get the lines which should be the first layer
# Get the lines which should be the first layer
def get_lines(gcode, minimum):
i = 0
all_z, line_between_z, z_at_line = z_parse(gcode, end_at_line=max_g1)
all_z, line_between_z, z_at_line = z_parse(gcode, end_at_line=max_g)
#print('Detected Z heights:', all_z)
for count in line_between_z:
i += 1
if count > minimum:
# print('layer: {}:{}'.format(z_at_line[i-1], z_at_line[i]))
#print('layer: {}:{}'.format(z_at_line[i-1], z_at_line[i]))
return z_at_line[i - 1], z_at_line[i]
with open(input_file, 'r') as file:
with open(input_file, 'r', encoding='utf_8') as file:
lines = 0
for line in file:
lines += 1
if lines > 1000:
if lines > max_lines:
break
if has_g1(line):
if has_g_move(line):
gcode.append(line)
file.close()
start, end = get_lines(gcode, min_g1)
layer_range = get_lines(gcode, min_g)
if layer_range is None:
raise ValueError(f'{RED}Unable to determine layer range.{RESET}')
start, end = layer_range
for i in range(start, end):
set_mima(gcode[i])
print('x_min:{} x_max:{}\ny_min:{} y_max:{}'.format(min_x, max_x, min_y, max_y))
# resize min/max - values for minimum scan
# Resize min/max values for minimum scan
if max_x - min_x < min_size:
offset_x = int((min_size - (max_x - min_x)) / 2 + 0.5) # int round up
# print('min_x! with {}'.format(int(max_x - min_x)))
#print('min_x! with {}'.format(int(max_x - min_x)))
min_x = int(min_x) - offset_x
max_x = int(max_x) + offset_x
if max_y - min_y < min_size:
offset_y = int((min_size - (max_y - min_y)) / 2 + 0.5) # int round up
# print('min_y! with {}'.format(int(max_y - min_y)))
#print('min_y! with {}'.format(int(max_y - min_y)))
min_y = int(min_y) - offset_y
max_y = int(max_y) + offset_y
@@ -172,17 +187,24 @@ new_command = 'G29 L{0} R{1} F{2} B{3} P{4}\n'.format(min_x,
max_y,
probing_points)
out_file = open(output_file, 'w')
in_file = open(input_file, 'r')
with open(input_file, 'r', encoding='utf_8') as in_file, open(output_file, 'w', encoding='utf_8') as out_file:
for line in in_file:
# Check if G29 already exists
if line.strip().upper().startswith(g29_keyword):
g29_found = True
out_file.write(new_command)
print(f'{YELLOW}Write G29.{RESET}')
else:
out_file.write(line)
for line in in_file:
if line[:len(g29_keyword)].upper() == g29_keyword:
out_file.write(new_command)
print('write G29')
else:
out_file.write(line)
# If we find G28 and G29 wasn't found earlier, insert G29 after G28
if not g29_found and line.strip().upper().startswith('G28'):
g28_found = True # Mark that G28 was found
out_file.write(new_command) # Insert G29 command
print(f'{YELLOW}Note: G29 was not found.\nInserted G29 after G28.{RESET}')
file.close()
out_file.close()
print('auto G29 finished')
# Debugging messages
if not g28_found and not g29_found:
print(f'{RED}Error: G28 not found! G29 was not added.{RESET}')
else:
print(f'{GREEN}auto G29 finished!{RESET}')
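
To illustrate the insertion behaviour described above with made-up G-code: when no G29 is present, the generated command is written immediately after the first G28.

# Illustrative only -- shows where the generated G29 line ends up.
new_command = 'G29 L10 R50 F10 B50 P3\n'        # hypothetical bounds and probe points
source = ['G28 ; home\n', 'G1 X10 Y10 F3000\n', 'G1 X50 Y50\n']
output, g29_found = [], False
for line in source:
    output.append(line)
    if not g29_found and line.strip().upper().startswith('G28'):
        output.append(new_command)              # insert G29 right after G28
        g29_found = True
print(''.join(output), end='')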


@@ -1,14 +1,13 @@
#!/usr/bin/env python3
"""
Formatter script for pins_MYPINS.h files
#
# Formatter script for pins_MYPINS.h files
#
# Usage: pinsformat.py [infile] [outfile]
#
# With no parameters convert STDIN to STDOUT
#
usage: pinsformat.py [infile] [outfile]
import sys, re
With no parameters convert STDIN to STDOUT
"""
import sys, re, argparse
do_log = False
def logmsg(msg, line):
@@ -46,14 +45,21 @@ ppad = [ 3, 4, 5, 5 ]
definePinPatt = re.compile(rf'^\s*(//)?#define\s+[A-Z_][A-Z0-9_]+?_PIN\s+({mstr})\s*(//.*)?$')
def format_pins(argv):
src_file = 'stdin'
dst_file = None
parser = argparse.ArgumentParser(description="Formatter script for pins_MYPINS.h files")
parser.add_argument('infile', nargs='?', default=None, help="Input file to read from. If not provided, reads from stdin.")
parser.add_argument('outfile', nargs='?', default=None, help="Output file to write to. If not provided, writes to stdout.")
parser.add_argument('-v', action='store_true', help="Enable logging.")
args = parser.parse_args(argv)
src_file = args.infile or 'stdin'
dst_file = args.outfile or None
scnt = 0
for arg in argv:
if arg == '-v':
global do_log
do_log = True
do_log = args.v or True
elif scnt == 0:
# Get a source file if specified. Default destination is the same file
src_file = dst_file = arg
@@ -258,7 +264,7 @@ def process_text(txt):
if wDict['check_comment_next']:
# Comments in column 50
line = rpad('', col_comment) + r[1]
line = rpad('', col_comment) + (r[1] if r else '')
elif trySkip1(wDict): pass #define SKIP_ME
elif tryPindef(wDict): pass #define MY_PIN [pin]
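
As a rough picture of what the formatter does to a pin definition, with illustrative pin names and a guessed value column (only the comment column 50 is stated above): the value and any trailing comment are padded out to fixed columns.

col_value, col_comment = 40, 50   # col_comment matches the "column 50" note; col_value is a guess

def format_define(name, value, comment=''):
    line = f'#define {name}'.ljust(col_value - 1) + f' {value}'
    return line.ljust(col_comment - 1) + (f' {comment}' if comment else '')

print(format_define('MY_FAN_PIN', 'PA8', '// hypothetical example'))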


@@ -81,6 +81,7 @@ def Upload(source, target, env):
_Send('M21')
Responses = _Recv()
if len(Responses) < 1 or not any('SD card ok' in r for r in Responses):
debugPrint(Responses)
raise Exception('Error accessing SD card')
debugPrint('SD Card OK')
return True