summaryrefslogtreecommitdiff
path: root/gerber/gerber.py
blob: d3f1ff18bb74e96c69b6b5efcd778d80fa03f89b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re


def red(s):
    """Return *s* wrapped in ANSI escape codes so terminals show it bold red."""
    return "\033[1;31m%s\033[0;m" % (s,)


class Statement:
    """Base type for every statement produced by the Gerber tokenizer."""

    def __init__(self):
        # No shared state yet; subclasses carry their own payload.
        pass


class ParamStmt(Statement):
    """A parameter statement (text delimited by ``%`` characters)."""

    def __init__(self):
        # Placeholder: the parameter contents are not retained yet.
        pass


class CoordStmt(Statement):
    """A coordinate/operation statement (X/Y/I/J data with a D-code)."""

    def __init__(self):
        # Placeholder: the coordinate values are not retained yet.
        pass


class ApertureStmt(Statement):
    """An aperture-selection statement (optional ``G54`` plus a D-code)."""

    def __init__(self):
        # Placeholder: the selected aperture number is not retained yet.
        pass


class CommentStmt(Statement):
    """A ``G04`` comment statement carrying its comment text."""

    def __init__(self, comment):
        # Raw text captured from the G04 statement.
        self.comment = comment


class EofStmt(Statement):
    """The ``M02*`` end-of-file statement."""


class UnexpectedStmt(Statement):
    """A line no recognizer matched; keeps the offending text for reporting."""

    def __init__(self, line):
        # The unparsed input line, stripped of its EOL.
        self.line = line


class Gerber:
    """Line-oriented tokenizer for RS-274X (Gerber) files.

    ``parse(filename)`` reads a file, turns every recognised construct
    into a ``Statement`` subclass instance stored in ``self.tokens``,
    and prints any unrecognised lines in red.
    """

    # Regex fragments used to assemble the compiled patterns below.
    NUMBER = r"[\+-]?\d+"
    FUNCTION = r"G\d{2}"
    # Characters permitted inside a G04 comment.  The original pattern
    # contained mis-encoded curly quotes (” ’); plain ASCII quotes are
    # what the Gerber character set intends.
    STRING = r"[a-zA-Z0-9_+-/!?<>\"'(){}.\|&@# :]+"

    COORD_OP = r"D[0]?[123]"

    PARAM_STMT = re.compile(r"%.*%")

    COORD_STMT = re.compile((
        r"(?P<f>{f})?"
        r"(X(?P<x>{number}))?(Y(?P<y>{number}))?"
        r"(I(?P<i>{number}))?(J(?P<j>{number}))?"
        r"(?P<op>{op})?\*".format(number=NUMBER, f=FUNCTION, op=COORD_OP)))

    APERTURE_STMT = re.compile(r"(G54)?D\d+\*")

    COMMENT_STMT = re.compile(r"G04(?P<comment>{string})\*".format(string=STRING))

    EOF_STMT = re.compile(r"M02\*")

    def __init__(self):
        # Tokens produced by the most recent parse() call.
        self.tokens = []

    def parse(self, filename):
        """Tokenize *filename* into self.tokens; print unrecognised lines.

        :param filename: path of the Gerber file to read.
        """
        # Context manager closes the file even on error (the original
        # leaked the file handle).
        with open(filename, "r") as fp:
            data = fp.readlines()

        self.tokens = list(self.tokenize(data))

        for token in self.tokens:
            if isinstance(token, UnexpectedStmt):
                print(red("[UNEXPECTED TOKEN]"))
                print(token.line)

    def tokenize(self, data):
        """Yield one Statement per recognised construct in *data*.

        :param data: iterable of raw input lines (EOLs are stripped here).
        """
        multiline = None

        for line in data:
            # Remove EOL; glue continuation lines of a multi-line parameter.
            if multiline:
                line = multiline + line.strip()
            else:
                line = line.strip()

            # Deal with multi-line parameters: accumulate until the
            # closing '%' arrives.
            if line.startswith("%") and not line.endswith("%"):
                multiline = line
                continue
            else:
                multiline = None

            # Parameter statement.
            if self.PARAM_STMT.match(line):
                yield ParamStmt()
                continue

            # Coordinate statements.  BUG FIX: finditer() returns an
            # iterator, which is always truthy, so the original treated
            # *every* line as a coordinate statement and the aperture /
            # comment / eof / unexpected checks below were unreachable.
            # Materialise the matches and drop empty ones (a bare "*"
            # matches because every group in COORD_STMT is optional).
            matches = [m for m in self.COORD_STMT.finditer(line)
                       if any(m.groupdict().values())]
            if matches:
                for _ in matches:
                    yield CoordStmt()
                continue

            # Aperture selection.
            if self.APERTURE_STMT.match(line):
                yield ApertureStmt()
                continue

            # Comment.  BUG FIX: the original called groupdict("comment"),
            # which only sets a *default* value and returns the whole
            # dict; group("comment") extracts the comment text.
            match = self.COMMENT_STMT.match(line)
            if match:
                yield CommentStmt(match.group("comment"))
                continue

            # End of file.
            if self.EOF_STMT.match(line):
                yield EofStmt()
                continue

            yield UnexpectedStmt(line)

if __name__ == "__main__":
    import sys

    # Parse each file given on the command line with a fresh parser.
    for path in sys.argv[1:]:
        parser = Gerber()
        parser.parse(path)