Last active
September 30, 2020 00:16
-
-
Save cowboy/67f9248a9e82c11cfe19abbc4b2774fb to your computer and use it in GitHub Desktop.
generate boms: combine 1 or more KiBoM generated csv files with an id column name like "Digikey" (where a digikey part number is stored) and output IMO a much better csv file (note: do NOT number rows or group fields)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
; https://github.com/SchrodingersGat/KiBoM | |
; | |
[BOM_OPTIONS] | |
; General BoM options here | |
; If 'ignore_dnf' option is set to 1, rows that are not to be fitted on the PCB will not be written to the BoM file | |
ignore_dnf = 1 | |
; If 'html_generate_dnf' option is set to 1, also generate a list of components not fitted on the PCB (HTML only) | |
html_generate_dnf = 1 | |
; If 'use_alt' option is set to 1, grouped references will be printed in the alternate compressed style eg: R1-R7,R18 | |
use_alt = 0 | |
; If 'number_rows' option is set to 1, each row in the BoM will be prepended with an incrementing row number | |
number_rows = 0 | |
; If 'group_connectors' option is set to 1, connectors with the same footprints will be grouped together, independent of the name of the connector | |
group_connectors = 1 | |
; If 'test_regex' option is set to 1, each component group will be tested against a number of regular-expressions (specified, per column, below). If any matches are found, the row is ignored in the output file | |
test_regex = 1 | |
; If 'merge_blank_fields' option is set to 1, component groups with blank fields will be merged into the most compatible group, where possible | |
merge_blank_fields = 1 | |
; Specify output file name format, %O is the defined output name, %v is the version, %V is the variant name which will be amended according to 'variant_file_name_format'. | |
output_file_name = %O_bom_%v%V | |
; Specify the variant file name format, this is a unique field as the variant is not always used/specified. When it is unused you will want to strip all of this. | |
variant_file_name_format = _(%V) | |
; Field name used to determine if a particular part is to be fitted | |
fit_field = Config | |
; Character used to separate reference designators in output | |
ref_separator = ' ' | |
; Make a backup of the bom before generating the new one, using the following template | |
; make_backup = | |
; Default number of boards to produce if none given on CLI with -n | |
number_boards = 1 | |
; Default PCB variant if none given on CLI with -r | |
board_variant = [u'default'] | |
; Whether to hide headers from output file | |
hide_headers = 0 | |
; Whether to hide PCB info from output file | |
hide_pcb_info = 1 | |
; Interpret as a Digikey P/N and link the following field | |
digikey_link = 0 | |
[IGNORE_COLUMNS] | |
; Any column heading that appears here will be excluded from the Generated BoM | |
; Titles are case-insensitive | |
Description | |
Part | |
Part Lib | |
Sheetpath | |
; Footprint Lib | |
; Footprint | |
; Datasheet | |
[COLUMN_ORDER] | |
; Columns will appear in the order they are listed here | |
; Titles are case-insensitive | |
References | |
Quantity Per PCB | |
Value | |
Description | |
; Part | |
; Part Lib | |
Footprint | |
Footprint Lib | |
; Sheetpath | |
; Build Quantity | |
Datasheet | |
[GROUP_FIELDS] | |
; List of fields used for sorting individual components into groups | |
; Components which match (comparing *all* fields) will be grouped together | |
; Field names are case-insensitive | |
; Part | |
; Part Lib | |
; Value | |
; Footprint | |
; Footprint Lib | |
[COMPONENT_ALIASES] | |
; A series of values which are considered to be equivalent for the part name | |
; Each line represents a list of equivalent component name values separated by white space | |
; e.g. 'c c_small cap' will ensure the equivalent capacitor symbols can be grouped together | |
; Aliases are case-insensitive | |
c c_small cap capacitor | |
r r_small res resistor | |
sw switch | |
l l_small inductor | |
zener zenersmall | |
d diode d_small | |
[JOIN] | |
; A list of rules to join the content of fields | |
; Each line is a rule, the first name is the field that will receive the data | |
; from the other fields | |
; Use tab (ASCII 9) as separator | |
; Field names are case sensitive | |
[REGEX_INCLUDE] | |
; A series of regular expressions used to include parts in the BoM | |
; If there are any regex defined here, only components that match against ANY of them will be included in the BOM | |
; Column names are case-insensitive | |
; Format is: "[ColumnName] [Regex]" (white-space separated) | |
[REGEX_EXCLUDE] | |
; A series of regular expressions used to exclude parts from the BoM | |
; If a component matches ANY of these, it will be excluded from the BoM | |
; Column names are case-insensitive | |
; Format is: "[ColumnName] [Regex]" (white-space separated) | |
References ^TP[0-9]* | |
References ^FID | |
Part mount.*hole | |
Part solder.*bridge | |
Part solder.*jumper | |
Part test.*point | |
Footprint test.*point | |
Footprint mount.*hole | |
Footprint fiducial | |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
import os | |
import sys | |
import getopt | |
import csv | |
import re | |
import json | |
def usage(code=0):
    """Print a one-line usage synopsis and terminate with exit status *code*."""
    script = os.path.basename(__file__)
    print('Usage: {} --id-column-name <name> file1.csv [...fileN.csv]'.format(script))
    sys.exit(code)
def fail_args():
    """Bail out with exit status 1 after showing usage (bad command line)."""
    usage(code=1)
def error(msg):
    """Print an error message to stdout."""
    print('Error: {}'.format(msg))
def j(thing):
    """Debug helper: dump *thing* to stdout as JSON."""
    serialized = json.dumps(thing)
    print(serialized)
# Parse CLI args and print usage if necessary | |
# Parse CLI args and print usage if necessary
def parse_args(argv):
    """Parse command-line arguments.

    Returns (options, csv_files): *options* maps 'id-column-name' to the
    user-supplied column name (or None when not given); *csv_files* is the
    list of positional csv file paths. Exits via usage()/fail_args() on
    -h/--help, unknown options, or when no csv file is supplied.
    """
    options = {'id-column-name': None}
    try:
        parsed, positional = getopt.getopt(
            argv, "hi:", ["help", "id-column-name="])
    except getopt.GetoptError:
        fail_args()
    for flag, value in parsed:
        if flag in ('-h', '--help'):
            usage()
        elif flag in ("-i", "--id-column-name"):
            options['id-column-name'] = value
    if not positional:
        error('Missing csv file(s)')
        fail_args()
    return options, positional
# Read multiple csv BOM files into a single list | |
# Read multiple csv BOM files into a single list
def read_boms(bom_files):
    """Read one or more KiBoM csv files into a single list of row dicts.

    Each row becomes a plain dict keyed by the csv header names; files are
    concatenated in the order given.
    """
    bom_list = []
    for bom_file in bom_files:
        # newline='' is required by the csv module so that quoted fields
        # containing embedded line breaks are parsed correctly.
        with open(bom_file, newline='') as f:
            reader = csv.DictReader(f, skipinitialspace=True)
            bom_list.extend(dict(row) for row in reader)
    return bom_list
# Print csv with the fields we care about to STDOUT | |
# Print csv with the fields we care about to STDOUT
def print_csv(bom_list):
    """Write the combined BOM as csv to STDOUT.

    Column order follows the first row; columns we never want in the
    output are dropped. Does nothing for an empty BOM (the original
    crashed with IndexError on bom_list[0]).
    """
    if not bom_list:
        return
    header = {**bom_list[0]}
    # Columns excluded from the generated csv
    for unwanted in ('Footprint Lib', 'Description', 'Part', 'Part Lib'):
        header.pop(unwanted, None)
    writer = csv.DictWriter(
        sys.stdout, fieldnames=header.keys(), extrasaction='ignore')
    writer.writeheader()
    for item in bom_list:
        writer.writerow(item)
# Normalize any field values after reading in the boms | |
# Normalize any field values after reading in the boms
def normalize_bom_values(bom_list):
    """Return a copy of the BOM with field values cleaned up.

    Currently maps KiCad's '~' placeholder in the Datasheet field to an
    empty string. Input dicts are not mutated.
    """
    normalized = []
    for item in bom_list:
        clean = dict(item)
        if clean.get('Datasheet') == '~':
            clean['Datasheet'] = ''
        normalized.append(clean)
    return normalized
# Split the "References" value (eg. "R101") into ref ("R") and num (101), | |
# sort on ref and num, and then remove ref and num from all dicts | |
# Split the "References" value (eg. "R101") into ref ("R") and num (101),
# and sort on (ref, num) so "R2" sorts before "R10"
def sort_by_ref(bom_list):
    """Return the BOM sorted by reference designator.

    Each 'References' value is split into its alphabetic prefix and
    trailing number ("R101" -> ("R", 101)); rows are ordered by that pair.
    Unusual designators no longer crash: a missing prefix falls back to
    the whole string, a missing number to 0 (the original raised
    AttributeError on a failed regex match).
    """
    def sort_key(item):
        ref = item['References']
        prefix = re.search(r'^[A-Za-z]+', ref)
        suffix = re.search(r'\d+$', ref)
        return (prefix.group(0) if prefix else ref,
                int(suffix.group(0)) if suffix else 0)
    # sorted() is stable, and building keys on the fly avoids the
    # add-then-strip temporary 'ref'/'num' dict entries.
    return sorted(bom_list, key=sort_key)
# Combine multiple items into a single item (with a space-delimited References list) | |
# based on their "id-column-name" (if specified and present), otherwise on a combination | |
# of their value and footprint | |
# Combine multiple items into a single item (with a space-delimited References list)
# based on their "id-column-name" (if specified and present), otherwise on a combination
# of their value and footprint
def combine_refs(bom_list, opts):
    """Group rows describing the same part into a single row.

    Rows are keyed by the id column's value (eg. a Digikey part number)
    when opts['id-column-name'] is set and the row has a non-empty value
    there; otherwise by Value + Footprint + Footprint Lib. Each group
    gets a space-separated 'References' list and a 'Quantity Per PCB'
    count; other fields come from the group's first row. First-seen
    order is preserved.
    """
    group_order = []   # keys in order of first appearance
    groups = {}
    id_col_name = opts.get('id-column-name')
    for item in bom_list:
        # .get so rows from csv files missing a column fall back
        # gracefully instead of raising KeyError
        if id_col_name and item.get(id_col_name):
            key = item[id_col_name]
        else:
            key = '--'.join([item.get('Value', ''),
                             item.get('Footprint', ''),
                             item.get('Footprint Lib', '')])
        if key not in groups:
            groups[key] = {**item, 'References': [], 'Quantity Per PCB': 0}
            group_order.append(key)
        groups[key]['References'].append(item['References'])
        groups[key]['Quantity Per PCB'] += 1
    return [{**groups[key], 'References': ' '.join(groups[key]['References'])}
            for key in group_order]
def main(argv):
    """Entry point: read, normalize, sort, combine and print the BOM csv."""
    options, csv_files = parse_args(argv)
    rows = read_boms(csv_files)
    rows = normalize_bom_values(rows)
    rows = sort_by_ref(rows)
    rows = combine_refs(rows, options)
    print_csv(rows)


if __name__ == "__main__":
    main(sys.argv[1:])
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment