A minimal storj log analyzer

This analyzer shows how much data has been uploaded (uploaded to the storage node, i.e. downloaded from the internet, i.e. ingress traffic) and produces a histogram and total counts from it.

#!/usr/bin/env python3
import sys
import json

def human_readable_size(size_bytes):
    """Format a byte count as a human-readable string, e.g. "1.5 KB".

    The unit is chosen by repeatedly dividing by 1024 (the previous
    implementation derived the unit from the number of *decimal* digits,
    which mislabels values such as 1000 as "0.977 KB" instead of "1000 B").

    :param size_bytes: number of bytes; may be float('inf') for the
        open-ended histogram bin, which is rendered as "inf B".
    :return: formatted string with up to 3 decimal places and a unit suffix.
    """
    if size_bytes == 0:
        return "0B"
    size_name = ("B", "KB", "MB", "GB", "TB")
    if size_bytes == float('inf'):
        # Keep the open-ended histogram bin label readable.
        return "inf B"
    value = size_bytes
    i = 0
    # Scale down by 1024 until the value fits the unit (capped at TB).
    while value >= 1024 and i < len(size_name) - 1:
        value /= 1024
        i += 1
    return f"{round(value, 3)} {size_name[i]}"

def print_histogram(sizes):
    """Print an ASCII-art histogram of the given piece sizes.

    Bins are power-of-two ranges from 1 KiB up to 256 MiB plus one
    open-ended bin for everything larger.  Sizes below 1 KiB fall into
    no bin and are therefore not displayed (they are still included in
    the totals printed by the caller).

    :param sizes: iterable of piece sizes in bytes.
    """
    bins = [
        (1 * 1024, 2 * 1024),
        (2 * 1024, 4 * 1024),
        (4 * 1024, 8 * 1024),
        (8 * 1024, 16 * 1024),
        (16 * 1024, 32 * 1024),
        (32 * 1024, 64 * 1024),
        (64 * 1024, 128 * 1024),
        (128 * 1024, 256 * 1024),
        (256 * 1024, 512 * 1024),
        (512 * 1024, 1 * 1024 * 1024),
        (1 * 1024 * 1024, 2 * 1024 * 1024),
        (2 * 1024 * 1024, 4 * 1024 * 1024),
        (4 * 1024 * 1024, 8 * 1024 * 1024),
        (8 * 1024 * 1024, 16 * 1024 * 1024),
        (16 * 1024 * 1024, 32 * 1024 * 1024),
        # Fixed: this entry previously duplicated the 32..64 KiB bin,
        # so sizes in the 32..64 MiB range were never counted.
        (32 * 1024 * 1024, 64 * 1024 * 1024),
        (64 * 1024 * 1024, 128 * 1024 * 1024),
        (128 * 1024 * 1024, 256 * 1024 * 1024),
        (256 * 1024 * 1024, float('inf'))
    ]
    bin_counts = [0] * len(bins)

    # Tally each size into the first bin whose half-open range contains it.
    for size in sizes:
        for i, (low, high) in enumerate(bins):
            if low <= size < high:
                bin_counts[i] += 1
                break

    max_count = max(bin_counts)
    max_chars = 50  # width of the widest bar

    print("File size histogram:")
    for i, (low, high) in enumerate(bins):
        bin_label = f"{human_readable_size(low)}..{human_readable_size(high)}"
        num_chars = int((bin_counts[i] / max_count) * max_chars) if max_count > 0 else 0
        count_str = f"{bin_counts[i]:8}"
        print(f"{bin_label:<20} | {count_str} | {'█' * num_chars}")

def filter_uploaded_lines(logfile):
    """Scan a storj node log and report ingress (upload) statistics.

    Prints the total uploaded byte count, the number of uploads and a
    size histogram.  The file is streamed line by line instead of being
    read into memory at once, so arbitrarily large logs can be processed.

    :param logfile: path to the storj log file.
    """
    total_size = 0
    upload_count = 0
    sizes = []

    try:
        with open(logfile, 'r') as file:
            for line in file:
                if "uploaded" not in line:
                    continue
                upload_count += 1
                try:
                    # The JSON payload follows the "uploaded" marker.
                    json_part = line.split('uploaded')[-1].strip()
                    data = json.loads(json_part)
                    size = data.get("Size", 0)
                    total_size += size
                    sizes.append(size)
                except json.JSONDecodeError:
                    print(f"Failed to decode JSON from line: {line}")
                except Exception as e:
                    print(f"An error occurred while processing line: {e}")

        print(f"Total size of uploaded pieces: {human_readable_size(total_size)}")
        print(f"Total number of uploads: {upload_count}")
        print_histogram(sizes)

    except FileNotFoundError:
        print(f"The file {logfile} does not exist.")
    except Exception as e:
        print(f"An error occurred: {e}")

if __name__ == "__main__":
    # Expect exactly one argument: the path to the storj log file.
    if len(sys.argv) == 2:
        filter_uploaded_lines(sys.argv[1])
    else:
        print("Usage: python filter_log.py <logfile>")

Run it on the storj logfile:

python3 storj-stats.py storage/storj.log

Example output

Total size of uploaded pieces: 156.995 GB
Total number of uploads: 1036601
File size histogram:
1.0 KB..2.0 KB       |    20164 | ██
2.0 KB..4.0 KB       |   402924 | █████████████████████████████████████████
4.0 KB..8.0 KB       |    18736 | █
8.0 KB..16.0 KB      |    15639 | █
16.0 KB..32.0 KB     |    10104 | █
32.0 KB..64.0 KB     |    11642 | █
64.0 KB..128.0 KB    |     9830 | █
128.0 KB..256.0 KB   |   483561 | ██████████████████████████████████████████████████
256.0 KB..512.0 KB   |    30104 | ███
512.0 KB..1.0 MB     |     3835 | 
1.0 MB..2.0 MB       |    11190 | █
2.0 MB..4.0 MB       |     8296 | 
4.0 MB..8.0 MB       |        5 | 
8.0 MB..16.0 MB      |        0 | 
16.0 MB..32.0 MB     |        0 | 
32.0 KB..64.0 KB     |        0 | 
64.0 MB..128.0 MB    |        0 | 
128.0 MB..256.0 MB   |        0 | 
256.0 MB..inf B      |        0 | 

 

Posted by Uli Köhler in Python

How to create empty pandas DataFrame with same columns and types as existing DataFrame

empty_df = pd.DataFrame(columns=df.columns).astype(df.dtypes)

Full example

import pandas as pd

# Initialize data to lists.
data = {'Name': ['Tom', 'Nick', 'John'],
        'Age': [20, 21, 19]}

# Create DataFrame
df = pd.DataFrame(data)

empty_df = pd.DataFrame(columns=df.columns).astype(df.dtypes)

 

Posted by Uli Köhler in pandas, Python

Python script to check KiCad footprint libraries for duplicate UUIDs

This script will check each individual footprint file for duplicate UUIDs

#!/usr/bin/env python3
import os
import re
import argparse
from collections import defaultdict

def find_kicad_mod_files(root_dir):
    """Recursively collect the paths of all .kicad_mod files below *root_dir*."""
    return [
        os.path.join(dirpath, entry)
        for dirpath, _, entries in os.walk(root_dir)
        for entry in entries
        if entry.endswith('.kicad_mod')
    ]

def extract_uuids(file_path):
    """Extract all UUIDs from a given .kicad_mod file.

    Uses findall() per line so that multiple (uuid "...") expressions
    on the same line are all captured; a single search() would only
    return the first one.

    :param file_path: path to the .kicad_mod file.
    :return: list of UUID strings in order of appearance.
    """
    uuid_pattern = re.compile(r'\(uuid "([0-9a-fA-F-]+)"\)')
    uuids = []
    with open(file_path, 'r') as file:
        for line in file:
            uuids.extend(uuid_pattern.findall(line))
    return uuids

def check_duplicate_uuids(uuids):
    """Return the UUIDs that occur more than once in *uuids*."""
    occurrences = {}
    for u in uuids:
        occurrences[u] = occurrences.get(u, 0) + 1
    return [u for u, n in occurrences.items() if n > 1]

def main(root_dir):
    """Report duplicate UUIDs for every .kicad_mod file below *root_dir*."""
    for file_path in find_kicad_mod_files(root_dir):
        duplicates = check_duplicate_uuids(extract_uuids(file_path))
        if duplicates:
            print(f"Duplicate UUIDs found in {file_path}: {duplicates}")

if __name__ == "__main__":
    # Single positional argument: the directory tree to scan.
    arg_parser = argparse.ArgumentParser(description='Check for duplicate UUIDs in .kicad_mod files.')
    arg_parser.add_argument('root_dir', type=str, help='The root directory to search for .kicad_mod files.')
    main(arg_parser.parse_args().root_dir)

Usage

python3 check-duplicate-uuids.py kicad-footprints

Example output

Duplicate UUIDs found in ./Connector_Audio.pretty/Jack_3.5mm_Lumberg_1503_07_Horizontal.kicad_mod: ['5d89b84d-1724-4295-a6de-f4c29a1b88c4', '97162d26-e840-41f4-adb4-b92b38c5a58d', '596db706-3656-4d79-8d35-c213f53513c2']
Duplicate UUIDs found in ./Module.pretty/Arduino_Nano_WithMountingHoles.kicad_mod: ['afb53f55-89fc-467f-b192-5ef16ddd17c7']
Duplicate UUIDs found in ./RF_GPS.pretty/ublox_SAM-M8Q.kicad_mod: ['f39553a8-4ba2-4021-a968-74b5a9402e10']
Duplicate UUIDs found in ./Connector_RJ.pretty/RJ12_Amphenol_54601-x06_Horizontal.kicad_mod: ['ca7c8ade-6e68-4498-be5c-636214cec9ba']
Duplicate UUIDs found in ./Connector_RJ.pretty/RJ45_Amphenol_RJMG1BD3B8K1ANR.kicad_mod: ['a7ea8871-0814-496d-bea8-76acf87a9f1d']
Duplicate UUIDs found in ./Rotary_Encoder.pretty/RotaryEncoder_Bourns_Horizontal_PEC12R-2x17F-Sxxxx.kicad_mod: ['fbe20761-a4dd-4717-a1ee-ee163a29403c']
Duplicate UUIDs found in ./Rotary_Encoder.pretty/RotaryEncoder_Bourns_Vertical_PEC12R-3x17F-Sxxxx.kicad_mod: ['2121f37f-d564-4d19-b967-c2e3a11ee91d', 'da838f91-01ea-4baa-80d5-cd2ec5938d95']
Duplicate UUIDs found in ./Rotary_Encoder.pretty/RotaryEncoder_Bourns_Horizontal_PEC12R-2x17F-Nxxxx.kicad_mod: ['fbe20761-a4dd-4717-a1ee-ee163a29403c']
Duplicate UUIDs found in ./OptoDevice.pretty/Everlight_ITR9608-F.kicad_mod: ['31cbb4e1-301b-41e3-84aa-8889ec650a31']
Duplicate UUIDs found in ./Relay_THT.pretty/Relay_SPDT_Finder_40.11.kicad_mod: ['63c01b9c-4254-44f3-88b5-efcdc8449585']
Duplicate UUIDs found in ./Relay_THT.pretty/Relay_DPDT_Finder_40.52.kicad_mod: ['2ad24060-daab-4eec-8ede-478de9642fcc']
Duplicate UUIDs found in ./Connector_BarrelJack.pretty/BarrelJack_Wuerth_694102107102_1.0x3.9mm.kicad_mod: ['c7b3235a-ca21-4d5b-9cb8-5cd1c9ffac20']
Duplicate UUIDs found in ./Connector_BarrelJack.pretty/BarrelJack_Wuerth_694103107102_1.35x3.9mm.kicad_mod: ['c7b3235a-ca21-4d5b-9cb8-5cd1c9ffac20']
Duplicate UUIDs found in ./Potentiometer_THT.pretty/Potentiometer_Bourns_PTV112-4_Dual_Vertical.kicad_mod: ['ce46aad6-930d-4f62-b34c-aa3cfe63aff6', 'fc4aa5ce-c916-41ac-a552-27fa764ea7a1']
Duplicate UUIDs found in ./Connector_TE-Connectivity.pretty/TE_2834006-2_1x02_P4.0mm_Horizontal.kicad_mod: ['cfdfed8e-5d84-4c6d-942c-ee1b9b80924b', '9de75ce7-a14d-4cdd-b274-c644b1382308']
Duplicate UUIDs found in ./Package_DFN_QFN.pretty/Texas_MOF0009A.kicad_mod: ['14f8887d-4cb1-49e2-9618-9bb2e36b411d']
Duplicate UUIDs found in ./Package_DFN_QFN.pretty/OnSemi_SIP-38-6EP-9x7mm_P0.65mm_EP1.2x1.2mm.kicad_mod: ['36c37ae9-764e-4604-8fd1-0bd8202db0fa']

 

Posted by Uli Köhler in KiCAD, Python

Python script to find & count duplicate lines in a text file

#!/usr/bin/env python3
import sys
from collections import Counter

def main():
    """Read a text file and print its duplicate lines with their counts.

    Output format: one "<count> <line>" per duplicate line, sorted by
    ascending count.  Exits with status 1 on usage error or missing file.
    """
    if len(sys.argv) != 2:
        print("Usage: python count_lines.py <filename>")
        sys.exit(1)

    filename = sys.argv[1]

    try:
        # Stream the file straight into the Counter instead of
        # materializing every line in a list first.
        with open(filename, 'r') as file:
            line_counts = Counter(file)
    except FileNotFoundError:
        # Show the offending filename (previously printed a literal
        # "(unknown)" placeholder instead).
        print(f"File '{filename}' not found.")
        sys.exit(1)

    # Lines occurring more than once, sorted by ascending count
    duplicates = sorted(
        [(count, line.strip()) for line, count in line_counts.items() if count > 1],
        key=lambda x: x[0]
    )

    for count, line in duplicates:
        print(f"{count} {line}")

if __name__ == "__main__":
    main()

 

Posted by Uli Köhler in Python

How to convert pandas pd.Timestamp() to epoch timestamp

In order to convert a Pandas pd.Timestamp to a number representing seconds since epoch, use ts.timestamp():

import pandas as pd

# Example pandas timestamp
timestamp = pd.Timestamp('2024-05-28 12:34:56')

# Convert to epoch timestamp with second resolution
epoch_timestamp = timestamp.timestamp() # 1716899696.0

Since this returns a float, you can just multiply it by 1000 to obtain a timestamp with millisecond resolution:

import pandas as pd

# Example pandas timestamp
timestamp = pd.Timestamp('2024-05-28 12:34:56')

# Convert to epoch timestamp with millisecond resolution
epoch_timestamp_ms = timestamp.timestamp() * 1000. # 1716899696000.0

 

Posted by Uli Köhler in pandas, Python

smartctl: How to abort SMART test

Use

smartctl -X /dev/sda

to abort a running SMART test on /dev/sda.

Example output

smartctl 7.2 2020-12-30 r5155 [x86_64-linux-5.15.0-105-generic] (local build)
Copyright (C) 2002-20, Bruce Allen, Christian Franke, www.smartmontools.org

=== START OF OFFLINE IMMEDIATE AND SELF-TEST SECTION ===
Sending command: "Abort SMART off-line mode self-test routine".
Self-testing aborted!

 

Posted by Uli Köhler in Linux

How to fix Docker MariaDB not starting up “upgrade … required but skipped due to $MARIADB_AUTO_UPGRADE”

Problem:

Your MariaDB / MySQL container doesn't start up due to

mariadb_1    | 2024-05-27 03:03:20+00:00 [Note] [Entrypoint]: Entrypoint script for MariaDB Server 1:11.3.2+maria~ubu2204 started.
mariadb_1    | 2024-05-27 03:03:20+00:00 [Warn] [Entrypoint]: /sys/fs/cgroup///memory.pressure not writable, functionality unavailable to MariaDB
mariadb_1    | 2024-05-27 03:03:20+00:00 [Note] [Entrypoint]: Switching to dedicated user 'mysql'
mariadb_1    | 2024-05-27 03:03:20+00:00 [Note] [Entrypoint]: Entrypoint script for MariaDB Server 1:11.3.2+maria~ubu2204 started.
mariadb_1    | 2024-05-27 03:03:21+00:00 [Note] [Entrypoint]: MariaDB upgrade information missing, assuming required
mariadb_1    | 2024-05-27 03:03:21+00:00 [Note] [Entrypoint]: MariaDB upgrade (mariadb-upgrade or creating healthcheck users) required, but skipped due to $MARIADB_AUTO_UPGRADE setting
mariadb_1    | 2024-05-27  3:03:21 0 [Warning] Could not increase number of max_open_files to more than 1024 (request: 32186)
mariadb_1    | 2024-05-27  3:03:21 0 [Warning] Changed limits: max_open_files: 1024  max_connections: 151 (was 151)  table_cache: 421 (was 2000)

Solution:

Add

- MARIADB_AUTO_UPGRADE=1

to the environments section of your docker-compose.yml.

Then, restart the container. MariaDB will then auto-upgrade itself.

 

 

Posted by Uli Köhler in Docker

Python script to find filenames and directories including non-standard characters

This script prints file- and directorynames recursively which have characters in their name not belonging to either the Unicode basic latin or Latin-1 supplement blocks.

#!/usr/bin/env python3
import os
import argparse

def is_valid_char(c):
    """Return True if *c* lies in the Basic Latin or Latin-1 Supplement
    Unicode blocks (code points U+0000..U+00FF).

    Compares code points numerically instead of formatting them as hex
    strings and comparing lexicographically, which is equivalent but
    far clearer.
    """
    return ord(c) <= 0xff

def contains_invalid_chars(name):
    """Return True if *name* has any character outside Basic Latin / Latin-1 Supplement."""
    return not all(is_valid_char(ch) for ch in name)

def check_directory(path):
    """Walk *path* recursively and print every file or directory whose
    name contains characters outside the allowed Unicode blocks."""
    for root, dirnames, filenames in os.walk(path):
        for entry in dirnames + filenames:
            if contains_invalid_chars(entry):
                print(os.path.join(root, entry))

def main():
    """Parse the command line and run the name check on the given directory."""
    arg_parser = argparse.ArgumentParser(description="Check for files and directories with invalid characters in their names.")
    arg_parser.add_argument('path', type=str, help="The path to the directory to check.")
    args = arg_parser.parse_args()

    if os.path.isdir(args.path):
        check_directory(args.path)
    else:
        print(f"The path {args.path} is not a valid directory.")

if __name__ == "__main__":
    main()

 

Posted by Uli Köhler in Python

Python script to merge multiple vCard (.vcf) files into one.

#!/usr/bin/env python3
import os
import sys

# --- Command line handling -------------------------------------------------
# The single argument is the directory containing the .vcf files to merge.
if len(sys.argv) != 2:
    print("Usage: python merge_vcf.py <directory>")
    sys.exit(1)

source_dir = sys.argv[1]

if not os.path.isdir(source_dir):
    print(f"{source_dir} is not a valid directory.")
    sys.exit(1)

# The merged output is named after the directory itself, e.g. "contacts.vcf",
# and is written to the current working directory.
merged_name = os.path.basename(os.path.normpath(source_dir)) + '.vcf'

if os.path.exists(merged_name):
    # Never overwrite a previous merge result.
    print(f"{merged_name} already exists. Exiting...")
else:
    # Every .vcf file in the source directory, excluding the output file itself.
    input_files = [
        entry for entry in os.listdir(source_dir)
        if entry.endswith('.vcf') and entry != merged_name
    ]

    with open(merged_name, 'w', encoding="utf-8") as merged:
        for name in input_files:
            with open(os.path.join(source_dir, name), 'r', encoding="utf-8") as part:
                try:
                    merged.write(part.read())
                    # Ensure there's a new line between different vCard files
                    merged.write('\n')
                except UnicodeEncodeError:
                    print("Encode error while processing", name)

    print(f"All vCard files have been merged into {merged_name}")

 

Posted by Uli Köhler in Python

How to convert Mailchimp-like subscriber CSV to vCards using Python

If you’ve got a CSV with a header such as

"Email Address","First Name","Last Name",MEMBER_RATING,OPTIN_TIME,OPTIN_IP,CONFIRM_TIME,CONFIRM_IP,LATITUDE,LONGITUDE,GMTOFF,DSTOFF,TIMEZONE,CC,REGION,LAST_CHANGED,LEID,EUID,NOTES

you can use the following Python script to convert it to a directory of vcard files:

#!/usr/bin/env python3
import csv
import os
import argparse

# --- Command line ----------------------------------------------------------
arg_parser = argparse.ArgumentParser(description='Convert a CSV file of contacts to vCard files.')
arg_parser.add_argument('csv_file', type=str, help='Path to the CSV file containing contacts')
cli_args = arg_parser.parse_args()

# Input CSV with one contact per row
csv_file_path = cli_args.csv_file

# All generated .vcf files go into this directory
vcard_directory = 'vcards'
os.makedirs(vcard_directory, exist_ok=True)

# Convert each CSV row into one vCard file
with open(csv_file_path, mode='r', encoding='utf-8') as csv_file:
    for contact in csv.DictReader(csv_file):
        # Relevant columns of the Mailchimp-like export
        email = contact['Email Address']
        first_name = contact['First Name']
        last_name = contact['Last Name']
        latitude = contact['LATITUDE']
        longitude = contact['LONGITUDE']
        notes = contact['NOTES']

        # Assemble the vCard line by line; GEO and NOTE are optional.
        vcard_lines = [
            "BEGIN:VCARD\n",
            "VERSION:3.0\n",
            f"N:{last_name};{first_name};;;\n",
            f"FN:{first_name} {last_name}\n",
            f"EMAIL;TYPE=INTERNET:{email}\n",
        ]
        if latitude and longitude:
            vcard_lines.append(f"GEO:{latitude};{longitude}\n")
        if notes:
            vcard_lines.append(f"NOTE:{notes}\n")
        vcard_lines.append("END:VCARD\n")

        # One file per contact, e.g. "John_Doe.vcf"
        vcard_file_name = f"{first_name}_{last_name}.vcf".replace(" ", "_")
        vcard_file_path = os.path.join(vcard_directory, vcard_file_name)

        with open(vcard_file_path, mode='w', encoding='utf-8') as vcard_file:
            vcard_file.write("".join(vcard_lines))

print(f"vCard files have been created in the '{vcard_directory}' directory.")

 

Posted by Uli Köhler in Python

How to convert iChat message backups (.ichat, Apple Binary Property List) using Python

First, download this tool:

wget https://raw.githubusercontent.com/cclgroupltd/ccl-bplist/master/ccl_bplist.py

Now create bplist2json.py which is based on this old gist by Benno Kruit

#!/usr/bin/env python3
"""
Convert an Apple Binary Property List (bplist) to json
"""

import ccl_bplist # https://github.com/cclgroupltd/ccl-bplist

from datetime import datetime

def clean_archive(d):
    """Recursively drop '$'-prefixed bookkeeping keys from a deserialised
    NSKeyedArchiver structure, returning plain dicts/lists."""
    # Exact type() checks (not isinstance) are kept deliberately to
    # preserve the original dispatch semantics for ccl_bplist's types.
    kind = type(d)
    if kind in (dict, ccl_bplist.NsKeyedArchiverDictionary):
        return {key: clean_archive(value)
                for key, value in d.items()
                if not key.startswith('$')}
    if kind == ccl_bplist.NsKeyedArchiverList:
        return [clean_archive(item) for item in d]
    return d

def bplist_dict(fobj):
    """Deserialise an NSKeyedArchiver bplist file object into plain
    Python containers (dicts/lists)."""
    raw_plist = ccl_bplist.load(fobj)
    ccl_bplist.set_object_converter(ccl_bplist.NSKeyedArchiver_common_objects_convertor)
    deserialised = ccl_bplist.deserialise_NsKeyedArchiver(raw_plist)
    return clean_archive(deserialised)


if __name__ == '__main__':
    import argparse, sys, json
    parser = argparse.ArgumentParser(description=__doc__)
    # Default to the *binary* stdin buffer: bplists are binary data, and
    # argparse applies FileType('rb') only to explicitly given paths, not
    # to the default value (sys.stdin is a text-mode stream).
    parser.add_argument('--input', '-i', nargs='?', type=argparse.FileType('rb'),
        default=sys.stdin.buffer, help='default: stdin')
    args = parser.parse_args()

    class ExportEncoder(json.JSONEncoder):
        """JSON encoder that serialises datetimes as ISO 8601 strings
        and raw bytes as latin-1-decoded text."""
        def default(self, o):
            if isinstance(o, datetime):
                return o.isoformat()
            if isinstance(o, bytes):
                # iso-8859-1 maps every byte value 0..255 to a code point,
                # so arbitrary binary data survives the round trip.
                return o.decode('iso-8859-1')
            return json.JSONEncoder.default(self, o)

    print(json.dumps(bplist_dict(args.input), cls=ExportEncoder, indent=4))

Now create my script extract-chatlog.py which converts the JSON files to human readable chat logs

#!/usr/bin/env python3
import json
import sys
from datetime import datetime

# Render an ISO-8601 datetime string as "YYYY-MM-DD HH:MM:SS"
def format_datetime(dt_str):
    return datetime.fromisoformat(dt_str).strftime("%Y-%m-%d %H:%M:%S")

def main(input_file):
    """Print a human-readable chat log from a converted .ichat JSON file."""
    with open(input_file, 'r', encoding='utf-8') as f:
        data = json.load(f)

    # The message list lives at index 2 of the top-level JSON array.
    messages = data[2]

    # Emit one "<time> - <sender>: <text>" line per message to stdout.
    for message in messages:
        sender = message['Sender']['ID']
        timestamp = format_datetime(message['Time'])
        text = message['MessageText']['NSString']
        print(f"{timestamp} - {sender}: {text}")

if __name__ == "__main__":
    # Exactly one argument: the JSON file produced by bplist2json.py
    if len(sys.argv) != 2:
        print("Usage: python script.py <input_file>")
        sys.exit(1)

    main(sys.argv[1])

Now it’s time to run it. Generally, run bplist2json.py on the .ichat to obtain a JSON file and then run extract-chatlog.py to obtain a text conversation log with timestamps etc.

Here’s a bash oneliner to run it on all *.ichat files in the current directory

for i in *.ichat ; do ./bplist2json.py -i ${i} > "${i}.json" ; ./extract-chatlog.py "${i}.json" > ${i}.txt ; done

 

 

Posted by Uli Köhler in Python

What does ‘BSC’ mean in datasheet dimensions?

In many datasheets, you can find dimensions like 6.00 BSC and in most datasheets, it is not defined what BSC means.

BSC means that this value is given without tolerances as a value which is exact (at least in theory). In practice of course, the dimension will have some tolerances, but these tolerances are not given in the datasheet.

Source: Microchip datasheet

Posted by Uli Köhler in Electronics

How to fix CMake ‘Could not find a package configuration file provided by “Coin” …’ on Ubuntu

Problem:

When trying to compile your CMake project, you see an error message such as

CMake Error at cMake/FreeCAD_Helpers/SetupCoin3D.cmake:13 (find_package):
  Could not find a package configuration file provided by "Coin" with any of
  the following names:

    CoinConfig.cmake
    coin-config.cmake

  Add the installation prefix of "Coin" to CMAKE_PREFIX_PATH or set
  "Coin_DIR" to a directory containing one of the above files.  If "Coin"
  provides a separate development package or SDK, be sure it has been
  installed.
Call Stack (most recent call first):
  CMakeLists.txt:86 (SetupCoin3D)

Solution:

sudo apt -y install libcoin-dev

Tested on Ubuntu 22.04

Posted by Uli Köhler in CMake, Linux

How to fix CMake ‘The imported target “Qt6::lprodump” references the file /usr/lib/qt6/libexec/lprodump …’ on Ubuntu

Problem:

When trying to compile your CMake project, you see an error message such as

CMake Error at /usr/lib/x86_64-linux-gnu/cmake/Qt6LinguistTools/Qt6LinguistToolsTargets.cmake:131 (message):
  The imported target "Qt6::lprodump" references the file

     "/usr/lib/qt6/libexec/lprodump"

  but this file does not exist.  Possible reasons include:

  * The file was deleted, renamed, or moved to another location.

  * An install or uninstall procedure did not complete successfully.

  * The installation package was faulty and contained

     "/usr/lib/x86_64-linux-gnu/cmake/Qt6LinguistTools/Qt6LinguistToolsTargets.cmake"

  but not all the files it references.

Call Stack (most recent call first):
  /usr/lib/x86_64-linux-gnu/cmake/Qt6LinguistTools/Qt6LinguistToolsConfig.cmake:47 (include)
  /usr/lib/x86_64-linux-gnu/cmake/Qt6/Qt6Config.cmake:219 (find_package)
  cMake/FreeCAD_Helpers/SetupQt.cmake:33 (find_package)
  CMakeLists.txt:82 (include)

Solution:

sudo apt -y install qt6-tools-dev-tools

Tested on Ubuntu 22.04

Posted by Uli Köhler in CMake, Linux

How to fix CMake ‘The imported target “Qt6::lconvert” references the file /usr/lib/qt6/bin/lconvert …’ on Ubuntu

Problem:

When trying to compile your CMake project, you see an error message such as

CMake Error at /usr/lib/x86_64-linux-gnu/cmake/Qt6LinguistTools/Qt6LinguistToolsTargets.cmake:131 (message):
  The imported target "Qt6::lconvert" references the file

     "/usr/lib/qt6/bin/lconvert"

  but this file does not exist.  Possible reasons include:

  * The file was deleted, renamed, or moved to another location.

  * An install or uninstall procedure did not complete successfully.

  * The installation package was faulty and contained

     "/usr/lib/x86_64-linux-gnu/cmake/Qt6LinguistTools/Qt6LinguistToolsTargets.cmake"

  but not all the files it references.

Call Stack (most recent call first):
  /usr/lib/x86_64-linux-gnu/cmake/Qt6LinguistTools/Qt6LinguistToolsConfig.cmake:47 (include)
  /usr/lib/x86_64-linux-gnu/cmake/Qt6/Qt6Config.cmake:219 (find_package)
  cMake/FreeCAD_Helpers/SetupQt.cmake:33 (find_package)
  CMakeLists.txt:82 (include)

 

Solution:

sudo apt -y install qt6-l10n-tools

Tested on Ubuntu 22.04

Posted by Uli Köhler in CMake, Linux

How to fix CMake ‘Failed to find Qt component “Core5Compat”‘ on Ubuntu

Problem:

When trying to compile your CMake project, you see an error message such as

CMake Error at cMake/FreeCAD_Helpers/SetupQt.cmake:33 (find_package):
  Found package configuration file:

    /usr/lib/x86_64-linux-gnu/cmake/Qt6/Qt6Config.cmake

  but it set Qt6_FOUND to FALSE so package "Qt6" is considered to be NOT
  FOUND.  Reason given by package:

  Failed to find Qt component "Core5Compat".

  Expected Config file at
  "/usr/lib/x86_64-linux-gnu/cmake/Qt6Core5Compat/Qt6Core5CompatConfig.cmake"
  does NOT exist

  

Call Stack (most recent call first):
  CMakeLists.txt:82 (include)

 

 

Solution:

sudo apt -y install libqt6core5compat6-dev

Tested on Ubuntu 22.04

Posted by Uli Köhler in CMake, Linux

How to fix CMake ‘Failed to find Qt component “SvgWidgets”‘ on Ubuntu

Problem:

When trying to compile your CMake project, you see an error message such as

CMake Error at cMake/FreeCAD_Helpers/SetupQt.cmake:33 (find_package):
  Found package configuration file:

    /usr/lib/x86_64-linux-gnu/cmake/Qt6/Qt6Config.cmake

  but it set Qt6_FOUND to FALSE so package "Qt6" is considered to be NOT
  FOUND.  Reason given by package:

  Failed to find Qt component "SvgWidgets".

  Expected Config file at
  "/usr/lib/x86_64-linux-gnu/cmake/Qt6SvgWidgets/Qt6SvgWidgetsConfig.cmake"
  does NOT exist

  

Call Stack (most recent call first):
  CMakeLists.txt:82 (include)

 

Solution:

sudo apt -y install libqt6svg6-dev

Tested on Ubuntu 22.04

Posted by Uli Köhler in CMake, Linux

How to fix CMake ‘Could not find a package configuration file provided by “VTK”‘ on Ubuntu

Problem:

When trying to compile your CMake project, you see an error message such as

CMake Error at cMake/FreeCAD_Helpers/SetupSalomeSMESH.cmake:29 (find_package):
  Could not find a package configuration file provided by "VTK" with any of
  the following names:

    VTKConfig.cmake
    vtk-config.cmake

  Add the installation prefix of "VTK" to CMAKE_PREFIX_PATH or set "VTK_DIR"
  to a directory containing one of the above files.  If "VTK" provides a
  separate development package or SDK, be sure it has been installed.
Call Stack (most recent call first):
  CMakeLists.txt:71 (SetupSalomeSMESH)

Solution:

sudo apt -y install libvtk9-dev

Note that depending on the version of Ubuntu you use, you might need to use a slightly different version of VTK. libvtk9-dev (VTK 9) works for Ubuntu 22.04.

Posted by Uli Köhler in CMake, Linux

How to fix CMake ‘Failed to find XercesC…’ on Ubuntu

Problem:

When trying to compile your CMake project, you see an error message such as

CMake Error at /usr/share/cmake-3.25/Modules/FindPackageHandleStandardArgs.cmake:230 (message):
  Failed to find XercesC (missing: XercesC_LIBRARY XercesC_INCLUDE_DIR
  XercesC_VERSION)
Call Stack (most recent call first):
  /usr/share/cmake-3.25/Modules/FindPackageHandleStandardArgs.cmake:600 (_FPHSA_FAILURE_MESSAGE)
  /usr/share/cmake-3.25/Modules/FindXercesC.cmake:112 (FIND_PACKAGE_HANDLE_STANDARD_ARGS)
  cMake/FreeCAD_Helpers/SetupXercesC.cmake:4 (find_package)
  CMakeLists.txt:61 (SetupXercesC)

Solution:

sudo apt -y install libxerces-c-dev

 

Posted by Uli Köhler in CMake, Linux

How to fix CMake ‘Could not find a package configuration file provided by “yaml-cpp” …’ on Ubuntu

Problem:

When you try to build a CMake project, you see an error message such as

CMake Error at cMake/FreeCAD_Helpers/SetupLibYaml.cmake:3 (find_package):
  By not providing "Findyaml-cpp.cmake" in CMAKE_MODULE_PATH this project has
  asked CMake to find a package configuration file provided by "yaml-cpp",
  but CMake did not find one.

  Could not find a package configuration file provided by "yaml-cpp" with any
  of the following names:

    yaml-cppConfig.cmake
    yaml-cpp-config.cmake

  Add the installation prefix of "yaml-cpp" to CMAKE_PREFIX_PATH or set
  "yaml-cpp_DIR" to a directory containing one of the above files.  If
  "yaml-cpp" provides a separate development package or SDK, be sure it has
  been installed.
Call Stack (most recent call first):
  CMakeLists.txt:56 (SetupYamlCpp)

Solution:

sudo apt -y install libyaml-cpp-dev

 

Posted by Uli Köhler in CMake, Linux
This website uses cookies to improve your experience. We'll assume you're ok with this, but you can opt-out if you wish. Cookie settingsACCEPTPrivacy Cookies Policy