Task 220: .FCS File Format

1. List of All Properties of the .FCS File Format Intrinsic to Its File System

The .FCS (Flow Cytometry Standard) file format, primarily version 3.1 (with backward compatibility to 3.0), defines a structured binary file made up of a HEADER segment, a primary TEXT segment of keyword-value pairs, a DATA segment, and an optional ANALYSIS segment. The intrinsic properties are those that define the core file structure, offsets, data encoding, and required parameters for parsing and validation. They are derived from the required keywords and structural elements in the specification. Optional or application-specific keywords (e.g., $COM for comments) are excluded, as they are not intrinsic to the file structure itself.

  • FCS Version: The file format version identifier (e.g., "FCS3.0" or "FCS3.1"), stored as ASCII in the first 6 bytes of the HEADER segment.
  • Delimiter: A single ASCII character (printable, 32-126) marking the start of the TEXT segment and separating keyword-value pairs (e.g., '/').
  • Primary TEXT begin offset: Byte offset from the file start to the first byte of the primary TEXT segment (required; ASCII integer in HEADER, bytes 10-17).
  • Primary TEXT end offset: Byte offset from the file start to the last byte of the primary TEXT segment (required; ASCII integer in HEADER, bytes 18-25). Note that the keywords $BEGINSTEXT/$ENDSTEXT inside the TEXT segment refer to an optional supplemental TEXT segment and are 0 when none is present.
  • $BEGINDATA: Byte offset from the file start to the first byte of the DATA segment (required; ASCII integer in HEADER, bytes 26-33; duplicated as a TEXT keyword).
  • $ENDDATA: Byte offset from the file start to the last byte of the DATA segment (required; ASCII integer in HEADER, bytes 34-41; duplicated as a TEXT keyword).
  • $BEGINANALYSIS: Byte offset from the file start to the first byte of the optional ANALYSIS segment (required; ASCII integer in HEADER, bytes 42-49; 0 if absent).
  • $ENDANALYSIS: Byte offset from the file start to the last byte of the optional ANALYSIS segment (required; ASCII integer in HEADER, bytes 50-57; 0 if absent).
  • $NEXTDATA: Byte offset to the next data set in multi-data-set files (required; 0 when there is no further data set).
  • $BYTEORD: Byte order for multi-byte data values in the DATA segment (required; e.g., "4,3,2,1" for big-endian, "1,2,3,4" for little-endian).
  • $DATATYPE: Data encoding type in the DATA segment (required; 'I' for integer, 'F' for float, 'D' for double, 'A' for ASCII).
  • $MODE: Data storage mode (required; 'L' for list mode, 'C' for correlated histogram, 'U' for uncorrelated histogram; the histogram modes are deprecated in FCS 3.1).
  • $PAR: Number of parameters (channels) per event in the DATA segment (required; integer >=1).
  • $TOT: Total number of events stored in the DATA segment (required; integer).
  • $PnB (for each n=1 to $PAR): Number of bits allocated per value for parameter n (required; e.g., 16, 32).
  • $PnE (for each n=1 to $PAR): Amplification exponent for parameter n (required; two comma-separated floats "f1,f2", where "0,0" indicates a linear scale and, e.g., "4,0.1" indicates a four-decade logarithmic scale).
  • $PnR (for each n=1 to $PAR): Maximum data value (range) for parameter n (required; integer, e.g., 1023 for 10-bit).

These properties ensure the file is self-describing and parsable. The HEADER is fixed at 58 bytes for single-data-set files (6-byte version string + 4 spaces + six 8-byte ASCII offset fields). Unused space within the HEADER and between segments is padded with ASCII spaces (32). All offsets are 0-based byte positions from the start of the file, and each segment's end offset points to its last byte (inclusive). Offsets too large to fit an 8-character HEADER field (>99,999,999) are written as 0 in the HEADER, with the true values carried by the corresponding TEXT keywords.
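To make that fixed layout concrete, here is a small JavaScript sketch that assembles a hypothetical 58-byte HEADER and the start of a matching primary TEXT segment; every offset and keyword value below is invented purely for illustration.

// Hypothetical FCS 3.1 HEADER (58 bytes): version, four spaces, then six
// right-justified, space-padded 8-character ASCII offset fields.
const exampleHeader =
  'FCS3.1' +    // bytes 0-5   version identifier
  '    ' +      // bytes 6-9   four ASCII spaces
  '      58' +  // bytes 10-17 first byte of primary TEXT
  '    1023' +  // bytes 18-25 last byte of primary TEXT (inclusive)
  '    1024' +  // bytes 26-33 first byte of DATA
  '   25599' +  // bytes 34-41 last byte of DATA (4096 events x 3 params x 16 bits)
  '       0' +  // bytes 42-49 first byte of ANALYSIS (0 = absent)
  '       0';   // bytes 50-57 last byte of ANALYSIS (0 = absent)
console.log(exampleHeader.length); // 58

// The primary TEXT segment starts with the delimiter and holds keyword/value
// pairs separated by it (abridged; made-up values consistent with the header above):
const exampleText = '/$BYTEORD/1,2,3,4/$DATATYPE/I/$MODE/L/$PAR/3/$TOT/4096/$P1B/16/$P1E/0,0/$P1R/1024/';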

3. Ghost Blog Embedded HTML JavaScript for Drag-and-Drop .FCS Property Dump

This is a self-contained HTML snippet embeddable in a Ghost blog post (use in an HTML card). It creates a drag-and-drop zone. On drop, it reads the .FCS file as an ArrayBuffer in the browser, parses the intrinsic properties (from the list above), and dumps them to a <pre> element below the zone. No external libraries; pure vanilla JS. Handles basic parsing (assumes FCS 3.0/3.1, single-sample, no supplemental TEXT/ANALYSIS for simplicity; errors logged to console).

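A minimal sketch of such a snippet, matching the description above, is shown below. The element IDs (fcs-drop, fcs-dump), the inline styling, and the naive keyword split are illustrative choices; escaped (doubled) delimiters, supplemental TEXT, and multi-data-set files are not handled.

<div id="fcs-drop" style="border: 2px dashed #888; padding: 2em; text-align: center;">
  Drag and drop an .FCS file here
</div>
<pre id="fcs-dump"></pre>
<script>
(function () {
  var zone = document.getElementById('fcs-drop');
  var dump = document.getElementById('fcs-dump');

  zone.addEventListener('dragover', function (e) { e.preventDefault(); });
  zone.addEventListener('drop', function (e) {
    e.preventDefault();
    var file = e.dataTransfer.files[0];
    if (!file) return;
    var fr = new FileReader();
    fr.onload = function () {
      try {
        dump.textContent = dumpFCS(fr.result);
      } catch (err) {
        console.error(err);
        dump.textContent = 'Could not parse ' + file.name;
      }
    };
    fr.readAsArrayBuffer(file);
  });

  // Decode a byte range as ASCII (HEADER and TEXT segments are small).
  function ascii(bytes, start, end) {
    return String.fromCharCode.apply(null, bytes.subarray(start, end));
  }

  function dumpFCS(arrayBuffer) {
    var bytes = new Uint8Array(arrayBuffer);
    var out = [];
    out.push('FCS Version: ' + ascii(bytes, 0, 6));

    // Six 8-byte ASCII offset fields start at HEADER byte 10.
    var names = ['BEGINSTEXT', 'ENDSTEXT', 'BEGINDATA', 'ENDDATA', 'BEGINANALYSIS', 'ENDANALYSIS'];
    var offsets = {};
    names.forEach(function (name, i) {
      var field = ascii(bytes, 10 + i * 8, 18 + i * 8).trim();
      offsets[name] = field ? parseInt(field, 10) : 0;
      out.push('$' + name + ': ' + offsets[name]);
    });

    // Primary TEXT: the first byte is the delimiter; the end offset is inclusive.
    var delimiter = String.fromCharCode(bytes[offsets.BEGINSTEXT]);
    out.push("Delimiter: '" + delimiter + "'");
    // Naive split into alternating keyword/value tokens.
    var parts = ascii(bytes, offsets.BEGINSTEXT + 1, offsets.ENDSTEXT + 1).split(delimiter);
    var keywords = {};
    for (var i = 0; i + 1 < parts.length; i += 2) {
      keywords[parts[i].trim().toUpperCase()] = parts[i + 1].trim();
    }

    ['$NEXTDATA', '$BYTEORD', '$DATATYPE', '$MODE', '$PAR', '$TOT'].forEach(function (kw) {
      out.push(kw + ': ' + (keywords[kw] || 'Unknown'));
    });
    var par = parseInt(keywords['$PAR'] || '0', 10);
    for (var n = 1; n <= par; n++) {
      ['N', 'B', 'E', 'R'].forEach(function (suffix) {
        var kw = '$P' + n + suffix;
        out.push(kw + ': ' + (keywords[kw] || 'Unknown'));
      });
    }
    return out.join('\n');
  }
})();
</script>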

4. Python Class for .FCS File Handling

This class opens an .FCS file, decodes and prints the intrinsic properties to the console, and supports writing the file back (it reconstructs the HEADER and TEXT segments and copies DATA/ANALYSIS as-is). Parsing operates directly on the raw bytes with ASCII decoding; no third-party libraries are required. Assumes Python 3 and basic FCS 3.0/3.1 single-data-set files.

class FCSReader:
    def __init__(self, filepath):
        self.filepath = filepath
        with open(filepath, 'rb') as f:
            self.buffer = f.read()
        self.keywords = {}
        self.version = ''
        self.delimiter = ''
        self.offsets = {}
        self.par = 0
        self.parse()

    def parse(self):
        # HEADER
        self.version = self.buffer[:6].decode('ascii').rstrip('\x00')
        print(f"FCS Version: {self.version}")

        def parse_offset(start):
            s = self.buffer[start:start+8].decode('ascii').strip()
            return int(s) if s else 0

        self.offsets['BEGINSTEXT'] = parse_offset(10)
        self.offsets['ENDSTEXT'] = parse_offset(18)
        self.offsets['BEGINDATA'] = parse_offset(26)
        self.offsets['ENDDATA'] = parse_offset(34)
        self.offsets['BEGINANALYSIS'] = parse_offset(42)
        self.offsets['ENDANALYSIS'] = parse_offset(50)

        for k, v in self.offsets.items():
            print(f"${k}: {v}")

        # TEXT
        text_start = self.offsets['BEGINSTEXT']
        text_end = self.offsets['ENDSTEXT']
        # End offsets are inclusive, so slice one byte past ENDSTEXT
        text_bytes = self.buffer[text_start:text_end + 1]
        self.delimiter = chr(text_bytes[0])
        print(f"Delimiter: '{self.delimiter}'")

        text_str = text_bytes[1:].decode('ascii', errors='ignore')
        self.parse_keywords(text_str)

        # Keywords
        print(f"$NEXTDATA: {self.keywords.get('$NEXTDATA', 0)}")
        print(f"$BYTEORD: {self.keywords.get('$BYTEORD', 'Unknown')}")
        print(f"$DATATYPE: {self.keywords.get('$DATATYPE', 'Unknown')}")
        print(f"$MODE: {self.keywords.get('$MODE', 'Unknown')}")
        self.par = int(self.keywords.get('$PAR', 0))
        print(f"$PAR: {self.par}")
        tot = int(self.keywords.get('$TOT', 0))
        print(f"$TOT: {tot}")

        for n in range(1, self.par + 1):
            p = f'P{n}'
            print(f"\nParameter {n}:")
            print(f"  $P{n}N: {self.keywords.get(f'${p}N', 'Unknown')}")
            print(f"  $P{n}B: {self.keywords.get(f'${p}B', 'Unknown')}")
            print(f"  $P{n}E: {self.keywords.get(f'${p}E', 'Unknown')}")
            print(f"  $P{n}R: {self.keywords.get(f'${p}R', 'Unknown')}")

    def parse_keywords(self, text):
        # Naive split on the delimiter; escaped (doubled) delimiters are not handled.
        pos = 0
        while pos < len(text):
            delim_pos = text.find(self.delimiter, pos)
            if delim_pos == -1:
                break
            keyword = text[pos:delim_pos].strip().upper()
            pos = delim_pos + 1
            val_end = text.find(self.delimiter, pos)
            if val_end == -1:
                break
            value = text[pos:val_end].strip()
            if keyword:
                self.keywords[keyword] = value
            pos = val_end + 1

    def write(self, output_path):
        with open(output_path, 'wb') as f:
            # HEADER: version, 4 spaces, then six right-justified 8-character ASCII offsets
            header = self.version.encode('ascii').ljust(6) + b' ' * 4
            for key in ['BEGINSTEXT', 'ENDSTEXT', 'BEGINDATA', 'ENDDATA', 'BEGINANALYSIS', 'ENDANALYSIS']:
                header += f"{self.offsets[key]:8d}".encode('ascii')
            f.write(header)
            # Pad with spaces from the end of the HEADER to the start of TEXT
            f.write(b' ' * (self.offsets['BEGINSTEXT'] - len(header)))

            # TEXT: delimiter followed by keyword/value pairs (keywords keep their '$' prefix)
            text_content = self.delimiter.encode('ascii')
            for kw, val in self.keywords.items():
                text_content += f"{kw}{self.delimiter}{val}{self.delimiter}".encode('ascii')
            # Pad to the original TEXT length (ENDSTEXT is inclusive)
            orig_text_len = self.offsets['ENDSTEXT'] - self.offsets['BEGINSTEXT'] + 1
            if len(text_content) > orig_text_len:
                raise ValueError("Reconstructed TEXT segment is longer than the original")
            text_content += b' ' * (orig_text_len - len(text_content))
            f.write(text_content)

            # DATA and ANALYSIS copied as-is (end offsets are inclusive)
            f.write(b' ' * (self.offsets['BEGINDATA'] - (self.offsets['ENDSTEXT'] + 1)))
            f.write(self.buffer[self.offsets['BEGINDATA']:self.offsets['ENDDATA'] + 1])
            if self.offsets['BEGINANALYSIS'] > 0:
                f.write(b' ' * (self.offsets['BEGINANALYSIS'] - (self.offsets['ENDDATA'] + 1)))
                f.write(self.buffer[self.offsets['BEGINANALYSIS']:self.offsets['ENDANALYSIS'] + 1])

        print(f"Written to {output_path}")

# Usage: reader = FCSReader('sample.fcs'); reader.write('output.fcs')

5. Java Class for .FCS File Handling

This Java class reads an .FCS file, decodes/prints intrinsic properties to console, and writes back (reconstructs structure). Uses java.nio for binary I/O. Compile with javac FCSReader.java; run with java FCSReader sample.fcs output.fcs. Assumes Java 8+.

import java.io.*;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.HashMap;
import java.util.Map;

public class FCSReader {
    private String filepath;
    private ByteBuffer buffer;
    private Map<String, String> keywords = new HashMap<>();
    private String version = "";
    private char delimiter = ' ';
    private Map<String, Integer> offsets = new HashMap<>();

    public FCSReader(String filepath) throws IOException {
        this.filepath = filepath;
        // Read the whole file into memory; try-with-resources closes the stream and channel.
        try (FileInputStream fis = new FileInputStream(filepath);
             FileChannel channel = fis.getChannel()) {
            buffer = ByteBuffer.allocate((int) channel.size());
            channel.read(buffer);
            buffer.flip();
        }
        parse();
    }

    private void parse() {
        // HEADER
        byte[] verBytes = new byte[6];
        buffer.get(verBytes);
        version = new String(verBytes).trim();
        System.out.println("FCS Version: " + version);

        buffer.position(10);
        offsets.put("BEGINSTEXT", parseOffset());
        offsets.put("ENDSTEXT", parseOffset());
        offsets.put("BEGINDATA", parseOffset());
        offsets.put("ENDDATA", parseOffset());
        offsets.put("BEGINANALYSIS", parseOffset());
        offsets.put("ENDANALYSIS", parseOffset());

        for (Map.Entry<String, Integer> entry : offsets.entrySet()) {
            System.out.println("$" + entry.getKey() + ": " + entry.getValue());
        }

        // TEXT
        int textStart = offsets.get("BEGINSTEXT");
        int textEnd = offsets.get("ENDSTEXT");
        buffer.position(textStart);
        delimiter = (char) buffer.get();
        System.out.println("Delimiter: '" + delimiter + "'");

        byte[] textBytes = new byte[textEnd - textStart];  // ENDSTEXT is inclusive; the delimiter byte was already consumed
        buffer.get(textBytes);
        String textStr = new String(textBytes).trim();
        parseKeywords(textStr);

        // Keywords
        System.out.println("$NEXTDATA: " + (keywords.getOrDefault("$NEXTDATA", "0")));
        System.out.println("$BYTEORD: " + keywords.getOrDefault("$BYTEORD", "Unknown"));
        System.out.println("$DATATYPE: " + keywords.getOrDefault("$DATATYPE", "Unknown"));
        System.out.println("$MODE: " + keywords.getOrDefault("$MODE", "Unknown"));
        int par = Integer.parseInt(keywords.getOrDefault("$PAR", "0"));
        System.out.println("$PAR: " + par);
        int tot = Integer.parseInt(keywords.getOrDefault("$TOT", "0"));
        System.out.println("$TOT: " + tot);

        for (int n = 1; n <= par; n++) {
            String p = "P" + n;
            System.out.println("\nParameter " + n + ":");
            System.out.println("  $P" + n + "N: " + keywords.getOrDefault("$" + p + "N", "Unknown"));
            System.out.println("  $P" + n + "B: " + keywords.getOrDefault("$" + p + "B", "Unknown"));
            System.out.println("  $P" + n + "E: " + keywords.getOrDefault("$" + p + "E", "Unknown"));
            System.out.println("  $P" + n + "R: " + keywords.getOrDefault("$" + p + "R", "Unknown"));
        }
    }

    private int parseOffset() {
        byte[] offBytes = new byte[8];
        buffer.get(offBytes);
        String offStr = new String(offBytes).trim();
        return offStr.isEmpty() ? 0 : Integer.parseInt(offStr);
    }

    private void parseKeywords(String text) {
        int pos = 0;
        while (pos < text.length()) {
            int delimPos = text.indexOf(delimiter, pos);
            if (delimPos == -1) break;
            String keyword = text.substring(pos, delimPos).trim().toUpperCase();
            pos = delimPos + 1;
            int valEnd = text.indexOf(delimiter, pos);
            if (valEnd == -1) break;
            String value = text.substring(pos, valEnd).trim();
            if (value.startsWith(String.valueOf(delimiter))) value = value.substring(1);
            if (value.endsWith(String.valueOf(delimiter))) value = value.substring(0, value.length() - 1);
            keywords.put(keyword, value);
            pos = valEnd + 1;
        }
    }

    public void write(String outputPath) throws IOException {
        try (FileOutputStream fos = new FileOutputStream(outputPath);
             FileChannel channel = fos.getChannel()) {
            ByteBuffer outBuf = ByteBuffer.allocate(buffer.capacity());

            // HEADER
            outBuf.put(version.getBytes()).put("    ".getBytes());  // version + 4 spaces
            String[] offsetKeys = {"BEGINSTEXT", "ENDSTEXT", "BEGINDATA", "ENDDATA", "BEGINANALYSIS", "ENDANALYSIS"};
            for (String key : offsetKeys) {
                outBuf.put(String.format("%8d", offsets.get(key)).getBytes());
            }
            outBuf.flip();
            channel.write(outBuf);
            outBuf.clear();

            // TEXT (simplified reconstruction; keywords keep their '$' prefix,
            // and any gap between the HEADER and BEGINSTEXT is left zero-filled)
            outBuf.put((byte) delimiter);
            for (Map.Entry<String, String> entry : keywords.entrySet()) {
                outBuf.put((entry.getKey() + delimiter + entry.getValue() + delimiter).getBytes());
            }
            // Pad to the original TEXT length (ENDSTEXT is inclusive)
            int origLen = offsets.get("ENDSTEXT") - offsets.get("BEGINSTEXT") + 1;
            while (outBuf.position() < origLen) outBuf.put((byte) ' ');
            outBuf.flip();
            channel.write(outBuf, offsets.get("BEGINSTEXT"));
            outBuf.clear();

            // DATA and ANALYSIS copied as-is (end offsets are inclusive)
            outBuf.put(buffer.array(), offsets.get("BEGINDATA"), offsets.get("ENDDATA") - offsets.get("BEGINDATA") + 1);
            outBuf.flip();
            channel.write(outBuf, offsets.get("BEGINDATA"));
            outBuf.clear();
            if (offsets.get("BEGINANALYSIS") > 0) {
                outBuf.put(buffer.array(), offsets.get("BEGINANALYSIS"), offsets.get("ENDANALYSIS") - offsets.get("BEGINANALYSIS") + 1);
                outBuf.flip();
                channel.write(outBuf, offsets.get("BEGINANALYSIS"));
            }
        }
        System.out.println("Written to " + outputPath);
    }

    public static void main(String[] args) throws IOException {
        if (args.length < 1) {
            System.err.println("Usage: java FCSReader <input.fcs> [output.fcs]");
            return;
        }
        FCSReader reader = new FCSReader(args[0]);
        if (args.length > 1) reader.write(args[1]);
    }
}

6. JavaScript Class for .FCS File Handling (Node.js)

This Node.js class reads an .FCS file synchronously, decodes/prints properties to console, and writes back. Uses fs and Buffer. Run with node fcsreader.js sample.fcs [output.fcs]. Handles basic parsing.

const fs = require('fs');

class FCSReader {
  constructor(filepath) {
    this.filepath = filepath;
    this.buffer = fs.readFileSync(filepath);
    this.keywords = {};
    this.version = '';
    this.delimiter = '';
    this.offsets = {};
    this.par = 0;
    this.parse();
  }

  parse() {
    // HEADER
    this.version = this.buffer.slice(0, 6).toString('ascii').trim();
    console.log(`FCS Version: ${this.version}`);

    const parseOffset = (start) => {
      const str = this.buffer.slice(start, start + 8).toString('ascii').trim();
      return str ? parseInt(str) : 0;
    };

    this.offsets.BEGINSTEXT = parseOffset(10);
    this.offsets.ENDSTEXT = parseOffset(18);
    this.offsets.BEGINDATA = parseOffset(26);
    this.offsets.ENDDATA = parseOffset(34);
    this.offsets.BEGINANALYSIS = parseOffset(42);
    this.offsets.ENDANALYSIS = parseOffset(50);

    for (const [k, v] of Object.entries(this.offsets)) {
      console.log(`$${k}: ${v}`);
    }

    // TEXT
    const textStart = this.offsets.BEGINSTEXT;
    const textEnd = this.offsets.ENDSTEXT;
    // End offsets are inclusive, so slice one byte past ENDSTEXT
    const textBytes = this.buffer.slice(textStart, textEnd + 1);
    this.delimiter = String.fromCharCode(textBytes[0]);
    console.log(`Delimiter: '${this.delimiter}'`);

    const textStr = textBytes.slice(1).toString('ascii');
    this.parseKeywords(textStr);

    // Keywords
    console.log(`$NEXTDATA: ${this.keywords['$NEXTDATA'] || 0}`);
    console.log(`$BYTEORD: ${this.keywords['$BYTEORD'] || 'Unknown'}`);
    console.log(`$DATATYPE: ${this.keywords['$DATATYPE'] || 'Unknown'}`);
    console.log(`$MODE: ${this.keywords['$MODE'] || 'Unknown'}`);
    this.par = parseInt(this.keywords['$PAR'] || '0', 10);
    console.log(`$PAR: ${this.par}`);
    const tot = parseInt(this.keywords['$TOT'] || '0', 10);
    console.log(`$TOT: ${tot}`);

    for (let n = 1; n <= this.par; n++) {
      const p = `P${n}`;
      console.log(`\nParameter ${n}:`);
      console.log(`  $P${n}N: ${this.keywords[`$${p}N`] || 'Unknown'}`);
      console.log(`  $P${n}B: ${this.keywords[`$${p}B`] || 'Unknown'}`);
      console.log(`  $P${n}E: ${this.keywords[`$${p}E`] || 'Unknown'}`);
      console.log(`  $P${n}R: ${this.keywords[`$${p}R`] || 'Unknown'}`);
    }
  }

  parseKeywords(text) {
    let pos = 0;
    while (pos < text.length) {
      const delimPos = text.indexOf(this.delimiter, pos);
      if (delimPos === -1) break;
      const keyword = text.substring(pos, delimPos).trim().toUpperCase();
      pos = delimPos + 1;
      const valEnd = text.indexOf(this.delimiter, pos);
      if (valEnd === -1) break;
      let value = text.substring(pos, valEnd).trim();
      if (value.startsWith(this.delimiter)) value = value.slice(1);
      if (value.endsWith(this.delimiter)) value = value.slice(0, -1);
      this.keywords[keyword] = value;
      pos = valEnd + 1;
    }
  }

  write(outputPath) {
    const outBuffer = Buffer.alloc(this.buffer.length);

    // HEADER
    outBuffer.write(this.version, 0, 6, 'ascii');
    outBuffer.fill(' ', 6, 10);
    const offsetKeys = ['BEGINSTEXT', 'ENDSTEXT', 'BEGINDATA', 'ENDDATA', 'BEGINANALYSIS', 'ENDANALYSIS'];
    let pos = 10;
    for (const key of offsetKeys) {
      outBuffer.write(`${this.offsets[key].toString().padStart(8, ' ')}`, pos, 8, 'ascii');
      pos += 8;
    }

    // TEXT
    // Fill the gap between the 58-byte HEADER and the start of TEXT with spaces
    outBuffer.fill(' ', 58, this.offsets.BEGINSTEXT);
    outBuffer[this.offsets.BEGINSTEXT] = this.delimiter.charCodeAt(0);
    // Simplified: write keyword/value pairs (keywords keep their '$' prefix)
    let textPos = this.offsets.BEGINSTEXT + 1;
    for (const [kw, val] of Object.entries(this.keywords)) {
      const entry = kw + this.delimiter + val + this.delimiter;
      outBuffer.write(entry, textPos, entry.length, 'ascii');
      textPos += entry.length;
    }
    // Pad the rest of TEXT with spaces (ENDSTEXT is inclusive)
    outBuffer.fill(' ', textPos, this.offsets.ENDSTEXT + 1);

    // DATA and ANALYSIS copied as-is (end offsets are inclusive)
    this.buffer.copy(outBuffer, this.offsets.BEGINDATA, this.offsets.BEGINDATA, this.offsets.ENDDATA + 1);
    if (this.offsets.BEGINANALYSIS > 0) {
      this.buffer.copy(outBuffer, this.offsets.BEGINANALYSIS, this.offsets.BEGINANALYSIS, this.offsets.ENDANALYSIS + 1);
    }

    fs.writeFileSync(outputPath, outBuffer);
    console.log(`Written to ${outputPath}`);
  }
}

// Usage: node fcsreader.js <input.fcs> [output.fcs]
const args = process.argv.slice(2);
if (!args[0]) {
  console.error('Usage: node fcsreader.js <input.fcs> [output.fcs]');
  process.exit(1);
}
const reader = new FCSReader(args[0]);
if (args[1]) reader.write(args[1]);

7. C Class (Struct) for .FCS File Handling

This C implementation uses stdio and stdlib for file I/O. Defines a struct for the reader. Compile with gcc fcsreader.c -o fcsreader; run ./fcsreader sample.fcs [output.fcs]. Basic parsing; console output via printf. Assumes single-sample FCS 3.0/3.1.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    char filepath[256];
    unsigned char* buffer;
    long filesize;
    char version[7];
    char delimiter;
    long offsets[6];  // 0:BEGINSTEXT,1:ENDSTEXT,2:BEGINDATA,3:ENDDATA,4:BEGINANALYSIS,5:ENDANALYSIS
    char keywords[100][256];  // Simple array for keywords (key=value)
    int num_keywords;
    int par;
} FCSReader;

/* Parse one right-justified, 8-character ASCII offset field from the HEADER. */
static long parse_offset(const unsigned char* buffer, int start) {
    char str[9];
    memcpy(str, buffer + start, 8);
    str[8] = '\0';
    return strtol(str, NULL, 10);
}

void parse_header(FCSReader* reader) {
    memcpy(reader->version, reader->buffer, 6);
    reader->version[6] = '\0';
    printf("FCS Version: %s\n", reader->version);

    reader->offsets[0] = parse_offset(reader->buffer, 10);  // BEGINSTEXT
    reader->offsets[1] = parse_offset(reader->buffer, 18);  // ENDSTEXT
    reader->offsets[2] = parse_offset(reader->buffer, 26);  // BEGINDATA
    reader->offsets[3] = parse_offset(reader->buffer, 34);  // ENDDATA
    reader->offsets[4] = parse_offset(reader->buffer, 42);  // BEGINANALYSIS
    reader->offsets[5] = parse_offset(reader->buffer, 50);  // ENDANALYSIS

    char* offset_names[] = {"BEGINSTEXT", "ENDSTEXT", "BEGINDATA", "ENDDATA", "BEGINANALYSIS", "ENDANALYSIS"};
    for (int i = 0; i < 6; i++) {
        printf("$%s: %ld\n", offset_names[i], reader->offsets[i]);
    }
}

void parse_text(FCSReader* reader) {
    long start = reader->offsets[0];
    long end = reader->offsets[1];   /* inclusive offset of the last TEXT byte */
    reader->delimiter = reader->buffer[start];
    printf("Delimiter: '%c'\n", reader->delimiter);

    /* Simple keyword parse: naive split on the delimiter (escaped delimiters not handled) */
    reader->num_keywords = 0;
    long text_len = end - start;     /* TEXT length minus the leading delimiter byte */
    char* text = malloc(text_len + 1);
    if (!text) return;
    memcpy(text, reader->buffer + start + 1, text_len);
    text[text_len] = '\0';

    char delim_str[2] = { reader->delimiter, '\0' };  /* strtok needs a NUL-terminated string */
    char* token = strtok(text, delim_str);
    while (token && reader->num_keywords < 100) {
        char* key = token;
        char* val = strtok(NULL, delim_str);
        if (!val) break;
        snprintf(reader->keywords[reader->num_keywords], 256, "%s=%s", key, val);
        reader->num_keywords++;
        token = strtok(NULL, delim_str);
    }
    free(text);
}

void print_keywords(FCSReader* reader) {
    char nextdata[20] = "0";
    char byteord[50] = "Unknown";
    char datatype[20] = "Unknown";
    char mode[20] = "Unknown";
    int par = 0, tot = 0;

    for (int i = 0; i < reader->num_keywords; i++) {
        if (strncmp(reader->keywords[i], "$NEXTDATA=", 10) == 0) strncpy(nextdata, reader->keywords[i] + 10, 19);
        if (strncmp(reader->keywords[i], "$BYTEORD=", 9) == 0) strncpy(byteord, reader->keywords[i] + 9, 49);
        if (strncmp(reader->keywords[i], "$DATATYPE=", 10) == 0) strncpy(datatype, reader->keywords[i] + 10, 19);
        if (strncmp(reader->keywords[i], "$MODE=", 6) == 0) strncpy(mode, reader->keywords[i] + 6, 19);
        if (strncmp(reader->keywords[i], "$PAR=", 5) == 0) par = atoi(reader->keywords[i] + 5);
        if (strncmp(reader->keywords[i], "$TOT=", 5) == 0) tot = atoi(reader->keywords[i] + 5);
    }

    printf("$NEXTDATA: %s\n", nextdata);
    printf("$BYTEORD: %s\n", byteord);
    printf("$DATATYPE: %s\n", datatype);
    printf("$MODE: %s\n", mode);
    printf("$PAR: %d\n", par);
    reader->par = par;
    printf("$TOT: %d\n", tot);

    for (int n = 1; n <= par; n++) {
        char pnN[50] = "Unknown", pnB[20] = "Unknown", pnE[20] = "Unknown", pnR[20] = "Unknown";
        // Build all four search prefixes up front so each keyword is checked against every one
        char sN[12], sB[12], sE[12], sR[12];
        snprintf(sN, sizeof sN, "$P%dN=", n);
        snprintf(sB, sizeof sB, "$P%dB=", n);
        snprintf(sE, sizeof sE, "$P%dE=", n);
        snprintf(sR, sizeof sR, "$P%dR=", n);
        for (int i = 0; i < reader->num_keywords; i++) {
            if (strncmp(reader->keywords[i], sN, strlen(sN)) == 0) strncpy(pnN, reader->keywords[i] + strlen(sN), 49);
            if (strncmp(reader->keywords[i], sB, strlen(sB)) == 0) strncpy(pnB, reader->keywords[i] + strlen(sB), 19);
            if (strncmp(reader->keywords[i], sE, strlen(sE)) == 0) strncpy(pnE, reader->keywords[i] + strlen(sE), 19);
            if (strncmp(reader->keywords[i], sR, strlen(sR)) == 0) strncpy(pnR, reader->keywords[i] + strlen(sR), 19);
        }
        printf("\nParameter %d:\n", n);
        printf("  $P%dN: %s\n", n, pnN);
        printf("  $P%dB: %s\n", n, pnB);
        printf("  $P%dE: %s\n", n, pnE);
        printf("  $P%dR: %s\n", n, pnR);
    }
}

void write_file(FCSReader* reader, char* output_path) {
    FILE* out = fopen(output_path, "wb");
    if (!out) return;

    // HEADER: version, 4 spaces, then six right-justified 8-character ASCII offsets
    fwrite(reader->version, 1, 6, out);
    for (int i = 0; i < 4; i++) fputc(' ', out);
    char off_str[9];
    for (int i = 0; i < 6; i++) {
        sprintf(off_str, "%8ld", reader->offsets[i]);
        fwrite(off_str, 1, 8, out);
    }
    // Pad with spaces from the end of the HEADER to the start of TEXT
    while (ftell(out) < reader->offsets[0]) fputc(' ', out);

    // TEXT (simplified): delimiter, then keyword/value pairs; keywords keep their '$' prefix
    fputc(reader->delimiter, out);
    for (int i = 0; i < reader->num_keywords; i++) {
        char* eq = strchr(reader->keywords[i], '=');
        if (eq) {
            *eq = '\0';
            fprintf(out, "%s%c%s%c", reader->keywords[i], reader->delimiter, eq + 1, reader->delimiter);
            *eq = '=';
        }
    }
    // Pad the rest of TEXT (and any gap) with spaces up to the start of DATA
    while (ftell(out) < reader->offsets[2]) fputc(' ', out);

    // DATA and ANALYSIS copied as-is (end offsets are inclusive)
    fwrite(reader->buffer + reader->offsets[2], 1, reader->offsets[3] - reader->offsets[2] + 1, out);
    if (reader->offsets[4] > 0) {
        while (ftell(out) < reader->offsets[4]) fputc(' ', out);
        fwrite(reader->buffer + reader->offsets[4], 1, reader->offsets[5] - reader->offsets[4] + 1, out);
    }

    fclose(out);
    printf("Written to %s\n", output_path);
}

int main(int argc, char** argv) {
    if (argc < 2) {
        printf("Usage: %s <input.fcs> [output.fcs]\n", argv[0]);
        return 1;
    }

    FCSReader reader;
    snprintf(reader.filepath, sizeof reader.filepath, "%s", argv[1]);
    FILE* f = fopen(argv[1], "rb");
    if (!f) {
        printf("Cannot open %s\n", argv[1]);
        return 1;
    }
    fseek(f, 0, SEEK_END);
    reader.filesize = ftell(f);
    fseek(f, 0, SEEK_SET);
    reader.buffer = malloc(reader.filesize);
    if (!reader.buffer) {
        fclose(f);
        return 1;
    }
    fread(reader.buffer, 1, reader.filesize, f);
    fclose(f);

    parse_header(&reader);
    parse_text(&reader);
    print_keywords(&reader);

    if (argc > 2) write_file(&reader, argv[2]);

    free(reader.buffer);
    return 0;
}