First commit, already well advanced

commit db4f0508cb
2025-11-10 18:33:24 +01:00
652 changed files with 440521 additions and 0 deletions

frontend/node_modules/@lezer/lr/LICENSE (generated, vendored, new file): +21 lines

@@ -0,0 +1,21 @@
MIT License

Copyright (C) 2018 by Marijn Haverbeke <marijn@haverbeke.berlin> and others

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

frontend/node_modules/@lezer/lr/README.md (generated, vendored, new file): +25 lines

@@ -0,0 +1,25 @@
# @lezer/lr
[ [**WEBSITE**](http://lezer.codemirror.net) | [**ISSUES**](https://github.com/lezer-parser/lezer/issues) | [**FORUM**](https://discuss.codemirror.net/c/lezer) | [**CHANGELOG**](https://github.com/lezer-parser/lr/blob/master/CHANGELOG.md) ]
Lezer ("reader" in Dutch, pronounced pretty much as laser) is an
incremental GLR parser intended for use in an editor or similar
system, which needs to keep a representation of the program current
during changes and in the face of syntax errors.
It prioritizes speed and compactness (both of parser table files and
of syntax tree) over having a highly usable parse tree—trees nodes are
just blobs with a start, end, tag, and set of child nodes, with no
further labeling of child nodes or extra metadata.
This package contains the run-time LR parser library. It consumes
parsers generated by
[@lezer/generator](https://github.com/lezer-parser/generator).
The parser programming interface is documented on [the
website](https://lezer.codemirror.net/docs/ref/#lr).
The code is licensed under an MIT license.
This project was hugely inspired by
[tree-sitter](http://tree-sitter.github.io/tree-sitter/).
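
For orientation, a minimal sketch of how this runtime is typically consumed. It assumes a parser generated by @lezer/generator is installed (here @lezer/javascript); this package itself ships no grammar.

```ts
// Hedged usage sketch: assumes @lezer/javascript, a generated parser
// package, is installed alongside this runtime library.
import {parser} from "@lezer/javascript";

const tree = parser.parse("let x = 10");

// As the README says, nodes are just typed spans; walk them with a cursor.
const cursor = tree.cursor();
do {
  console.log(`${cursor.name} [${cursor.from}..${cursor.to}]`);
} while (cursor.next());
```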

frontend/node_modules/@lezer/lr/dist/constants.d.ts (generated, vendored, new file): +45 lines

@@ -0,0 +1,45 @@
export declare const enum Action {
    ReduceFlag = 65536,
    ValueMask = 65535,
    ReduceDepthShift = 19,
    RepeatFlag = 131072,
    GotoFlag = 131072,
    StayFlag = 262144
}
export declare const enum StateFlag {
    Skipped = 1,
    Accepting = 2
}
export declare const enum Specialize {
    Specialize = 0,
    Extend = 1
}
export declare const enum Term {
    Err = 0
}
export declare const enum Seq {
    End = 65535,
    Done = 0,
    Next = 1,
    Other = 2
}
export declare const enum ParseState {
    Flags = 0,
    Actions = 1,
    Skip = 2,
    TokenizerMask = 3,
    DefaultReduce = 4,
    ForcedReduce = 5,
    Size = 6
}
export declare const enum Encode {
    BigValCode = 126,
    BigVal = 65535,
    Start = 32,
    Gap1 = 34,
    Gap2 = 92,
    Base = 46
}
export declare const enum File {
    Version = 14
}
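
A hedged reading of how these constants combine: a reduce action appears to pack the reduced term in the low 16 bits (`ValueMask`), flag bits above them, and the reduce depth above `ReduceDepthShift`. The sketch below decodes a value on that assumption; this is internal table layout inferred from the file above, not documented API.

```ts
// Hedged sketch: decoding an action value using the constants above.
// Layout inferred from the flag/shift values; internal, not public API.
const ReduceFlag = 65536;    // Action.ReduceFlag
const ValueMask = 65535;     // Action.ValueMask
const ReduceDepthShift = 19; // Action.ReduceDepthShift

function describeAction(action: number): string {
  if (action & ReduceFlag) {
    const term = action & ValueMask;          // term being reduced
    const depth = action >> ReduceDepthShift; // stack frames popped
    return `reduce term ${term} (depth ${depth})`;
  }
  return `shift to state ${action & ValueMask}`;
}
```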

frontend/node_modules/@lezer/lr/dist/constants.js (generated, vendored, new file): +5 lines

@@ -0,0 +1,5 @@
"use strict";
// This file defines some constants that are needed both in this
// package and in lezer-generator, so that the generator code can
// access them without them being part of lezer's public interface.
exports.__esModule = true;

frontend/node_modules/@lezer/lr/dist/index.cjs (generated, vendored, new file): +1890 lines

File diff suppressed because it is too large.

frontend/node_modules/@lezer/lr/dist/index.d.cts (generated, vendored, new file): +303 lines

@@ -0,0 +1,303 @@
import { Tree, NodePropSource, ParseWrapper, Parser, NodeSet, Input, TreeFragment, PartialParse, NodeType } from '@lezer/common';
/**
A parse stack. These are used internally by the parser to track
parsing progress. They also provide some properties and methods
that external code such as a tokenizer can use to get information
about the parse state.
*/
declare class Stack {
/**
The input position up to which this stack has parsed.
*/
pos: number;
/**
The stack's current [context](#lr.ContextTracker) value, if
any. Its type will depend on the context tracker's type
parameter, or it will be `null` if there is no context
tracker.
*/
get context(): any;
/**
Check if the given term would be able to be shifted (optionally
after some reductions) on this stack. This can be useful for
external tokenizers that want to make sure they only provide a
given token when it applies.
*/
canShift(term: number): boolean;
/**
Get the parser used by this stack.
*/
get parser(): LRParser;
/**
Test whether a given dialect (by numeric ID, as exported from
the terms file) is enabled.
*/
dialectEnabled(dialectID: number): boolean;
private shiftContext;
private reduceContext;
private updateContext;
}
/**
[Tokenizers](#lr.ExternalTokenizer) interact with the input
through this interface. It presents the input as a stream of
characters, tracking lookahead and hiding the complexity of
[ranges](#common.Parser.parse^ranges) from tokenizer code.
*/
declare class InputStream {
/**
Backup chunk
*/
private chunk2;
private chunk2Pos;
/**
The character code of the next code unit in the input, or -1
when the stream is at the end of the input.
*/
next: number;
/**
The current position of the stream. Note that, due to parses
being able to cover non-contiguous
[ranges](#common.Parser.startParse), advancing the stream does
not always mean its position moves a single unit.
*/
pos: number;
private rangeIndex;
private range;
/**
Look at a code unit near the stream position. `.peek(0)` equals
`.next`, `.peek(-1)` gives you the previous character, and so
on.
Note that looking around during tokenizing creates dependencies
on potentially far-away content, which may reduce the
effectiveness of incremental parsing—when looking forward—or even
cause invalid reparses when looking backward more than 25 code
units, since the library does not track lookbehind.
*/
peek(offset: number): number;
/**
Accept a token. By default, the end of the token is set to the
current stream position, but you can pass an offset (relative to
the stream position) to change that.
*/
acceptToken(token: number, endOffset?: number): void;
/**
Accept a token ending at a specific position.
*/
acceptTokenTo(token: number, endPos: number): void;
private getChunk;
private readNext;
/**
Move the stream forward N (defaults to 1) code units. Returns
the new value of [`next`](#lr.InputStream.next).
*/
advance(n?: number): number;
private setDone;
}
interface Tokenizer {
}
/**
@hide
*/
declare class LocalTokenGroup implements Tokenizer {
readonly precTable: number;
readonly elseToken?: number | undefined;
contextual: boolean;
fallback: boolean;
extend: boolean;
readonly data: Readonly<Uint16Array>;
constructor(data: Readonly<Uint16Array> | string, precTable: number, elseToken?: number | undefined);
token(input: InputStream, stack: Stack): void;
}
interface ExternalOptions {
/**
When set to true, mark this tokenizer as depending on the
current parse stack, which prevents its result from being cached
between parser actions at the same positions.
*/
contextual?: boolean;
/**
By default, when a tokenizer returns a token, that prevents
tokenizers with lower precedence from even running. When
`fallback` is true, the tokenizer is allowed to run when a
previous tokenizer returned a token that didn't match any of the
current state's actions.
*/
fallback?: boolean;
/**
When set to true, tokenizing will not stop after this tokenizer
has produced a token. (But it will still fail to reach this one
if a higher-precedence tokenizer produced a token.)
*/
extend?: boolean;
}
/**
`@external tokens` declarations in the grammar should resolve to
an instance of this class.
*/
declare class ExternalTokenizer {
/**
Create a tokenizer. The first argument is the function that,
given an input stream, scans for the types of tokens it
recognizes at the stream's position, and calls
[`acceptToken`](#lr.InputStream.acceptToken) when it finds
one.
*/
constructor(
/**
@internal
*/
token: (input: InputStream, stack: Stack) => void, options?: ExternalOptions);
}
/**
Context trackers are used to track stateful context (such as
indentation in the Python grammar, or parent elements in the XML
grammar) needed by external tokenizers. You declare them in a
grammar file as `@context exportName from "module"`.
Context values should be immutable, and can be updated (replaced)
on shift or reduce actions.
The export used in a `@context` declaration should be of this
type.
*/
declare class ContextTracker<T> {
/**
Define a context tracker.
*/
constructor(spec: {
/**
The initial value of the context at the start of the parse.
*/
start: T;
/**
Update the context when the parser executes a
[shift](https://en.wikipedia.org/wiki/LR_parser#Shift_and_reduce_actions)
action.
*/
shift?(context: T, term: number, stack: Stack, input: InputStream): T;
/**
Update the context when the parser executes a reduce action.
*/
reduce?(context: T, term: number, stack: Stack, input: InputStream): T;
/**
Update the context when the parser reuses a node from a tree
fragment.
*/
reuse?(context: T, node: Tree, stack: Stack, input: InputStream): T;
/**
Reduce a context value to a number (for cheap storage and
comparison). Only needed for strict contexts.
*/
hash?(context: T): number;
/**
By default, nodes can only be reused during incremental
parsing if they were created in the same context as the one in
which they are reused. Set this to false to disable that
check (and the overhead of storing the hashes).
*/
strict?: boolean;
});
}
/**
Configuration options when
[reconfiguring](#lr.LRParser.configure) a parser.
*/
interface ParserConfig {
/**
Node prop values to add to the parser's node set.
*/
props?: readonly NodePropSource[];
/**
The name of the `@top` declaration to parse from. If not
specified, the first top rule declaration in the grammar is
used.
*/
top?: string;
/**
A space-separated string of dialects to enable.
*/
dialect?: string;
/**
Replace the given external tokenizers with new ones.
*/
tokenizers?: {
from: ExternalTokenizer;
to: ExternalTokenizer;
}[];
/**
Replace external specializers with new ones.
*/
specializers?: {
from: (value: string, stack: Stack) => number;
to: (value: string, stack: Stack) => number;
}[];
/**
Replace the context tracker with a new one.
*/
contextTracker?: ContextTracker<any>;
/**
When true, the parser will raise an exception, rather than run
its error-recovery strategies, when the input doesn't match the
grammar.
*/
strict?: boolean;
/**
Add a wrapper, which can extend parses created by this parser
with additional logic (usually used to add
[mixed-language](#common.parseMixed) parsing).
*/
wrap?: ParseWrapper;
/**
The maximum length of the TreeBuffers generated in the output
tree. Defaults to 1024.
*/
bufferLength?: number;
}
/**
Holds the parse tables for a given grammar, as generated by
`lezer-generator`, and provides [methods](#common.Parser) to parse
content with.
*/
declare class LRParser extends Parser {
/**
The nodes used in the trees emitted by this parser.
*/
readonly nodeSet: NodeSet;
createParse(input: Input, fragments: readonly TreeFragment[], ranges: readonly {
from: number;
to: number;
}[]): PartialParse;
/**
Configure the parser. Returns a new parser instance that has the
given settings modified. Settings not provided in `config` are
kept from the original parser.
*/
configure(config: ParserConfig): LRParser;
/**
Tells you whether any [parse wrappers](#lr.ParserConfig.wrap)
are registered for this parser.
*/
hasWrappers(): boolean;
/**
Returns the name associated with a given term. This will only
work for all terms when the parser was generated with the
`--names` option. By default, only the names of tagged terms are
stored.
*/
getName(term: number): string;
/**
The type of top node produced by the parser.
*/
get topNode(): NodeType;
/**
Used by the output of the parser generator. Not available to
user code. @hide
*/
static deserialize(spec: any): LRParser;
}
export { ContextTracker, ExternalTokenizer, InputStream, LRParser, LocalTokenGroup, type ParserConfig, Stack };
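
To make the ExternalTokenizer declaration above concrete, a hedged sketch in the style of real grammars. The `insertSemi` term id is hypothetical; in practice it comes from the generated parser's terms file.

```ts
import {ExternalTokenizer} from "@lezer/lr";

// Hypothetical term id; normally imported from a generated terms file,
// e.g. `import {insertSemi} from "./parser.terms.js"`.
const insertSemi = 1;

// Emit a zero-length "inserted semicolon" token before a newline, but
// only when the current parse state could actually shift that term.
export const insertSemicolon = new ExternalTokenizer((input, stack) => {
  if (input.next == 10 /* "\n" */ && stack.canShift(insertSemi))
    input.acceptToken(insertSemi);
}, {contextual: true, fallback: true});
```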

frontend/node_modules/@lezer/lr/dist/index.d.ts (generated, vendored, new file): +303 lines

@@ -0,0 +1,303 @@
import { Tree, NodePropSource, ParseWrapper, Parser, NodeSet, Input, TreeFragment, PartialParse, NodeType } from '@lezer/common';
/**
A parse stack. These are used internally by the parser to track
parsing progress. They also provide some properties and methods
that external code such as a tokenizer can use to get information
about the parse state.
*/
declare class Stack {
/**
The input position up to which this stack has parsed.
*/
pos: number;
/**
The stack's current [context](#lr.ContextTracker) value, if
any. Its type will depend on the context tracker's type
parameter, or it will be `null` if there is no context
tracker.
*/
get context(): any;
/**
Check if the given term would be able to be shifted (optionally
after some reductions) on this stack. This can be useful for
external tokenizers that want to make sure they only provide a
given token when it applies.
*/
canShift(term: number): boolean;
/**
Get the parser used by this stack.
*/
get parser(): LRParser;
/**
Test whether a given dialect (by numeric ID, as exported from
the terms file) is enabled.
*/
dialectEnabled(dialectID: number): boolean;
private shiftContext;
private reduceContext;
private updateContext;
}
/**
[Tokenizers](#lr.ExternalTokenizer) interact with the input
through this interface. It presents the input as a stream of
characters, tracking lookahead and hiding the complexity of
[ranges](#common.Parser.parse^ranges) from tokenizer code.
*/
declare class InputStream {
/**
Backup chunk
*/
private chunk2;
private chunk2Pos;
/**
The character code of the next code unit in the input, or -1
when the stream is at the end of the input.
*/
next: number;
/**
The current position of the stream. Note that, due to parses
being able to cover non-contiguous
[ranges](#common.Parser.startParse), advancing the stream does
not always mean its position moves a single unit.
*/
pos: number;
private rangeIndex;
private range;
/**
Look at a code unit near the stream position. `.peek(0)` equals
`.next`, `.peek(-1)` gives you the previous character, and so
on.
Note that looking around during tokenizing creates dependencies
on potentially far-away content, which may reduce the
effectiveness of incremental parsing—when looking forward—or even
cause invalid reparses when looking backward more than 25 code
units, since the library does not track lookbehind.
*/
peek(offset: number): number;
/**
Accept a token. By default, the end of the token is set to the
current stream position, but you can pass an offset (relative to
the stream position) to change that.
*/
acceptToken(token: number, endOffset?: number): void;
/**
Accept a token ending at a specific position.
*/
acceptTokenTo(token: number, endPos: number): void;
private getChunk;
private readNext;
/**
Move the stream forward N (defaults to 1) code units. Returns
the new value of [`next`](#lr.InputStream.next).
*/
advance(n?: number): number;
private setDone;
}
interface Tokenizer {
}
/**
@hide
*/
declare class LocalTokenGroup implements Tokenizer {
readonly precTable: number;
readonly elseToken?: number | undefined;
contextual: boolean;
fallback: boolean;
extend: boolean;
readonly data: Readonly<Uint16Array>;
constructor(data: Readonly<Uint16Array> | string, precTable: number, elseToken?: number | undefined);
token(input: InputStream, stack: Stack): void;
}
interface ExternalOptions {
/**
When set to true, mark this tokenizer as depending on the
current parse stack, which prevents its result from being cached
between parser actions at the same positions.
*/
contextual?: boolean;
/**
By default, when a tokenizer returns a token, that prevents
tokenizers with lower precedence from even running. When
`fallback` is true, the tokenizer is allowed to run when a
previous tokenizer returned a token that didn't match any of the
current state's actions.
*/
fallback?: boolean;
/**
When set to true, tokenizing will not stop after this tokenizer
has produced a token. (But it will still fail to reach this one
if a higher-precedence tokenizer produced a token.)
*/
extend?: boolean;
}
/**
`@external tokens` declarations in the grammar should resolve to
an instance of this class.
*/
declare class ExternalTokenizer {
/**
Create a tokenizer. The first argument is the function that,
given an input stream, scans for the types of tokens it
recognizes at the stream's position, and calls
[`acceptToken`](#lr.InputStream.acceptToken) when it finds
one.
*/
constructor(
/**
@internal
*/
token: (input: InputStream, stack: Stack) => void, options?: ExternalOptions);
}
/**
Context trackers are used to track stateful context (such as
indentation in the Python grammar, or parent elements in the XML
grammar) needed by external tokenizers. You declare them in a
grammar file as `@context exportName from "module"`.
Context values should be immutable, and can be updated (replaced)
on shift or reduce actions.
The export used in a `@context` declaration should be of this
type.
*/
declare class ContextTracker<T> {
/**
Define a context tracker.
*/
constructor(spec: {
/**
The initial value of the context at the start of the parse.
*/
start: T;
/**
Update the context when the parser executes a
[shift](https://en.wikipedia.org/wiki/LR_parser#Shift_and_reduce_actions)
action.
*/
shift?(context: T, term: number, stack: Stack, input: InputStream): T;
/**
Update the context when the parser executes a reduce action.
*/
reduce?(context: T, term: number, stack: Stack, input: InputStream): T;
/**
Update the context when the parser reuses a node from a tree
fragment.
*/
reuse?(context: T, node: Tree, stack: Stack, input: InputStream): T;
/**
Reduce a context value to a number (for cheap storage and
comparison). Only needed for strict contexts.
*/
hash?(context: T): number;
/**
By default, nodes can only be reused during incremental
parsing if they were created in the same context as the one in
which they are reused. Set this to false to disable that
check (and the overhead of storing the hashes).
*/
strict?: boolean;
});
}
/**
Configuration options when
[reconfiguring](#lr.LRParser.configure) a parser.
*/
interface ParserConfig {
/**
Node prop values to add to the parser's node set.
*/
props?: readonly NodePropSource[];
/**
The name of the `@top` declaration to parse from. If not
specified, the first top rule declaration in the grammar is
used.
*/
top?: string;
/**
A space-separated string of dialects to enable.
*/
dialect?: string;
/**
Replace the given external tokenizers with new ones.
*/
tokenizers?: {
from: ExternalTokenizer;
to: ExternalTokenizer;
}[];
/**
Replace external specializers with new ones.
*/
specializers?: {
from: (value: string, stack: Stack) => number;
to: (value: string, stack: Stack) => number;
}[];
/**
Replace the context tracker with a new one.
*/
contextTracker?: ContextTracker<any>;
/**
When true, the parser will raise an exception, rather than run
its error-recovery strategies, when the input doesn't match the
grammar.
*/
strict?: boolean;
/**
Add a wrapper, which can extend parses created by this parser
with additional logic (usually used to add
[mixed-language](#common.parseMixed) parsing).
*/
wrap?: ParseWrapper;
/**
The maximum length of the TreeBuffers generated in the output
tree. Defaults to 1024.
*/
bufferLength?: number;
}
/**
Holds the parse tables for a given grammar, as generated by
`lezer-generator`, and provides [methods](#common.Parser) to parse
content with.
*/
declare class LRParser extends Parser {
/**
The nodes used in the trees emitted by this parser.
*/
readonly nodeSet: NodeSet;
createParse(input: Input, fragments: readonly TreeFragment[], ranges: readonly {
from: number;
to: number;
}[]): PartialParse;
/**
Configure the parser. Returns a new parser instance that has the
given settings modified. Settings not provided in `config` are
kept from the original parser.
*/
configure(config: ParserConfig): LRParser;
/**
Tells you whether any [parse wrappers](#lr.ParserConfig.wrap)
are registered for this parser.
*/
hasWrappers(): boolean;
/**
Returns the name associated with a given term. This will only
work for all terms when the parser was generated with the
`--names` option. By default, only the names of tagged terms are
stored.
*/
getName(term: number): string;
/**
The type of top node produced by the parser.
*/
get topNode(): NodeType;
/**
Used by the output of the parser generator. Not available to
user code. @hide
*/
static deserialize(spec: any): LRParser;
}
export { ContextTracker, ExternalTokenizer, InputStream, LRParser, LocalTokenGroup, type ParserConfig, Stack };
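
The ContextTracker declaration above is easiest to read next to an example. Here is a hedged sketch of a tracker keeping an immutable indentation depth, loosely in the spirit of the Python grammar the docs mention; the `indent` term id and the depth arithmetic are illustrative assumptions.

```ts
import {ContextTracker} from "@lezer/lr";

// Hypothetical term id from a generated terms file.
const indent = 2;

// Context values are immutable; each hook returns the next value.
export const trackIndent = new ContextTracker<number>({
  start: 0,
  shift(context, term, stack, input) {
    // On our hypothetical indent token, record its width (illustrative:
    // the distance the token spans); otherwise keep the old context.
    return term == indent ? input.pos - stack.pos : context;
  },
  // Cheap numeric hash so reused nodes can be checked against context.
  hash: context => context,
});
```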

frontend/node_modules/@lezer/lr/dist/index.js (generated, vendored, new file): +1883 lines

File diff suppressed because it is too large.

frontend/node_modules/@lezer/lr/package.json (generated, vendored, new file): +32 lines

@@ -0,0 +1,32 @@
{
"name": "@lezer/lr",
"version": "1.4.3",
"description": "Incremental parser",
"main": "dist/index.cjs",
"type": "module",
"exports": {
"import": "./dist/index.js",
"require": "./dist/index.cjs"
},
"module": "dist/index.js",
"types": "dist/index.d.ts",
"author": "Marijn Haverbeke <marijn@haverbeke.berlin>",
"license": "MIT",
"repository": {
"type" : "git",
"url" : "https://github.com/lezer-parser/lr.git"
},
"devDependencies": {
"@marijn/buildtool": "^0.1.5",
"@types/node": "^20.6.2"
},
"dependencies": {
"@lezer/common": "^1.0.0"
},
"files": ["dist"],
"scripts": {
"test": "echo 'Tests are in @lezer/generator'",
"watch": "node build.js --watch",
"prepare": "node build.js; tsc src/constants.ts -d --outDir dist"
}
}
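
Given the `exports` map above, both module systems should resolve to equivalent builds; a short sketch of what consumers see:

```ts
// ESM consumers resolve ./dist/index.js via the "import" condition:
import {LRParser, ExternalTokenizer, ContextTracker} from "@lezer/lr";

// CommonJS consumers resolve ./dist/index.cjs via the "require" condition:
// const {LRParser} = require("@lezer/lr");
```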