From c7fe00696b7ca9d4020301c6164cd777901bb4c2 Mon Sep 17 00:00:00 2001 From: Herald Yu Date: Mon, 22 Jul 2024 17:00:05 +0800 Subject: [PATCH] update docs/reference.md (#5009) Co-authored-by: Changjian Gao Co-authored-by: Git'Fellow <12234510+solracsf@users.noreply.github.com> --- .../fault_diagnosis_and_analysis.md | 16 +- .../metadata/redis_best_practices.md | 4 +- docs/en/administration/metadata_dump_load.md | 6 +- docs/en/administration/monitoring.md | 2 +- docs/en/administration/troubleshooting.md | 6 +- .../benchmark/performance_evaluation_guide.md | 4 +- .../production_deployment_recommendations.md | 2 +- docs/en/deployment/s3_gateway.md | 4 +- docs/en/development/internals.md | 2 +- docs/en/faq.md | 12 +- docs/en/getting-started/standalone.md | 4 +- docs/en/guide/cache.md | 36 +- docs/en/guide/clone.md | 2 +- docs/en/guide/gateway.md | 2 +- docs/en/guide/quota.md | 2 +- docs/en/guide/sync.md | 8 +- docs/en/introduction/io_processing.md | 6 +- docs/en/reference/_common_options.mdx | 69 + ...and_reference.md => command_reference.mdx} | 134 +- docs/en/reference/fuse_mount_options.md | 2 +- .../reference/how_to_set_up_object_storage.md | 4 +- docs/en/reference/posix_compatibility.md | 2 +- docs/en/security/trash.md | 12 +- docs/en/tutorials/juicefs_on_kubesphere.md | 2 +- .../fault_diagnosis_and_analysis.md | 12 +- .../metadata/redis_best_practices.md | 4 +- .../administration/metadata_dump_load.md | 6 +- docs/zh_cn/administration/monitoring.md | 2 +- docs/zh_cn/administration/troubleshooting.md | 6 +- .../benchmark/performance_evaluation_guide.md | 4 +- .../production_deployment_recommendations.md | 2 +- docs/zh_cn/deployment/s3_gateway.md | 4 +- docs/zh_cn/development/internals.md | 2 +- docs/zh_cn/faq.md | 12 +- docs/zh_cn/getting-started/standalone.md | 4 +- docs/zh_cn/guide/cache.md | 18 +- docs/zh_cn/guide/clone.md | 2 +- docs/zh_cn/guide/gateway.md | 6 +- docs/zh_cn/guide/quota.md | 2 +- docs/zh_cn/guide/sync.md | 10 +- 
docs/zh_cn/introduction/io_processing.md | 6 +- docs/zh_cn/reference/_common_options.mdx | 69 + ...and_reference.md => command_reference.mdx} | 128 +- docs/zh_cn/reference/fuse_mount_options.md | 2 +- .../reference/how_to_set_up_object_storage.md | 4 +- docs/zh_cn/reference/posix_compatibility.md | 2 +- docs/zh_cn/security/trash.md | 12 +- docs/zh_cn/tutorials/juicefs_on_kubesphere.md | 2 +- package-lock.json | 2510 ++++++++++++++++- package.json | 2 +- 50 files changed, 2759 insertions(+), 417 deletions(-) create mode 100644 docs/en/reference/_common_options.mdx rename docs/en/reference/{command_reference.md => command_reference.mdx} (78%) create mode 100644 docs/zh_cn/reference/_common_options.mdx rename docs/zh_cn/reference/{command_reference.md => command_reference.mdx} (79%) diff --git a/docs/en/administration/fault_diagnosis_and_analysis.md b/docs/en/administration/fault_diagnosis_and_analysis.md index a0329bf7a46a..672a4dff99d6 100644 --- a/docs/en/administration/fault_diagnosis_and_analysis.md +++ b/docs/en/administration/fault_diagnosis_and_analysis.md @@ -16,7 +16,7 @@ Different JuiceFS clients print logs in different ways, which are described as f ### Mount point -When a JuiceFS file system is mounted with the [`-d` option](../reference/command_reference.md#mount) (indicating running in the background), it will print logs to the system log file and local log file simultaneously. Depending on which user is running when mounting the file system, the paths of the local log files are slightly different. For root, the local log file locates at `/var/log/juicefs.log`, while it locates at `$HOME/.juicefs/juicefs.log` for non-root users. Please refer to [`--log` option](../reference/command_reference.md#mount) for details. +When a JuiceFS file system is mounted with the [`-d` option](../reference/command_reference.mdx#mount) (indicating running in the background), it will print logs to the system log file and local log file simultaneously. 
Depending on which user is running when mounting the file system, the paths of the local log files are slightly different. For root, the local log file locates at `/var/log/juicefs.log`, while it locates at `$HOME/.juicefs/juicefs.log` for non-root users. Please refer to [`--log` option](../reference/command_reference.mdx#mount) for details. Depending on the operating system, there are different commands to retrieve system logs or read local log files directly. @@ -118,7 +118,7 @@ kubectl -n kube-system exec juicefs-chaos-k8s-002-pvc-d4b8fb4f-2c0b-48e8-a2dc-53 ### S3 Gateway -You need to add the [`--access-log` option](../reference/command_reference.md#gateway) when starting the S3 gateway to specify the path to output the access log. By default, the S3 gateway does not output the access log. +You need to add the [`--access-log` option](../reference/command_reference.mdx#gateway) when starting the S3 gateway to specify the path to output the access log. By default, the S3 gateway does not output the access log. ### Hadoop Java SDK @@ -142,9 +142,7 @@ This command collects the following information: 6. Go pprof information 7. JuiceFS logs (defaulting to the last 5000 lines) -By default, a `debug` directory is created in the current directory, and the collected information is saved in that directory. - -Here's an example: +By default, a `debug` directory is created in the current directory, and the collected information is saved in that directory. 
Here's an example: ```shell $ juicefs debug /tmp/mountpoint @@ -177,7 +175,7 @@ JuiceFS provides the `profile` and `stats` subcommands to visualize real-time pe ### `juicefs profile` {#profile} -[`juicefs profile`](../reference/command_reference.md#profile) will collect data from [file system access log](#access-log), run the `juicefs profile MOUNTPOINT` command, you can see the real-time statistics of each file system operation based on the latest access log: +[`juicefs profile`](../reference/command_reference.mdx#profile) will collect data from [file system access log](#access-log), run the `juicefs profile MOUNTPOINT` command, you can see the real-time statistics of each file system operation based on the latest access log: ![JuiceFS-profiling](../images/juicefs-profiling.gif) @@ -201,7 +199,7 @@ juicefs profile /tmp/juicefs.accesslog --uid 12345 ### `juicefs stats` {#stats} -The [`juicefs stats`](../reference/command_reference.md#stats) command reads JuiceFS Client internal metrics data, and output performance data in a format similar to `dstat`: +The [`juicefs stats`](../reference/command_reference.mdx#stats) command reads JuiceFS Client internal metrics data, and output performance data in a format similar to `dstat`: ![juicefs_stats_watcher](../images/juicefs_stats_watcher.png) @@ -211,7 +209,7 @@ Metrics description: - `cpu`: CPU usage of the process. - `mem`: Physical memory used by the process. -- `buf`: Current [buffer size](../guide/cache.md#buffer-size), if this value is constantly close to (or even exceeds) the configured [`--buffer-size`](../reference/command_reference.md#mount), you should increase buffer size or decrease application workload. +- `buf`: Current [buffer size](../guide/cache.md#buffer-size), if this value is constantly close to (or even exceeds) the configured [`--buffer-size`](../reference/command_reference.mdx#mount-data-cache-options), you should increase buffer size or decrease application workload. 
- `cache`: Internal metric, ignore this. #### `fuse` @@ -276,7 +274,7 @@ You can also use the `juicefs debug` command to automatically collect these runt juicefs debug /mnt/jfs ``` -For more information about the `juicefs debug` command, see [command reference](../reference/command_reference.md#debug). +For more information about the `juicefs debug` command, see [command reference](../reference/command_reference.mdx#debug). ::: If you have the `go` command installed, you can analyze it directly with the `go tool pprof` command. For example to analyze CPU performance statistics: diff --git a/docs/en/administration/metadata/redis_best_practices.md b/docs/en/administration/metadata/redis_best_practices.md index 23999ebe4e97..73c2e7549d9b 100644 --- a/docs/en/administration/metadata/redis_best_practices.md +++ b/docs/en/administration/metadata/redis_best_practices.md @@ -27,7 +27,7 @@ used_memory_dataset: 13439673592 used_memory_dataset_perc: 70.12% ``` -Among them, `used_memory_rss` is the total memory size actually used by Redis, which includes not only the size of data stored in Redis (that is, `used_memory_dataset` above) but also some Redis [system overhead](https://redis.io/commands/memory-stats) (that is, `used_memory_overhead` above). As mentioned earlier that the metadata of each file occupies about 300 bytes, this is actually calculated by `used_memory_dataset`. If you find that the metadata of a single file in your JuiceFS file system occupies much more than 300 bytes, you can try to run [`juicefs gc`](../../reference/command_reference.md#gc) command to clean up possible redundant data. +Among them, `used_memory_rss` is the total memory size actually used by Redis, which includes not only the size of data stored in Redis (that is, `used_memory_dataset` above) but also some Redis [system overhead](https://redis.io/commands/memory-stats) (that is, `used_memory_overhead` above). 
As mentioned earlier that the metadata of each file occupies about 300 bytes, this is actually calculated by `used_memory_dataset`. If you find that the metadata of a single file in your JuiceFS file system occupies much more than 300 bytes, you can try to run [`juicefs gc`](../../reference/command_reference.mdx#gc) command to clean up possible redundant data. ## High availability @@ -124,7 +124,7 @@ After generating the AOF or RDB backup file, you can restore the data by copying If both AOF and RDB persistence are enabled, Redis will use the AOF file first on starting to recover the data because AOF is guaranteed to be the most complete data. -After recovering Redis data, you can continue to use the JuiceFS file system via the new Redis address. It is recommended to run [`juicefs fsck`](../../reference/command_reference.md#fsck) command to check the integrity of the file system data. +After recovering Redis data, you can continue to use the JuiceFS file system via the new Redis address. It is recommended to run [`juicefs fsck`](../../reference/command_reference.mdx#fsck) command to check the integrity of the file system data. ## Recommended Managed Redis Service diff --git a/docs/en/administration/metadata_dump_load.md b/docs/en/administration/metadata_dump_load.md index e9462b78b58c..f74d478078a8 100644 --- a/docs/en/administration/metadata_dump_load.md +++ b/docs/en/administration/metadata_dump_load.md @@ -10,7 +10,7 @@ slug: /metadata_dump_load - JuiceFS v1.0.4 starts to support importing an encrypted backup. ::: -JuiceFS supports [multiple metadata engines](../reference/how_to_set_up_metadata_engine.md), and each engine stores and manages data in a different format internally. JuiceFS provides the [`dump`](../reference/command_reference.md#dump) command to export metadata in a uniform JSON format, also there's the [`load`](../reference/command_reference.md#load) command to restore or migrate backups to any metadata storage engine. 
This dump / load process can also be used to migrate a community edition file system to enterprise edition (read [enterprise docs](https://juicefs.com/docs/cloud/metadata_dump_load) for more), and vice versa. +JuiceFS supports [multiple metadata engines](../reference/how_to_set_up_metadata_engine.md), and each engine stores and manages data in a different format internally. JuiceFS provides the [`dump`](../reference/command_reference.mdx#dump) command to export metadata in a uniform JSON format, also there's the [`load`](../reference/command_reference.mdx#load) command to restore or migrate backups to any metadata storage engine. This dump / load process can also be used to migrate a community edition file system to enterprise edition (read [enterprise docs](https://juicefs.com/docs/cloud/metadata_dump_load) for more), and vice versa. ## Metadata backup {#backup} @@ -81,7 +81,7 @@ JuiceFS periodically cleans up backups according to the following rules. ## Metadata recovery and migration {#recovery-and-migration} -Use the [`load`](../reference/command_reference.md#load) command to restore the metadata dump file into an empty database, for example: +Use the [`load`](../reference/command_reference.mdx#load) command to restore the metadata dump file into an empty database, for example: ```shell juicefs load redis://192.168.1.6:6379 meta-dump.json @@ -111,7 +111,7 @@ It is also possible to migrate directly through the system's pipe: juicefs dump redis://192.168.1.6:6379 | juicefs load mysql://user:password@(192.168.1.6:3306)/juicefs ``` -Note that since the API access key for object storage is excluded by default from the backup, when loading metadata, you need to use the [`juicefs config`](../reference/command_reference.md#config) command to reconfigure the object storage credentials. 
For example: +Note that since the API access key for object storage is excluded by default from the backup, when loading metadata, you need to use the [`juicefs config`](../reference/command_reference.mdx#config) command to reconfigure the object storage credentials. For example: ```shell juicefs config --secret-key xxxxx mysql://user:password@(192.168.1.6:3306)/juicefs diff --git a/docs/en/administration/monitoring.md b/docs/en/administration/monitoring.md index 4875bdbe9e57..1099e758e031 100644 --- a/docs/en/administration/monitoring.md +++ b/docs/en/administration/monitoring.md @@ -89,7 +89,7 @@ For different types of JuiceFS Client, metrics data is handled slightly differen ### Mount point {#mount-point} -When the JuiceFS file system is mounted via the [`juicefs mount`](../reference/command_reference.md#mount) command, you can collect monitoring metrics via the address `http://localhost:9567/metrics`, or you can customize it via the `--metrics` option. For example: +When the JuiceFS file system is mounted via the [`juicefs mount`](../reference/command_reference.mdx#mount) command, you can collect monitoring metrics via the address `http://localhost:9567/metrics`, or you can customize it via the `--metrics` option. For example: ```shell juicefs mount --metrics localhost:9567 ... diff --git a/docs/en/administration/troubleshooting.md b/docs/en/administration/troubleshooting.md index 1eb476abd9f1..a0aa574d4752 100644 --- a/docs/en/administration/troubleshooting.md +++ b/docs/en/administration/troubleshooting.md @@ -98,8 +98,8 @@ If the problem is a network connection issue, or the object storage has service The first issue with slow connection is upload / download timeouts (demonstrated in the above error logs), to tackle this problem: -* Reduce upload concurrency, e.g. [`--max-uploads=1`](../reference/command_reference.md#mount), to avoid upload timeouts. -* Reduce buffer size, e.g. [`--buffer-size=64`](../reference/command_reference.md#mount) or even lower. 
In a large bandwidth condition, increasing buffer size improves parallel performance. But in a low speed environment, this only makes `flush` operations slow and prone to timeouts. +* Reduce upload concurrency, e.g. [`--max-uploads=1`](../reference/command_reference.mdx#mount-data-storage-options), to avoid upload timeouts. +* Reduce buffer size, e.g. [`--buffer-size=64`](../reference/command_reference.mdx#mount-data-cache-options) or even lower. In a large bandwidth condition, increasing buffer size improves parallel performance. But in a low speed environment, this only makes `flush` operations slow and prone to timeouts. * Default timeout for GET / PUT requests are 60 seconds, increasing `--get-timeout` and `--put-timeout` may help with read / write timeouts. In addition, the ["Client Write Cache"](../guide/cache.md#client-write-cache) feature needs to be used with caution in low bandwidth environment. Let's briefly go over the JuiceFS Client background job design: every JuiceFS Client runs background jobs by default, one of which is data compaction, and if the client has poor internet speed, it'll drag down performance for the whole system. A worse case is when client write cache is also enabled, compaction results are uploaded too slowly, forcing other clients into a read hang when accessing the affected files: @@ -111,7 +111,7 @@ In addition, the ["Client Write Cache"](../guide/cache.md#client-write-cache) fe : fail to read sliceId 1771585458 (off:4194304, size:4194304, clen: 37746372): get chunks/0/0/1_0_4194304: oss: service returned error: StatusCode=404, ErrorCode=NoSuchKey, ErrorMessage="The specified key does not exist.", RequestId=62E8FB058C0B5C3134CB80B6 ``` -To avoid this type of issue, we recommend disabling background jobs on low-bandwidth clients, i.e. adding [`--no-bgjob`](../reference/command_reference.md#mount) option to the mount command. +To avoid this type of issue, we recommend disabling background jobs on low-bandwidth clients, i.e. 
adding [`--no-bgjob`](../reference/command_reference.mdx#mount-metadata-options) option to the mount command. ### WARNING log: block not found in object storage {#warning-log-block-not-found-in-object-storage} diff --git a/docs/en/benchmark/performance_evaluation_guide.md b/docs/en/benchmark/performance_evaluation_guide.md index d99216698708..b4ebbfe4c4c6 100644 --- a/docs/en/benchmark/performance_evaluation_guide.md +++ b/docs/en/benchmark/performance_evaluation_guide.md @@ -34,7 +34,7 @@ JuiceFS v1.0+ has Trash enabled by default, which means the benchmark tools will ### `juicefs bench` -The [`juicefs bench`](../reference/command_reference.md#bench) command can help you do a quick performance test on a standalone machine. With the test results, it is easy to evaluate if your environment configuration and JuiceFS performance are normal. Assuming you have mounted JuiceFS to `/mnt/jfs` on your server, execute the following command for this test (the `-p` option is recommended to set to the number of CPU cores on the server). If you need help with initializing or mounting JuiceFS, please refer to [Create a File System](../getting-started/standalone.md#juicefs-format). +The [`juicefs bench`](../reference/command_reference.mdx#bench) command can help you do a quick performance test on a standalone machine. With the test results, it is easy to evaluate if your environment configuration and JuiceFS performance are normal. Assuming you have mounted JuiceFS to `/mnt/jfs` on your server, execute the following command for this test (the `-p` option is recommended to set to the number of CPU cores on the server). If you need help with initializing or mounting JuiceFS, please refer to [Create a File System](../getting-started/standalone.md#juicefs-format). 
```bash juicefs bench /mnt/jfs -p 4 @@ -79,7 +79,7 @@ The data above is from [AWS official documentation](https://docs.aws.amazon.com/ ### `juicefs objbench` -The [`juicefs objbench`](../reference/command_reference.md#objbench) command can run some tests on object storage to evaluate how well it performs as a backend storage for JuiceFS. Take testing Amazon S3 as an example: +The [`juicefs objbench`](../reference/command_reference.mdx#objbench) command can run some tests on object storage to evaluate how well it performs as a backend storage for JuiceFS. Take testing Amazon S3 as an example: ```bash juicefs objbench \ diff --git a/docs/en/deployment/production_deployment_recommendations.md b/docs/en/deployment/production_deployment_recommendations.md index c71d4ef0c41a..b83ec8fb118d 100644 --- a/docs/en/deployment/production_deployment_recommendations.md +++ b/docs/en/deployment/production_deployment_recommendations.md @@ -93,4 +93,4 @@ For details about the logrotate configuration, see this [link](https://linux.die ## Command line auto-completion -JuiceFS provides command line auto-completion scripts for Bash and Zsh to facilitate the use of `juicefs` commands. For details, see this [document](../reference/command_reference.md#Auto-completion) for details. +JuiceFS provides command line auto-completion scripts for Bash and Zsh to facilitate the use of `juicefs` commands. For details, see this [document](../reference/command_reference.mdx#Auto-completion) for details. diff --git a/docs/en/deployment/s3_gateway.md b/docs/en/deployment/s3_gateway.md index d4f172f7f411..c22b520883bc 100644 --- a/docs/en/deployment/s3_gateway.md +++ b/docs/en/deployment/s3_gateway.md @@ -32,7 +32,7 @@ juicefs gateway redis://localhost:6379 localhost:9000 The first two commands of the above three are used to set environment variables. Note that the length of `MINIO_ROOT_USER` is at least 3 characters, and the length of `MINIO_ROOT_PASSWORD` is at least 8 characters. 
If you are a Windows user, replace `export` with `set` in the above commands to set the environment variable. i.e., `set MINIO_ROOT_USER=admin`. -The last command is used to enable the S3 gateway. The `gateway` subcommand requires at least two parameters. The first is the URL of the database where the metadata is stored, and the second is the address and port on which the S3 gateway is listening. You can add [other options](../reference/command_reference.md#gateway) to the `gateway` subcommand to optimize the S3 gateway as needed, for example, to set the default local cache to 20 GiB. +The last command is used to enable the S3 gateway. The `gateway` subcommand requires at least two parameters. The first is the URL of the database where the metadata is stored, and the second is the address and port on which the S3 gateway is listening. You can add [other options](../reference/command_reference.mdx#gateway) to the `gateway` subcommand to optimize the S3 gateway as needed, for example, to set the default local cache to 20 GiB. ```shell juicefs gateway --cache-size 20480 redis://localhost:6379 localhost:9000 @@ -140,7 +140,7 @@ Then download the S3 gateway [deployment YAML](https://github.com/juicedata/juic - The latest version of `juicedata/juicefs-csi-driver` image is used by default, which has already integrated the latest version of JuiceFS client. Please check [here](https://github.com/juicedata/juicefs-csi-driver/releases) for the specific integrated JuiceFS client version. - The `initContainers` of `Deployment` will first try to format the JuiceFS file system, if you have already formatted it in advance, this step will not affect the existing JuiceFS file system. - The default port number that the S3 gateway listens on is 9000 -- The [startup options](../reference/command_reference.md#gateway) of S3 gateway will use default values if not specified. 
+- The [startup options](../reference/command_reference.mdx#gateway) of S3 gateway will use default values if not specified. - The value of `MINIO_ROOT_USER` environment variable is `access-key` in Secret, and the value of `MINIO_ROOT_PASSWORD` environment variable is `secret-key` in Secret. ```shell diff --git a/docs/en/development/internals.md b/docs/en/development/internals.md index 47d0517afb4f..fb987a0f2beb 100644 --- a/docs/en/development/internals.md +++ b/docs/en/development/internals.md @@ -178,7 +178,7 @@ type Attr struct { There are a few fields that need clarification. -- Atime/Atimensec: See [`--atime-mode`](../reference/command_reference.md#mount) +- Atime/Atimensec: See [`--atime-mode`](../reference/command_reference.mdx#mount-metadata-options) - Nlink - Directory file: initial value is 2 ('.' and '..'), add 1 for each subdirectory - Other files: initial value is 1, add 1 for each hard link created diff --git a/docs/en/faq.md b/docs/en/faq.md index 7fb43d615353..a2a64335dda7 100644 --- a/docs/en/faq.md +++ b/docs/en/faq.md @@ -47,14 +47,14 @@ JuiceFS already supported many object storage, please check [the list](reference The first reason is that you may have enabled the trash feature. In order to ensure data security, the trash is enabled by default. The deleted files are actually placed in the trash and are not actually deleted, so the size of the object storage will not change. trash retention time can be specified with `juicefs format` or modified with `juicefs config`. Please refer to the ["Trash"](security/trash.md) documentation for more information. -The second reason is that JuiceFS deletes the data in the object storage asynchronously, so the space change of the object storage will be slower. If you need to immediately clean up the data in the object store that needs to be deleted, you can try running the [`juicefs gc`](reference/command_reference.md#gc) command. 
+The second reason is that JuiceFS deletes the data in the object storage asynchronously, so the space change of the object storage will be slower. If you need to immediately clean up the data in the object store that needs to be deleted, you can try running the [`juicefs gc`](reference/command_reference.mdx#gc) command. ### Why is file system data size different from object storage usage? {#size-inconsistency} -* ["Random write in JuiceFS"](#random-write) produces data fragments, causing higher storage usage for object storage, especially after a large number of overwrites in a short period of time, many fragments will be generated. These fragments continue to occupy space in object storage until they are compacted and released. You shouldn't worry about this because JuiceFS checks for file compaction with every read/write, and cleans up in the client background job. Alternatively, you can manually trigger merges and garbage collection with [`juicefs gc --compact --delete`](./reference/command_reference.md#gc). +* ["Random write in JuiceFS"](#random-write) produces data fragments, causing higher storage usage for object storage, especially after a large number of overwrites in a short period of time, many fragments will be generated. These fragments continue to occupy space in object storage until they are compacted and released. You shouldn't worry about this because JuiceFS checks for file compaction with every read/write, and cleans up in the client background job. Alternatively, you can manually trigger merges and garbage collection with [`juicefs gc --compact --delete`](./reference/command_reference.mdx#gc). * If [Trash](./security/trash.md) is enabled, deleted files will be kept for a specified period of time, and then be garbage collected (all carried out in client background job). * After data fragments are compacted, stale slices will be kept inside Trash as well (not visible to user), following the same expiration settings. 
To delete this type of data, read [Trash and stale slices](./security/trash.md#gc). -* If compression is enabled (the `--compress` parameter in the [`format`](./reference/command_reference.md#format) command, disabled by default), object storage usage may be smaller than the actual file size (depending on the compression ratio of different types of files). +* If compression is enabled (the `--compress` parameter in the [`format`](./reference/command_reference.mdx#format) command, disabled by default), object storage usage may be smaller than the actual file size (depending on the compression ratio of different types of files). * Different [storage class](reference/how_to_set_up_object_storage.md#storage-class) of the object storage may calculate storage usage differently. The cloud service provider may set the minimum billable size for some storage classes. For example, the [minimum billable size](https://www.alibabacloud.com/help/en/object-storage-service/latest/storage-fees) for Alibaba Cloud OSS IA storage is 64KB. If a file is smaller than 64KB, it will be calculated as 64KB. * For self-hosted object storage services, for example MinIO, actual data usage is affected by [storage class settings](https://github.com/minio/minio/blob/master/docs/erasure/storage-class/README.md). @@ -68,7 +68,7 @@ As of the release of JuiceFS 1.0, this feature is not supported. ### Is it possible to bind multiple different object storages to a single file system (e.g. one file system with Amazon S3, GCS and OSS at the same time)? -No. However, you can set up multiple buckets associated with the same object storage service when creating a file system, thus solving the problem of limiting the number of individual bucket objects, for example, multiple S3 Buckets can be associated with a single file system. Please refer to [`--shards`](./reference/command_reference.md#format) option for details. +No. 
However, you can set up multiple buckets associated with the same object storage service when creating a file system, thus solving the problem of limiting the number of individual bucket objects, for example, multiple S3 Buckets can be associated with a single file system. Please refer to [`--shards`](./reference/command_reference.mdx#format) option for details. ## Performance Related Questions @@ -80,7 +80,7 @@ JuiceFS is built with multiple layers of caching (invalidated automatically), on ### Does JuiceFS support random read/write? How? {#random-write} -Yes, including those issued using mmap. Currently JuiceFS is optimized for sequential reading/writing, and optimized for random reading/writing is work in progress. If you want better random read performance, it's best to turn off compression ([`--compress none`](reference/command_reference.md#format)). +Yes, including those issued using mmap. Currently JuiceFS is optimized for sequential reading/writing, and optimized for random reading/writing is work in progress. If you want better random read performance, it's best to turn off compression ([`--compress none`](reference/command_reference.mdx#format)). JuiceFS does not store the original file in the object storage, but splits it into data blocks using a fixed size (4MiB by default), then uploads it to the object storage, and stores the ID of the data block in the metadata engine. When random write happens, the original metadata is marked stale, and then JuiceFS Client uploads the **new data block** to the object storage, then update the metadata accordingly. @@ -90,7 +90,7 @@ Read [JuiceFS Internals](development/internals.md) and [Data Processing Flow](in ### How to copy a large number of small files into JuiceFS quickly? 
-You could mount JuiceFS with [`--writeback` option](reference/command_reference.md#mount), which will write the small files into local disks first, then upload them to object storage in background, this could speedup coping many small files into JuiceFS. +You could mount JuiceFS with [`--writeback` option](reference/command_reference.mdx#mount-data-cache-options), which will write the small files into local disks first, then upload them to object storage in background, this could speedup coping many small files into JuiceFS. See ["Write Cache in Client"](guide/cache.md#client-write-cache) for more information. diff --git a/docs/en/getting-started/standalone.md b/docs/en/getting-started/standalone.md index 5ec74a9eae51..368c2862dcb9 100644 --- a/docs/en/getting-started/standalone.md +++ b/docs/en/getting-started/standalone.md @@ -24,7 +24,7 @@ Once installed successfully, executing the `juicefs` command in the terminal wil ### Basic concept -The JuiceFS client provides a command [`format`](../reference/command_reference.md#format) to create a file system as follows: +The JuiceFS client provides a command [`format`](../reference/command_reference.mdx#format) to create a file system as follows: ```shell juicefs format [command options] META-URL NAME @@ -68,7 +68,7 @@ Since no storage-related options are specified in this example, the local disk i ### Basic concept -The JuiceFS client provides a command [`mount`](../reference/command_reference.md#mount) to mount file systems in the following format: +The JuiceFS client provides a command [`mount`](../reference/command_reference.mdx#mount) to mount file systems in the following format: ```shell juicefs mount [command options] META-URL MOUNTPOINT diff --git a/docs/en/guide/cache.md b/docs/en/guide/cache.md index 1505572bae1c..8899854e2191 100644 --- a/docs/en/guide/cache.md +++ b/docs/en/guide/cache.md @@ -21,7 +21,7 @@ For [metadata](#metadata-cache), the default configuration offers a "close-to-op As for object 
storage, JuiceFS clients split files into data blocks (default 4MiB), each is assigned an unique ID and uploaded to object storage. Subsequent modifications on the file are carried out on new data blocks, and the original blocks remain unchanged. This guarantees consistency of the object storage data, because once the file is modified, clients will then read from the new data blocks, while the stale ones which will be deleted through [Trash](../security/trash.md) or compaction. -[Local file data cache](#client-read-cache) is object storage blocks downloaded into local disks. So consistency depends on the reliability of the disks, if data are tempered, clients will read bad data. To resolve this concern, choose an appropriate [`--verify-cache-checksum`](../reference/command_reference.md#mount) strategy to ensure data integrity. +[Local file data cache](#client-read-cache) is object storage blocks downloaded into local disks. So consistency depends on the reliability of the disks, if data are tampered with, clients will read bad data. To resolve this concern, choose an appropriate [`--verify-cache-checksum`](../reference/command_reference.mdx#mount-data-cache-options) strategy to ensure data integrity. ## Metadata cache {#metadata-cache} @@ -54,7 +54,7 @@ When JuiceFS Client `open` a file, its file attributes are cached in client memory To maintain the default close-to-open consistency, `open` calls will always query metadata service, bypassing local cache, modifications done by client A isn't guaranteed available immediately for client B, but once A closes file, all other clients (across different nodes) will see the latest state. File attribute cache isn't necessarily obtained through `open`, for example `tail -f` will periodically query attributes, in this case, latest state is fetched without reopening the file.
-To utilize the memory metadata cache, use [`--open-cache`](../reference/command_reference.md#mount) to specify its TTL, so that before cache expiration, `getattr` and `open` calls directly uses the slice information in client memory. These cached information avoids the overhead of querying metadata service on every call. +To utilize the memory metadata cache, use [`--open-cache`](../reference/command_reference.mdx#mount-metadata-cache-options) to specify its TTL, so that before cache expiration, `getattr` and `open` calls directly uses the slice information in client memory. These cached information avoids the overhead of querying metadata service on every call. With `--open-cache` enabled, JuiceFS no longer operates under close-to-open consistency, but similar to kernel metadata cache, the client initiating the modifications can also actively invalidate client memory metadata cache, while other clients can only wait for expiration. That's why in order to maintain semantics, `--open-cache` is disabled by default. For read intensive (or read-only) scenarios, such as AI model training, it is recommended to set `--open-cache` according to the situation to further improve the read performance. @@ -70,11 +70,11 @@ The metadata cache in discussed above really only pertain to multi-client situat * The mount point initiating changes have access to file change events, and can use tools like [`fswatch`](https://emcrisostomo.github.io/fswatch/) or [`Watchdog`](https://python-watchdog.readthedocs.io/en/stable). But the scope is obviously limited to the files changed within the mount point itself, i.e. files modified by A cannot be monitored by mount point B. * Due to the fact that FUSE doesn't yet support inotify API, if you'd like to monitor file change events using libraries like [Watchdog](https://python-watchdog.readthedocs.io/en/stable), you can only achieve this via polling (e.g. 
[`PollingObserver`](https://python-watchdog.readthedocs.io/en/stable/_modules/watchdog/observers/polling.html#PollingObserver)). -## Read/Write Buffer {#buffer-size} +## Read/Write buffer {#buffer-size} -The Read/Write buffer is a memory space allocated to the JuiceFS Client, size controlled by [`--buffer-size`](../reference/command_reference.md#mount) which defaults to 300 (in MiB). Read/Write data all pass through this buffer, making it crucial for all I/O operations, that's why under large scale scenarios, increase buffer size is often used as a first step of optimization. +The Read/Write buffer is a memory space allocated to the JuiceFS Client, size controlled by [`--buffer-size`](../reference/command_reference.mdx#mount-data-cache-options) which defaults to 300 (in MiB). Read/Write data all pass through this buffer, making it crucial for all I/O operations, that's why under large scale scenarios, increase buffer size is often used as a first step of optimization. -### Readahead and Prefetch {#readahead-prefetch} +### Readahead and prefetch {#readahead-prefetch} :::tip To accurately describe the internal mechanism of JuiceFS Client, we use the term "readahead" and "prefetch" to refer to the two different behaviors that both download data ahead of time to increase read performance. @@ -88,7 +88,7 @@ Apparently readahead is only good for sequential reads, that's why there's anoth ![prefetch](../images/buffer-prefetch.svg) -This mechanism assumes that if a file is randomly read at a given range, then its adjacent content is also more likely to get read momentarily. This isn't necessarily true for various different types of applications, for example, if an application decides to read read a huge file in a very sparse fashion, i.e. read offsets are far from each other. 
In such case, prefetch isn't really useful and can cause serious read amplification, so if you are already familiar with the file system access pattern of your application, and concluded that prefetch isn't really needed, you can disable by using [`--prefetch=0`](../reference/command_reference.md#mount-data-cache-options). +This mechanism assumes that if a file is randomly read at a given range, then its adjacent content is also more likely to get read momentarily. This isn't necessarily true for various different types of applications, for example, if an application decides to read a huge file in a very sparse fashion, i.e. read offsets are far from each other. In such case, prefetch isn't really useful and can cause serious read amplification, so if you are already familiar with the file system access pattern of your application, and concluded that prefetch isn't really needed, you can disable by using [`--prefetch=0`](../reference/command_reference.mdx#mount-data-cache-options). Readahead and prefetch effectively increase sequential read and random read performance, but it also comes with read amplification, read ["Read amplification"](../administration/troubleshooting.md#read-amplification) for more information. @@ -106,17 +106,17 @@ Buffer is shared by both read & write, obviously write is treated with higher pr As illustrated above, a high write load puts too much pending slices inside the buffer, leaving little buffer space for readahead, file read will hence slow down. Due to a low upload speed, write may also fail due to `flush` timeouts. -### Observation and Optimization +### Observation and optimization {#buffer-observation} Buffer is crucial to both read & write, as is already introduced in above sections, making `--buffer-size` the first optimization target when faced with large scale scenarios. But simply increasing buffer size is not enough and might cause other problems (like buffer congestion, illustrated in the above section).
The size of the buffer should be smartly decided along with other performance options. Before making any adjustments, we recommend running a [`juicefs stats`](../administration/fault_diagnosis_and_analysis.md#stats) command to check the current buffer usage, and read below content to guide your tuning. -If you wish to improve write speed, and have already increased [`--max-uploads`](../reference/command_reference.md#mount-data-cache-options) for more upload concurrency, with no noticeable increase in upload traffic, consider also increasing `--buffer-size` so that concurrent threads may easier allocate memory for data uploads. This also works in the opposite direction: if tuning up `--buffer-size` didn't bring out an increase in upload traffic, you should probably increase `--max-uploads` as well. +If you wish to improve sequential read speed, use a larger `--buffer-size` to expand the readahead window, all data blocks within the window will be concurrently fetched from object storage. Also keep in mind that, reading a single large file will never consume the full buffer, the space reserved for readahead is between 1/4 to 1/2 of the total buffer size. So if you noticed that `juicefs stats` indicates `buf` is already half full, while performing sequential read on a single large file, then it's time to increase `--buffer-size` to set a larger readahead window. -The `--buffer-size` also controls the data upload size for each `flush` operation, this means for clients working in a low bandwidth environment, you may need to use a lower `--buffer-size` to avoid `flush` timeouts. Refer to ["Connection problems with object storage"](../administration/troubleshooting.md#io-error-object-storage) for troubleshooting under low internet speed. 
+If you wish to improve write speed, and have already increased [`--max-uploads`](../reference/command_reference.mdx#mount-data-storage-options) for more upload concurrency, with no noticeable increase in upload traffic, consider also increasing `--buffer-size` so that concurrent threads may easier allocate memory for data uploads. This also works in the opposite direction: if tuning up `--buffer-size` didn't bring out an increase in upload traffic, you should probably increase `--max-uploads` as well. -If you wish to improve sequential read speed, use a larger `--buffer-size` to expand the readahead window, all data blocks within the window will be concurrently fetched from object storage. Also keep in mind that, reading a single large file will never consume the full buffer, the space reserved for readahead is between 1/4 to 1/2 of the total buffer size. So if you noticed that `juicefs stats` indicates `buf` is already half full, while performing sequential read on a single large file, then it's time to increase `--buffer-size` to set a larger readahead window. +The `--buffer-size` also controls the data upload size for each `flush` operation, this means for clients working in a low bandwidth environment, you may need to use a lower `--buffer-size` to avoid `flush` timeouts. Refer to ["Connection problems with object storage"](../administration/troubleshooting.md#io-error-object-storage) for troubleshooting under low internet speed. ## Data cache {#data-cache} @@ -124,14 +124,6 @@ To improve performance, JuiceFS also provides various caching mechanisms for dat ![JuiceFS-cache](../images/juicefs-cache.png) -### Read/Write buffer {#buffer-size} - -Mount parameter [`--buffer-size`](../reference/command_reference.md#mount) controls the Read/Write buffer size for JuiceFS Client, which defaults to 300 (in MiB). Buffer size dictates both the memory data size for file read (and readahead), and memory data size for writing pending pages. 
Naturally, we recommend increasing `--buffer-size` when under high concurrency, to effectively improve performance. - -If you wish to improve write speed, and have already increased [`--max-uploads`](../reference/command_reference.md#mount) for more upload concurrency, with no noticeable increase in upload traffic, consider also increasing `--buffer-size` so that concurrent threads may easier allocate memory for data uploads. This also works in the opposite direction: if tuning up `--buffer-size` didn't bring out an increase in upload traffic, you should probably increase `--max-uploads` as well. - -The `--buffer-size` also controls the data upload size for each `flush` operation, this means for clients working in a low bandwidth environment, you may need to use a lower `--buffer-size` to avoid `flush` timeouts. Refer to ["Connection problems with object storage"](../administration/troubleshooting.md#io-error-object-storage) for troubleshooting under low internet speed. - ### Kernel page cache {#kernel-data-cache} Kernel will build page cache for opened files. If this file is not updated (i.e. `mtime` doesn't change) afterwards, it will be read directly from the page cache to achieve the best performance. @@ -144,17 +136,17 @@ Repeated reads of the same file in JuiceFS can be extremely fast, with latencies Starting from Linux kernel 3.15, FUSE supports [writeback-cache](https://www.kernel.org/doc/Documentation/filesystems/fuse-io.txt) mode, the kernel will consolidate high-frequency random small (10-100 bytes) write requests to significantly improve its performance, but this comes with a side effect: sequential writes are also turned into random writes, hence sequential write performance is hindered, so only use it on intensive random write scenarios. -To enable writeback-cache mode, use the [`-o writeback_cache`](../reference/fuse_mount_options.md#writeback_cache) option when you [mount JuiceFS](../reference/command_reference.md#mount). 
Note that writeback-cache mode is not the same as [Client write data cache](#client-write-cache), the former is a kernel implementation while the latter happens inside the JuiceFS Client, read the corresponding section to learn their intended scenarios. +To enable writeback-cache mode, use the [`-o writeback_cache`](../reference/fuse_mount_options.md#writeback_cache) option when you [mount JuiceFS](../reference/command_reference.mdx#mount). Note that writeback-cache mode is not the same as [Client write data cache](#client-write-cache), the former is a kernel implementation while the latter happens inside the JuiceFS Client, read the corresponding section to learn their intended scenarios. ### Read cache in client {#client-read-cache} The client will perform prefetch and cache automatically to improve sequence read performance according to the read mode in the application. Data will be cached in local file system, which can be any local storage device like HDD, SSD or even memory. -Data downloaded from object storage, as well as small data (smaller than a single block) uploaded to object storage will be cached by JuiceFS Client, without compression or encryption. To achieve better performance on application's first read, use [`juicefs warmup`](../reference/command_reference.md#warmup) to cache data in advance. +Data downloaded from object storage, as well as small data (smaller than a single block) uploaded to object storage will be cached by JuiceFS Client, without compression or encryption. To achieve better performance on application's first read, use [`juicefs warmup`](../reference/command_reference.mdx#warmup) to cache data in advance. If the file system where the cache directory is located is not working properly, the JuiceFS client can immediately return an error and downgrade to direct access to object storage. 
This is usually true for local disk, but if the file system where the cache directory is located is abnormal and the read operation is stuck (such as some kernel-mode network file system), then JuiceFS will also get stuck together. This requires you to tune the underlying file system behavior of the cache directory to fail fast. -Below are some important options for cache configuration (see [`juicefs mount`](../reference/command_reference.md#mount) for complete reference): +Below are some important options for cache configuration (see [`juicefs mount`](../reference/command_reference.mdx#mount) for complete reference): * `--prefetch` @@ -188,7 +180,7 @@ Client write cache is disabled by default, data writes will be held in the [read You can see how the default "upload first, then commit" write process will not perform well when writing large amount of small files. After the client write cache is enabled, the write process becomes "commit first, then upload asynchronously", file writes will not be blocked by data uploads, instead it will be written to the local cache directory and committed to the metadata service, and then returned immediately. The file data in the cache directory will be asynchronously uploaded to the object storage. -If you need to use JuiceFS as a temporary storage, which doesn't require persistence and distributed access, use [`--upload-delay`](../reference/command_reference.md#mount) to delay data upload, this saves the upload process if files are deleted during the delay. Meanwhile, compared with a local disk, JuiceFS uploads files automatically when the cache directory is running out of space, which keeps the applications away from unexpected failures. +If you need to use JuiceFS as a temporary storage, which doesn't require persistence and distributed access, use [`--upload-delay`](../reference/command_reference.mdx#mount-data-cache-options) to delay data upload, this saves the upload process if files are deleted during the delay. 
Meanwhile, compared with a local disk, JuiceFS uploads files automatically when the cache directory is running out of space, which keeps the applications away from unexpected failures. Add `--writeback` to the mount command to enable client write cache, but this mode comes with some risks and caveats: diff --git a/docs/en/guide/clone.md b/docs/en/guide/clone.md index 30c1c52dba58..cf98dcb73afe 100644 --- a/docs/en/guide/clone.md +++ b/docs/en/guide/clone.md @@ -32,4 +32,4 @@ In terms of transaction consistency, cloning behaves as follows: - For directory: The `clone` command does not guarantee atomicity for directories. In other words, if the source directory changes during the cloning process, the target directory may be different from the source directory. - Only one `clone` can be successfully created from the same location at the same time. The failed clone will clean up the temporarily created directory tree. -The clone is done by the mount process, it will be interrupted if `clone` command is terminated. If the clone fails or is interrupted, `mount` process will cleanup any created inodes. If the mount process fails to do that, there could be some leaking the metadata engine and object storage, because the dangling tree still hold the references to underlying data blocks. They could be cleaned up by the [`juicefs gc --delete`](../reference/command_reference.md#gc) command. +The clone is done by the mount process, it will be interrupted if `clone` command is terminated. If the clone fails or is interrupted, `mount` process will cleanup any created inodes. If the mount process fails to do that, there could be some leakage in the metadata engine and object storage, because the dangling tree still holds the references to underlying data blocks. They could be cleaned up by the [`juicefs gc --delete`](../reference/command_reference.mdx#gc) command.
diff --git a/docs/en/guide/gateway.md b/docs/en/guide/gateway.md index 8d8157a74b89..560756b97944 100644 --- a/docs/en/guide/gateway.md +++ b/docs/en/guide/gateway.md @@ -44,7 +44,7 @@ Common application scenarios for JuiceFS S3 Gateway include: The `gateway` subcommand requires at least two parameters: the database URL for storing metadata and the address/port for JuiceFS S3 Gateway to listen on. Since version 1.2, JuiceFS supports running services in the background with options such as `--background` or `-d`, allowing them to operate as background processes. - By default, [Multi-bucket support](#multi-bucket-support) is not enabled. You can enable it by adding the `--multi-buckets` option. Additionally, you can add [other options](../reference/command_reference.md#gateway) to `gateway` subcommands as needed. For example, you can set the default local cache to 20 GiB. + By default, [Multi-bucket support](#multi-bucket-support) is not enabled. You can enable it by adding the `--multi-buckets` option. Additionally, you can add [other options](../reference/command_reference.mdx#gateway) to `gateway` subcommands as needed. For example, you can set the default local cache to 20 GiB. ```shell juicefs gateway --cache-size 20480 redis://localhost:6379/1 localhost:9000 diff --git a/docs/en/guide/quota.md b/docs/en/guide/quota.md index fd10c73b0afc..8b8f980cd44e 100644 --- a/docs/en/guide/quota.md +++ b/docs/en/guide/quota.md @@ -206,7 +206,7 @@ JuiceFS allows nested quota to be set on multiple levels of directories, client ### Subdirectory mount {#subdirectory-mount} -JuiceFS supports mounting arbitrary subdirectories using [`--subdir`](../reference/command_reference.md#mount). If the directory quota is set for the mounted subdirectory, you can use the `df` command that comes with the system to view the directory quota and current usage. For example, the file system quota is 1PiB and 10M inodes, while the quota for the `/test` directory is 1GiB and 400 inodes. 
The output of the `df` command when mounted using the root directory is: +JuiceFS supports mounting arbitrary subdirectories using [`--subdir`](../reference/command_reference.mdx#mount-metadata-options). If the directory quota is set for the mounted subdirectory, you can use the `df` command that comes with the system to view the directory quota and current usage. For example, the file system quota is 1PiB and 10M inodes, while the quota for the `/test` directory is 1GiB and 400 inodes. The output of the `df` command when mounted using the root directory is: ```shell $ df -h diff --git a/docs/en/guide/sync.md b/docs/en/guide/sync.md index 50ecf9de084b..9963891bbdfd 100644 --- a/docs/en/guide/sync.md +++ b/docs/en/guide/sync.md @@ -4,7 +4,7 @@ sidebar_position: 7 description: Learn how to use the data sync tool in JuiceFS. --- -[`juicefs sync`](../reference/command_reference.md#sync) is a powerful data migration tool, which can copy data across all supported storages including object storage, JuiceFS itself, and local file systems, you can freely copy data between any of these systems. In addition, it supports remote directories through SSH, HDFS, WebDAV, etc. while providing advanced features such as incremental synchronization, and pattern matching (like rsync), and distributed syncing. +[`juicefs sync`](../reference/command_reference.mdx#sync) is a powerful data migration tool, which can copy data across all supported storages including object storage, JuiceFS itself, and local file systems, you can freely copy data between any of these systems. In addition, it supports remote directories through SSH, HDFS, WebDAV, etc. while providing advanced features such as incremental synchronization, and pattern matching (like rsync), and distributed syncing. ## Basic Usage @@ -20,7 +20,7 @@ Arguments: - `SRC` is the source data address or path; - `DST` is the destination address or path; -- `[command options]` are synchronization options. 
See [command reference](../reference/command_reference.md#sync) for more details. +- `[command options]` are synchronization options. See [command reference](../reference/command_reference.mdx#sync) for more details. Address format: @@ -276,8 +276,8 @@ Here are some examples of layered filtering with `exclude`/`include` rules: + For `dir_name/***`, it matches all files at all layers under the `dir_name` directory. Note that each subpath element is recursively traversed from top to bottom, so `include`/`exclude` matching rules apply recursively to each full path element. For example, to include `/foo/bar/baz`, both `/foo` and `/foo/bar` should not be excluded. When a file is found to be transferred, the exclusion matching pattern short-circuits the exclusion traversal at that file's directory layer. If a parent directory is excluded, deeper include pattern matching is ineffective. This is crucial when using trailing `*`. For example, the following example will not work as expected: ``` - --include='/some/path/this-file-will-not-be-found' - --include='/file-is-included' + --include='/some/path/this-file-will-not-be-found' + --include='/file-is-included' --exclude='*' ``` diff --git a/docs/en/introduction/io_processing.md b/docs/en/introduction/io_processing.md index 4b034a6c6c81..c7fb9b88e999 100644 --- a/docs/en/introduction/io_processing.md +++ b/docs/en/introduction/io_processing.md @@ -13,7 +13,7 @@ Sequential writes are optimized, requiring only one continuously growing slice a ![internals-write](../images/internals-write.png) -Use [`juicefs stats`](../reference/command_reference.md#stats) to obtain real-time performance monitoring metrics. +Use [`juicefs stats`](../reference/command_reference.mdx#stats) to obtain real-time performance monitoring metrics. 
![internals-stats](../images/internals-stats.png) @@ -32,7 +32,7 @@ When JuiceFS uploads objects smaller than the block size, it simultaneously writ Write operations are immediately committed to the client buffer, resulting in very low write latency (typically just a few microseconds). The actual upload to the object storage is automatically triggered internally when certain conditions are met, such as when the size or number of slices exceeds their limit, or data stays in the buffer for too long. Explicit calls, such as closing a file or invoking `fsync`, can also trigger uploading. -The client buffer is only released after the data stored inside is uploaded. In scenarios with high write concurrency, if the buffer size (configured using [`--buffer-size`](../reference/command_reference.md#mount)) is not big enough, or the object storage's performance insufficient, write blocking may occur, because the buffer cannot be released timely. The real-time buffer usage is shown in the `usage.buf` field in the metrics figure. To slow things down, The JuiceFS client introduces a 10 ms delay to every write when the buffer usage exceeds the threshold. If the buffer usage is over twice the threshold, new writes are completely suspended until the buffer is released. Therefore, if the write latency keeps increasing or the buffer usage has exceeded the threshold for a long while, you should increase `--buffer-size`. Also consider increasing the maximum number of upload concurrency ([`--max-uploads`](../reference/command_reference.md#mount), defaults to 20), which improves the upload bandwidth, thus boosting buffer release. +The client buffer is only released after the data stored inside is uploaded. 
In scenarios with high write concurrency, if the buffer size (configured using [`--buffer-size`](../reference/command_reference.mdx#mount-data-cache-options)) is not big enough, or the object storage's performance insufficient, write blocking may occur, because the buffer cannot be released timely. The real-time buffer usage is shown in the `usage.buf` field in the metrics figure. To slow things down, The JuiceFS client introduces a 10 ms delay to every write when the buffer usage exceeds the threshold. If the buffer usage is over twice the threshold, new writes are completely suspended until the buffer is released. Therefore, if the write latency keeps increasing or the buffer usage has exceeded the threshold for a long while, you should increase `--buffer-size`. Also consider increasing the maximum number of upload concurrency ([`--max-uploads`](../reference/command_reference.mdx#mount-data-storage-options), defaults to 20), which improves the upload bandwidth, thus boosting buffer release. ### Random writes {#random-write} @@ -52,7 +52,7 @@ Learn more in [Client Write Cache](../guide/cache.md#client-write-cache). ## Data reading process {#workflow-of-read} -JuiceFS supports sequential reads and random reads (including mmap-based random reads). During read requests, the object corresponding to the block is completely read through the `GetObject` API of the object storage, or only a certain range of data in the object may be read (e.g., the read range is limited by the `Range` parameter of [S3 API](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)). Meanwhile, prefetching is performed (controlled by the [`--prefetch`](../reference/command_reference.md#mount) option) to download the complete data block into the local cache directory, as shown in the `blockcache` write speed in the second stage of the above metrics figure. This is very good for sequential reads as all cached data is utilized, maximizing the object storage access efficiency. 
The dataflow is illustrated in the figure below: +JuiceFS supports sequential reads and random reads (including mmap-based random reads). During read requests, the object corresponding to the block is completely read through the `GetObject` API of the object storage, or only a certain range of data in the object may be read (e.g., the read range is limited by the `Range` parameter of [S3 API](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)). Meanwhile, prefetching is performed (controlled by the [`--prefetch`](../reference/command_reference.mdx#mount) option) to download the complete data block into the local cache directory, as shown in the `blockcache` write speed in the second stage of the above metrics figure. This is very good for sequential reads as all cached data is utilized, maximizing the object storage access efficiency. The dataflow is illustrated in the figure below: ![internals-read](../images/internals-read.png) diff --git a/docs/en/reference/_common_options.mdx b/docs/en/reference/_common_options.mdx new file mode 100644 index 000000000000..73b56726be59 --- /dev/null +++ b/docs/en/reference/_common_options.mdx @@ -0,0 +1,69 @@ +#### Metadata related options {#mount-metadata-options} + +|Items|Description| +|-|-| +|`--subdir=value`|mount a sub-directory as root (default: "")| +|`--backup-meta=3600`|interval (in seconds) to automatically backup metadata in the object storage (0 means disable backup) (default: "3600")| +|`--backup-skip-trash` 1.2|skip files and directories in trash when backup metadata.| +|`--heartbeat=12`|interval (in seconds) to send heartbeat; it's recommended that all clients use the same heartbeat value (default: "12")| +|`--read-only`|allow lookup/read operations only (default: false)| +|`--no-bgjob`|Disable background jobs, default to false, which means clients by default carry out background jobs, including:
  • Clean up expired files in Trash (look for `cleanupDeletedFiles`, `cleanupTrash` in [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go))
  • Delete slices that's not referenced (look for `cleanupSlices` in [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go))
  • Clean up stale client sessions (look for `CleanStaleSessions` in [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go))
Note that compaction isn't affected by this option, it happens automatically with file reads and writes, client will check if compaction is in need, and run in background (take Redis for example, look for `compactChunk` in [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/redis.go)).| +|`--atime-mode=noatime` 1.1 |Control atime (last time the file was accessed) behavior, support the following modes:
  • `noatime` (default): set when the file is created or when `SetAttr` is explicitly called. Accessing and modifying the file will not affect atime, tracking atime comes at a performance cost, so this is the default behavior
  • `relatime`: update inode access times relative to mtime (last time when the file data was modified) or ctime (last time when file metadata was changed). Only update atime if atime was earlier than the current mtime or ctime, or the file's atime is more than 1 day old
  • `strictatime`: always update atime on access
| +|`--skip-dir-nlink=20` 1.1 |number of retries after which the update of directory nlink will be skipped (used for tkv only, 0 means never) (default: 20)| +|`--skip-dir-mtime=100ms` 1.2|skip updating attribute of a directory if the mtime difference is smaller than this value (default: 100ms)| + +#### Metadata cache related options {#mount-metadata-cache-options} + +For metadata cache description and usage, refer to [Kernel metadata cache](../guide/cache.md#kernel-metadata-cache) and [Client memory metadata cache](../guide/cache.md#client-memory-metadata-cache). + +|Items|Description| +|-|-| +|`--attr-cache=1`|attributes cache timeout in seconds (default: 1), read [Kernel metadata cache](../guide/cache.md#kernel-metadata-cache)| +|`--entry-cache=1`|file entry cache timeout in seconds (default: 1), read [Kernel metadata cache](../guide/cache.md#kernel-metadata-cache)| +|`--dir-entry-cache=1`|dir entry cache timeout in seconds (default: 1), read [Kernel metadata cache](../guide/cache.md#kernel-metadata-cache)| +|`--open-cache=0`|open file cache timeout in seconds (0 means disable this feature) (default: 0)| +|`--open-cache-limit value` 1.1 |max number of open files to cache (soft limit, 0 means unlimited) (default: 10000)| + +#### Data storage related options {#mount-data-storage-options} + +|Items|Description| +|-|-| +|`--storage=file`|Object storage type (e.g. 
`s3`, `gs`, `oss`, `cos`) (default: `"file"`, refer to [documentation](../reference/how_to_set_up_object_storage.md#supported-object-storage) for all supported object storage types).| +|`--bucket=value`|customized endpoint to access object storage| +|`--storage-class value` 1.1 |the storage class for data written by current client| +|`--get-timeout=60`|the max number of seconds to download an object (default: 60)| +|`--put-timeout=60`|the max number of seconds to upload an object (default: 60)| +|`--io-retries=10`|The number of retries when the network is abnormal and the number of retries for metadata requests are also controlled by this option. If the number of retries is exceeded, an `EIO Input/output error` error will be returned. (default: 10)| +|`--max-uploads=20`|Upload concurrency, defaults to 20. This is already a reasonably high value for 4M writes, with such write pattern, increasing upload concurrency usually demands higher `--buffer-size`, learn more at [Read/Write Buffer](../guide/cache.md#buffer-size). But for random writes around 100K, 20 might not be enough and can cause congestion at high load, consider using a larger upload concurrency, or try to consolidate small writes in the application end. | +|`--max-stage-write=0` 1.2|The maximum number of concurrent writes of data blocks to the cache disk asynchronously. 
If the maximum number of concurrent writes is reached, the object storage will be uploaded directly (this option is only valid when ["Client write data cache"](../guide/cache.md#client-write-cache) is enabled) (default value: 0, that is, no concurrency limit)| +|`--max-deletes=10`|number of threads to delete objects (default: 10)| +|`--upload-limit=0`|bandwidth limit for upload in Mbps (default: 0)| +|`--download-limit=0`|bandwidth limit for download in Mbps (default: 0)| + +#### Data cache related options {#mount-data-cache-options} + +|Items|Description| +|-|-| +|`--buffer-size=300`|total read/write buffering in MiB (default: 300), see [Read/Write buffer](../guide/cache.md#buffer-size)| +|`--prefetch=1`|prefetch N blocks in parallel (default: 1), see [Client read data cache](../guide/cache.md#client-read-cache)| +|`--writeback`|upload objects in background (default: false), see [Client write data cache](../guide/cache.md#client-write-cache)| +|`--upload-delay=0`|When `--writeback` is enabled, you can use this option to add a delay to object storage upload, default to 0, meaning that upload will begin immediately after write. Different units are supported, including `s` (second), `m` (minute), `h` (hour). If files are deleted during this delay, upload will be skipped entirely, when using JuiceFS for temporary storage, use this option to reduce resource usage. Refer to [Client write data cache](../guide/cache.md#client-write-cache).| +|`--upload-hours` 1.2|When `--writeback` is enabled, data blocks are only uploaded during the specified time of day. The format of the parameter is `,` (including "start hour", but not including "end hour", "start hour" must be less than or greater than "end hour"), where `` can range from 0 to 23. 
For example, `0,6` means that data blocks are only uploaded between 0:00 and 5:59 every day, and `23,3` means that data blocks are only uploaded between 23:00 every day and 2:59 the next day.| +|`--cache-dir=value`|directory paths of local cache, use `:` (Linux, macOS) or `;` (Windows) to separate multiple paths (default: `$HOME/.juicefs/cache` or `/var/jfsCache`), see [Client read data cache](../guide/cache.md#client-read-cache)| +|`--cache-mode value` 1.1 |file permissions for cached blocks (default: "0600")| +|`--cache-size=102400`|size of cached object for read in MiB (default: 102400), see [Client read data cache](../guide/cache.md#client-read-cache)| +|`--free-space-ratio=0.1`|min free space ratio (default: 0.1), if [Client write data cache](../guide/cache.md#client-write-cache) is enabled, this option also controls write cache size, see [Client read data cache](../guide/cache.md#client-read-cache)| +|`--cache-partial-only`|cache random/small read only (default: false), see [Client read data cache](../guide/cache.md#client-read-cache)| +|`--verify-cache-checksum=full` 1.1 |Checksum level for cache data. After enabled, checksum will be calculated on divided parts of the cache blocks and stored on disks, which are used for verification during reads. The following strategies are supported:
  • `none`: Disable checksum verification; if local cache data is tampered with, bad data will be read;
  • `full` (default): Perform verification when reading the full block, use this for sequential read scenarios;
  • `shrink`: Perform verification on parts that are fully included within the read range, use this for random read scenarios;
  • `extend`: Perform verification on parts that fully include the read range; this causes read amplification and is only suitable for random read scenarios demanding absolute data integrity.
| +|`--cache-eviction=2-random` 1.1 |cache eviction policy (`none` or `2-random`) (default: "2-random")| +|`--cache-scan-interval=1h` 1.1 |interval (in seconds) to scan cache-dir to rebuild in-memory index (default: "1h")| +|`--cache-expire=0` 1.2|Cache blocks that have not been accessed for more than the set time, in seconds, will be automatically cleared (even if the value of `--cache-eviction` is `none`, these cache blocks will be deleted). A value of 0 means never expires (default: 0)| + +#### Metrics related options {#mount-metrics-options} + +||Items|Description| +|-|-| +|`--metrics=127.0.0.1:9567`|address to export metrics (default: `127.0.0.1:9567`)| +|`--custom-labels`|custom labels for metrics, format: `key1:value1;key2:value2` (default: "")| +|`--consul=127.0.0.1:8500`|Consul address to register (default: `127.0.0.1:8500`)| +|`--no-usage-report`|do not send usage report (default: false)| diff --git a/docs/en/reference/command_reference.md b/docs/en/reference/command_reference.mdx similarity index 78% rename from docs/en/reference/command_reference.md rename to docs/en/reference/command_reference.mdx index 70c113c30608..dc93cb6215c6 100644 --- a/docs/en/reference/command_reference.md +++ b/docs/en/reference/command_reference.mdx @@ -8,6 +8,8 @@ description: Descriptions, usage and examples of all commands and options includ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +import CommonOptions from './_common_options.mdx'; + Running `juicefs` by itself and it will print all available commands. In addition, you can add `-h/--help` flag after each command to get more information, e.g., `juicefs format -h`. ``` @@ -18,7 +20,7 @@ USAGE: juicefs [global options] command [command options] [arguments...] VERSION: - 1.1.0 + 1.2.0 COMMANDS: ADMIN: @@ -168,7 +170,7 @@ juicefs format sqlite3://myjfs.db myjfs --trash-days=0 |Items|Description| |-|-| -|`--block-size=4096`|size of block in KiB (default: 4096). 
4M is usually a better default value because many object storage services use 4M as their internal block size, thus using the same block size in JuiceFS usually yields better performance.| +|`--block-size=4M`|size of block in KiB (default: 4M). 4M is usually a better default value because many object storage services use 4M as their internal block size, thus using the same block size in JuiceFS usually yields better performance.| |`--compress=none`|compression algorithm, choose from `lz4`, `zstd`, `none` (default). Enabling compression will inevitably affect performance. Among the two supported algorithms, `lz4` offers a better performance, while `zstd` comes with a higher compression ratio, Google for their detailed comparison.| |`--encrypt-rsa-key=value`|A path to RSA private key (PEM)| |`--encrypt-algo=aes256gcm-rsa`|encrypt algorithm (aes256gcm-rsa, chacha20-rsa) (default: "aes256gcm-rsa")| @@ -233,11 +235,11 @@ juicefs config redis://localhost --min-client-version 1.0.0 --max-client-version |`--capacity value`|limit for space in GiB| |`--inodes value`|limit for number of inodes| |`--trash-days value`|number of days after which removed files will be permanently deleted| +|`--enable-acl` 1.2|enable [POSIX ACL](../security/posix_acl.md) (irreversible), at the same time, the minimum client version allowed to connect will be upgraded to v1.2| |`--encrypt-secret`|encrypt the secret key if it was previously stored in plain format (default: false)| |`--min-client-version value` 1.1 |minimum client version allowed to connect| |`--max-client-version value` 1.1 |maximum client version allowed to connect| |`--dir-stats` 1.1 |enable dir stats, which is necessary for fast summary and dir quota (default: false)| -|`--enable-acl` 1.2|enable POSIX ACL(irreversible), min-client-version will be set to v1.2| ### `juicefs quota` 1.1 {#quota} @@ -319,8 +321,8 @@ juicefs gc redis://localhost --delete |Items|Description| |-|-| -|`--delete`|delete leaked objects (default: false)| 
|`--compact`|compact all chunks with more than 1 slices (default: false).| +|`--delete`|delete leaked objects (default: false)| |`--threads=10`|number of threads to delete leaked objects (default: 10)| ### `juicefs fsck` {#fsck} @@ -387,6 +389,7 @@ juicefs dump redis://localhost sub-meta-dump.json --subdir /dir/in/jfs |`FILE`|Export file path, if not specified, it will be exported to standard output. If the filename ends with `.gz`, it will be automatically compressed.| |`--subdir=path`|Only export metadata for the specified subdirectory.| |`--keep-secret-key` 1.1 |Export object storage authentication information, the default is `false`. Since it is exported in plain text, pay attention to data security when using it. If the export file does not contain object storage authentication information, you need to use [`juicefs config`](#config) to reconfigure object storage authentication information after the subsequent import is completed.| +|`--threads=10` 1.2|number of threads to dump metadata. 
(default: 10)| |`--fast` 1.2|Use more memory to speedup dump.| |`--skip-trash` 1.2|Skip files and directories in trash.| @@ -536,8 +539,8 @@ juicefs debug --out-dir=/var/log --limit=1000 /mnt/jfs |Items|Description| |-|-| |`--out-dir=./debug/`|The output directory of the results, automatically created if the directory does not exist (default: `./debug/`)| -|`--stats-sec=5`|The number of seconds to sample .stats file (default: 5)| |`--limit=value`|The number of log entries collected, from newest to oldest, if not specified, all entries will be collected| +|`--stats-sec=5`|The number of seconds to sample .stats file (default: 5)| |`--trace-sec=5`|The number of seconds to sample trace metrics (default: 5)| |`--profile-sec=30`|The number of seconds to sample profile metrics (default: 30)| @@ -615,6 +618,7 @@ juicefs mount redis://localhost /mnt/jfs --backup-meta 0 |`-d, --background`|run in background (default: false)| |`--no-syslog`|disable syslog (default: false)| |`--log=path`|path of log file when running in background (default: `$HOME/.juicefs/juicefs.log` or `/var/log/juicefs.log`)| +|`--force`|force to mount even if the mount point is already mounted by the same filesystem.| |`--update-fstab` 1.1 |add / update entry in `/etc/fstab`, will create a symlink from `/sbin/mount.juicefs` to JuiceFS executable if not existing (default: false)| #### FUSE related options {#mount-fuse-options} @@ -627,71 +631,18 @@ juicefs mount redis://localhost /mnt/jfs --backup-meta 0 |`--prefix-internal` 1.1 |add '.jfs' prefix to all internal files (default: false)| |`-o value`|other FUSE options, see [FUSE Mount Options](../reference/fuse_mount_options.md)| -#### Metadata related options {#mount-metadata-options} - -|Items|Description| -|-|-| -|`--subdir=value`|mount a sub-directory as root (default: "")| -|`--backup-meta=3600`|interval (in seconds) to automatically backup metadata in the object storage (0 means disable backup) (default: "3600")| -|`--backup-skip-trash` 1.2|skip files 
and directories in trash when backup metadata.| -|`--heartbeat=12`|interval (in seconds) to send heartbeat; it's recommended that all clients use the same heartbeat value (default: "12")| -|`--read-only`|allow lookup/read operations only (default: false)| -|`--no-bgjob`|Disable background jobs, default to false, which means clients by default carry out background jobs, including:
  • Clean up expired files in Trash (look for `cleanupDeletedFiles`, `cleanupTrash` in [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go))
  • Delete slices that's not referenced (look for `cleanupSlices` in [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go))
  • Clean up stale client sessions (look for `CleanStaleSessions` in [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go))
Note that compaction isn't affected by this option, it happens automatically with file reads and writes, client will check if compaction is in need, and run in background (take Redis for example, look for `compactChunk` in [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/redis.go)).| -|`--atime-mode=noatime` 1.1 |Control atime (last time the file was accessed) behavior, support the following modes:
  • `noatime` (default): set when the file is created or when `SetAttr` is explicitly called. Accessing and modifying the file will not affect atime, tracking atime comes at a performance cost, so this is the default behavior
  • `relatime`: update inode access times relative to mtime (last time when the file data was modified) or ctime (last time when file metadata was changed). Only update atime if atime was earlier than the current mtime or ctime, or the file's atime is more than 1 day old
  • `strictatime`: always update atime on access
| -|`--skip-dir-nlink value` 1.1 |number of retries after which the update of directory nlink will be skipped (used for tkv only, 0 means never) (default: 20)| - -#### Metadata cache related options {#mount-metadata-cache-options} + -For metadata cache description and usage, refer to [Kernel metadata cache](../guide/cache.md#kernel-metadata-cache) and [Client memory metadata cache](../guide/cache.md#client-memory-metadata-cache). + +
-|Items|Description| -|-|-| -|`--attr-cache=1`|attributes cache timeout in seconds (default: 1), read [Kernel metadata cache](../guide/cache.md#kernel-metadata-cache)| -|`--entry-cache=1`|file entry cache timeout in seconds (default: 1), read [Kernel metadata cache](../guide/cache.md#kernel-metadata-cache)| -|`--dir-entry-cache=1`|dir entry cache timeout in seconds (default: 1), read [Kernel metadata cache](../guide/cache.md#kernel-metadata-cache)| -|`--open-cache=0`|open file cache timeout in seconds (0 means disable this feature) (default: 0)| -|`--open-cache-limit value` 1.1 |max number of open files to cache (soft limit, 0 means unlimited) (default: 10000)| - -#### Data storage related options {#mount-data-storage-options} - -|Items|Description| -|-|-| -|`--storage=file`|Object storage type (e.g. `s3`, `gs`, `oss`, `cos`) (default: `"file"`, refer to [documentation](../reference/how_to_set_up_object_storage.md#supported-object-storage) for all supported object storage types).| -|`--storage-class value` 1.1 |the storage class for data written by current client| -|`--bucket=value`|customized endpoint to access object storage| -|`--get-timeout=60`|the max number of seconds to download an object (default: 60)| -|`--put-timeout=60`|the max number of seconds to upload an object (default: 60)| -|`--io-retries=10`|number of retries after network failure (default: 10)| -|`--max-uploads=20`|Upload concurrency, defaults to 20. This is already a reasonably high value for 4M writes, with such write pattern, increasing upload concurrency usually demands higher `--buffer-size`, learn more at [Read/Write Buffer](../guide/cache.md#buffer-size). But for random writes around 100K, 20 might not be enough and can cause congestion at high load, consider using a larger upload concurrency, or try to consolidate small writes in the application end. 
| -|`--max-deletes=10`|number of threads to delete objects (default: 10)| -|`--upload-limit=0`|bandwidth limit for upload in Mbps (default: 0)| -|`--download-limit=0`|bandwidth limit for download in Mbps (default: 0)| +#### {#mount-metadata-options} +#### {#mount-metadata-cache-options} +#### {#mount-data-storage-options} +#### {#mount-data-cache-options} +#### {#mount-metrics-options} -#### Data cache related options {#mount-data-cache-options} - -|Items|Description| -|-|-| -|`--buffer-size=300`|total read/write buffering in MiB (default: 300), see [Read/Write buffer](../guide/cache.md#buffer-size)| -|`--prefetch=1`|prefetch N blocks in parallel (default: 1), see [Client read data cache](../guide/cache.md#client-read-cache)| -|`--writeback`|upload objects in background (default: false), see [Client write data cache](../guide/cache.md#client-write-cache)| -|`--upload-delay=0`|When `--writeback` is enabled, you can use this option to add a delay to object storage upload, default to 0, meaning that upload will begin immediately after write. Different units are supported, including `s` (second), `m` (minute), `h` (hour). If files are deleted during this delay, upload will be skipped entirely, when using JuiceFS for temporary storage, use this option to reduce resource usage. 
Refer to [Client write data cache](../guide/cache.md#client-write-cache).| -|`--cache-dir=value`|directory paths of local cache, use `:` (Linux, macOS) or `;` (Windows) to separate multiple paths (default: `$HOME/.juicefs/cache` or `/var/jfsCache`), see [Client read data cache](../guide/cache.md#client-read-cache)| -|`--cache-mode value` 1.1 |file permissions for cached blocks (default: "0600")| -|`--cache-size=102400`|size of cached object for read in MiB (default: 102400), see [Client read data cache](../guide/cache.md#client-read-cache)| -|`--free-space-ratio=0.1`|min free space ratio (default: 0.1), if [Client write data cache](../guide/cache.md#client-write-cache) is enabled, this option also controls write cache size, see [Client read data cache](../guide/cache.md#client-read-cache)| -|`--cache-partial-only`|cache random/small read only (default: false), see [Client read data cache](../guide/cache.md#client-read-cache)| -|`--verify-cache-checksum value` 1.1 |Checksum level for cache data. After enabled, checksum will be calculated on divided parts of the cache blocks and stored on disks, which are used for verification during reads. The following strategies are supported:
  • `none`: Disable checksum verification, if local cache data is tampered, bad data will be read;
  • `full` (default): Perform verification when reading the full block, use this for sequential read scenarios;
  • `shrink`: Perform verification on parts that's fully included within the read range, use this for random read scenarios;
  • `extend`: Perform verification on parts that fully include the read range, this causes read amplifications and is only used for random read scenarios demanding absolute data integrity.
| -|`--cache-eviction value` 1.1 |cache eviction policy (none or 2-random) (default: "2-random")| -|`--cache-scan-interval value` 1.1 |interval (in seconds) to scan cache-dir to rebuild in-memory index (default: "3600")| - -#### Metrics related options {#mount-metrics-options} - -||Items|Description| -|-|-| -|`--metrics=127.0.0.1:9567`|address to export metrics (default: `127.0.0.1:9567`)| -|`--custom-labels`|custom labels for metrics, format: `key1:value1;key2:value2` (default: "")| -|`--consul=127.0.0.1:8500`|Consul address to register (default: `127.0.0.1:8500`)| -|`--no-usage-report`|do not send usage report (default: false)| +
### `juicefs umount` {#umount} @@ -728,20 +679,22 @@ juicefs gateway redis://localhost localhost:9000 #### Options -Apart from options listed below, this command shares options with `juicefs mount`, be sure to refer to [`mount`](#mount) as well. - |Items|Description| |-|-| -| `--log value`1.2 | path for gateway log | |`META-URL`|Database URL for metadata storage, see [JuiceFS supported metadata engines](../reference/how_to_set_up_metadata_engine.md) for details.| |`ADDRESS`|S3 gateway address and listening port, for example: `localhost:9000`| +|`--log value` 1.2|path for gateway log| |`--access-log=path`|path for JuiceFS access log.| -| `--background, -d`1.2 | run in background (default: false) | +|`--background, -d` 1.2|run in background (default: false)| |`--no-banner`|disable MinIO startup information (default: false)| |`--multi-buckets`|use top level of directories as buckets (default: false)| |`--keep-etag`|save the ETag for uploaded objects (default: false)| |`--umask=022`|umask for new file and directory in octal (default: 022)| -| `--domain value`1.2 |domain for virtual-host-style requests| +|`--object-tag` 1.2|enable object tagging API| +|`--domain value` 1.2|domain for virtual-host-style requests| +|`--refresh-iam-interval=5m` 1.2|interval to reload gateway IAM from configuration (default: 5m)| + + ### `juicefs webdav` {#webdav} @@ -757,19 +710,19 @@ juicefs webdav redis://localhost localhost:9007 #### Options -Apart from options listed below, this command shares options with `juicefs mount`, be sure to refer to [`mount`](#mount) as well. 
- |Items|Description| |-|-| |`META-URL`|Database URL for metadata storage, see [JuiceFS supported metadata engines](../reference/how_to_set_up_metadata_engine.md) for details.| |`ADDRESS`|WebDAV address and listening port, for example: `localhost:9007`.| -|`--cert-file` 1.1 |certificate file for HTTPS| -|`--key-file` 1.1 |key file for HTTPS| +|`--cert-file` 1.1|certificate file for HTTPS| +|`--key-file` 1.1|key file for HTTPS| |`--gzip`|compress served files via gzip (default: false)| |`--disallowList`|disallow list a directory (default: false)| -| `--log value`1.2 | path for WebDAV log| -|`--access-log=path`|path for JuiceFS access log.| -| `--background, -d`1.2 | run in background (default: false)| +|`--log value` 1.2|path for WebDAV log| +|`--access-log=path`|path for JuiceFS access log| +|`--background, -d` 1.2|run in background (default: false)| + + ## Tool {#tool} @@ -820,6 +773,7 @@ ACCESS_KEY=myAccessKey SECRET_KEY=mySecretKey juicefs objbench --storage=s3 http |`--storage=file`|Object storage type (e.g. 
`s3`, `gs`, `oss`, `cos`) (default: `file`, refer to [documentation](../reference/how_to_set_up_object_storage.md#supported-object-storage) for all supported object storage types)| |`--access-key=value`|Access Key for object storage (can also be set via the environment variable `ACCESS_KEY`), see [How to Set Up Object Storage](../reference/how_to_set_up_object_storage.md#aksk) for more.| |`--secret-key value`|Secret Key for object storage (can also be set via the environment variable `SECRET_KEY`), see [How to Set Up Object Storage](../reference/how_to_set_up_object_storage.md#aksk) for more.| +|`--session-token value` 1.0|session token for object storage| |`--block-size=4096`|size of each IO block in KiB (default: 4096)| |`--big-object-size=1024`|size of each big object in MiB (default: 1024)| |`--small-object-size=128`|size of each small object in KiB (default: 128)| @@ -855,6 +809,8 @@ juicefs warmup -f /tmp/filelist.txt |`--file=path, -f path`|file containing a list of paths (each line is a file path)| |`--threads=50, -p 50`|number of concurrent workers, default to 50. Reduce this number in low bandwidth environment to avoid download timeouts| |`--background, -b`|run in background (default: false)| +|`--evict` 1.2|evict cached blocks| +|`--check` 1.2|check whether the data blocks are cached or not| ### `juicefs rmr` {#rmr} @@ -914,8 +870,14 @@ In which: |Items|Description| |-|-| |`--start=KEY, -s KEY, --end=KEY, -e KEY`|Provide object storage key range for syncing.| +|`--end KEY, -e KEY`| the last `KEY` to sync | |`--exclude=PATTERN`|Exclude keys matching PATTERN.| |`--include=PATTERN`|Include keys matching PATTERN, need to be used with `--exclude`.| +|`--match-full-path` 1.2|match filters again the full path (default: false)| +|`--max-size-SIZE` 1.2|skip files larger than `SIZE`| +|`--min-size-SIZE` 1.2|skip files smaller than `SIZE`| +|`--max-age=DURATION` 1.2|Skip files whose last modification time exceeds `DURATION`, in seconds. 
For example, `--max-age=3600` means to synchronize only files that have been modified within 1 hour.| +|`--min-age=DURATION` 1.2|Skip files whose last modification time is no more than `DURATION`, in seconds. For example, `--min-age=3600` means to synchronize only files whose last modification time is more than 1 hour from the current time.| |`--limit=-1`|Limit the number of objects that will be processed, default to -1 which means unlimited.| |`--update, -u`|Update existing files if the source files' `mtime` is newer, default to false.| |`--force-update, -f`|Always update existing file, default to false.| @@ -929,6 +891,7 @@ In which: |`--dirs`|Sync empty directories as well.| |`--perms`|Preserve permissions, default to false.| |`--links, -l`|Copy symlinks as symlinks default to false.| +|`--inplace` 1.2|When a file in the source path is modified, directly modify the file with the same name in the destination path instead of first writing a temporary file in the destination path and then atomically renaming the temporary file to the real file name. This option only makes sense when the `--update` option is enabled and the storage system of the destination path supports in-place modification of files (such as JuiceFS, HDFS, NFS). That is to say, if the storage system of the destination path is object storage, enable this option is invalid. (default: false)| |`--delete-src, --deleteSrc`|Delete objects that already exist in destination. Different from rsync, files won't be deleted at the first run, instead they will be deleted at the next run, after files are successfully copied to the destination.| |`--delete-dst, --deleteDst`|Delete extraneous objects from destination.| |`--check-all`|Verify the integrity of all files in source and destination, default to false. 
Comparison is done on byte streams, which comes at a performance cost.| @@ -948,10 +911,17 @@ In which: #### Cluster related options {#sync-cluster-related-options} -|Items| Description | -|-|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|Items| Description| +|-|-| |`--manager-addr=ADDR`| The listening address of the Manager node in distributed synchronization mode in the format: `:[port]`. If not specified, it listens on a random port. If this option is omitted, it listens on a random local IPv4 address and a random port. | -|`--worker=ADDR,ADDR`| Worker node addresses used in distributed syncing, comma separated. | +|`--worker=ADDR,ADDR`| Worker node addresses used in distributed syncing, comma separated. | + +#### Metrics related options {#sync-metircs-related-options} + +|Items|Description| +|-|-| +|`--metrics value` 1.2|address to export metrics (default: "127.0.0.1:9567")| +|`--consul value` 1.2|Consul address to register (default: "127.0.0.1:8500")| ### `juicefs clone` 1.1 {#clone} diff --git a/docs/en/reference/fuse_mount_options.md b/docs/en/reference/fuse_mount_options.md index 73256fced4d4..18a923ece09a 100644 --- a/docs/en/reference/fuse_mount_options.md +++ b/docs/en/reference/fuse_mount_options.md @@ -8,7 +8,7 @@ JuiceFS provides several access methods, FUSE is the common one, which is the wa This guide describes the common FUSE mount options for JuiceFS, with two ways to add mount options: -1. Run [`juicefs mount`](../reference/command_reference.md#mount), and use `-o` to specify multiple options separated by commas. +1. Run [`juicefs mount`](../reference/command_reference.mdx#mount), and use `-o` to specify multiple options separated by commas. 
```bash juicefs mount -d -o allow_other,writeback_cache sqlite3://myjfs.db ~/jfs diff --git a/docs/en/reference/how_to_set_up_object_storage.md b/docs/en/reference/how_to_set_up_object_storage.md index d5b4e2c0a778..5f308a2f6f08 100644 --- a/docs/en/reference/how_to_set_up_object_storage.md +++ b/docs/en/reference/how_to_set_up_object_storage.md @@ -34,7 +34,7 @@ When executing the `juicefs format` or `juicefs mount` command, you can set some ## Enable data sharding {#enable-data-sharding} -When creating a file system, multiple buckets can be defined as the underlying storage of the file system through the [`--shards`](../reference/command_reference.md#format-data-format-options) option. In this way, the system will distribute the files to multiple buckets based on the hashed value of the file name. Data sharding technology can distribute the load of concurrent writing of large-scale data to multiple buckets, thereby improving the writing performance. +When creating a file system, multiple buckets can be defined as the underlying storage of the file system through the [`--shards`](../reference/command_reference.mdx#format-data-format-options) option. In this way, the system will distribute the files to multiple buckets based on the hashed value of the file name. Data sharding technology can distribute the load of concurrent writing of large-scale data to multiple buckets, thereby improving the writing performance. The following are points to note when using the data sharding function: @@ -116,7 +116,7 @@ Creating a file system using an internal Endpoint ensures better performance and ## Storage class 1.1 {#storage-class} -Object storage usually supports multiple storage classes, such as standard storage, infrequent access storage, and archive storage. 
Different storage classes will have different prices and availability, you can set the default storage class with the [`--storage-class`](../reference/command_reference.md#format-data-storage-options) option when creating the JuiceFS file system, or set a new storage class with the [`--storage-class`](../reference/command_reference.md#mount-data-storage-options) option when mounting the JuiceFS file system. Please refer to the user manual of the object storage you are using to see how to set the value of the `--storage-class` option (such as [Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass)). +Object storage usually supports multiple storage classes, such as standard storage, infrequent access storage, and archive storage. Different storage classes will have different prices and availability, you can set the default storage class with the [`--storage-class`](../reference/command_reference.mdx#format-data-storage-options) option when creating the JuiceFS file system, or set a new storage class with the [`--storage-class`](../reference/command_reference.mdx#mount-data-storage-options) option when mounting the JuiceFS file system. Please refer to the user manual of the object storage you are using to see how to set the value of the `--storage-class` option (such as [Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass)). :::note When using certain storage classes (such as archive and deep archive), the data cannot be accessed immediately, and the data needs to be restored in advance and accessed after a period of time. 
diff --git a/docs/en/reference/posix_compatibility.md b/docs/en/reference/posix_compatibility.md index 280b9e9a243a..a6bccf919c17 100644 --- a/docs/en/reference/posix_compatibility.md +++ b/docs/en/reference/posix_compatibility.md @@ -38,7 +38,7 @@ Besides the features covered by pjdfstest, JuiceFS provides: - POSIX traditional record locks (fcntl). :::note -POSIX record locks are classified as **traditional locks** ("process-associated") and **OFD locks** (Open file description locks), and their locking operation commands are `F_SETLK` and `F_OFD_SETLK` respectively. Due to the implementation of the FUSE kernel module, JuiceFS currently only supports traditional record locks. More details can be found at: . +POSIX record locks are classified as **traditional locks** ("process-associated") and **OFD locks** (Open file description locks), and their locking operation commands are `F_SETLK` and `F_OFD_SETLK` respectively. Due to the implementation of the FUSE kernel module, JuiceFS currently only supports traditional record locks. More details can be found at: [https://man7.org/linux/man-pages/man2/fcntl.2.html](https://man7.org/linux/man-pages/man2/fcntl.2.html). ::: ## LTP diff --git a/docs/en/security/trash.md b/docs/en/security/trash.md index b842c692520c..6deb1bb21f7f 100644 --- a/docs/en/security/trash.md +++ b/docs/en/security/trash.md @@ -11,7 +11,7 @@ JuiceFS enables the trash feature by default, files deleted will be moved in a h When using `juicefs format` command to initialize JuiceFS volume, users are allowed to specify `--trash-days ` to set the number of days which files are kept in the `.trash` directory. Within this period, user-removed files are not actually purged, so the file system usage shown in the output of `df` command will not decrease, and the blocks in the object storage will still exist. 
-To control the expiration settings, use the [`--trash-days`](../reference/command_reference.md#format) option which is available for both `juicefs format` and `juicefs config`: +To control the expiration settings, use the [`--trash-days`](../reference/command_reference.mdx#format) option which is available for both `juicefs format` and `juicefs config`: ```shell # Creating a new file system @@ -24,7 +24,7 @@ juicefs config META-URL --trash-days=7 juicefs config META-URL --trash-days=0 ``` -In addition, the automatic cleaning of the trash relies on the background job of the JuiceFS client. To ensure that the background job can be executed properly, at least one online mount point is required, and the [`--no-bgjob`](../reference/command_reference.md#mount) parameter should not be used when mounting the file system. +In addition, the automatic cleaning of the trash relies on the background job of the JuiceFS client. To ensure that the background job can be executed properly, at least one online mount point is required, and the [`--no-bgjob`](../reference/command_reference.mdx#mount-metadata-options) parameter should not be used when mounting the file system. ## Recover files {#recover} @@ -36,7 +36,7 @@ If you have found the desired files in Trash, you can recover them using `mv`: mv .trash/2022-11-30-10/[parent inode]-[file inode]-[file name] . ``` -Files within the Trash directory lost all their directory structure information, and are stored in a "flatten" style, however the parent directory inode is preserved in the file name, if you have forgotten the file name, look for parent directory inode using [`juicefs info`](../reference/command_reference.md#info), and then track down the desired files. 
+Files within the Trash directory lost all their directory structure information, and are stored in a "flatten" style, however the parent directory inode is preserved in the file name, if you have forgotten the file name, look for parent directory inode using [`juicefs info`](../reference/command_reference.mdx#info), and then track down the desired files. Assuming the mount point being `/jfs`, and you've accidentally deleted `/jfs/data/config.json`, but you cannot directly recover this `config.json` because you've forgotten its name, use the following procedure to locate the parent directory inode, and then locate the corresponding trash files. @@ -81,7 +81,7 @@ $ tree .trash/2023-08-14-05 └── 16-18-config.json ``` -To resolve such inconvenience, JuiceFS v1.1 provides the [`restore`](../reference/command_reference.md#restore) subcommand to quickly restore deleted files, while preserving its original directory structure. Run this procedure as follows: +To resolve such inconvenience, JuiceFS v1.1 provides the [`restore`](../reference/command_reference.mdx#restore) subcommand to quickly restore deleted files, while preserving its original directory structure. Run this procedure as follows: ```shell # Run the restore command to reconstruct directory structure within the Trash @@ -107,7 +107,7 @@ juicefs restore $META_URL 2023-08-14-05 --put-back When files in the trash directory reach their expiration time, they will be automatically cleaned up. It is important to note that the file cleaning is performed by the background job of the JuiceFS client, which is scheduled to run every hour by default. Therefore, when there are a large number of expired files, the cleaning speed of the object storage may not be as fast as expected, and it may take some time to see the change in storage capacity. 
-If you want to permanently delete files before their expiration time, you need to have `root` privileges and use [`juicefs rmr`](../reference/command_reference.md#rmr) or the system's built-in `rm` command to delete the files in the `.trash` directory, so that storage space can be immediately released. +If you want to permanently delete files before their expiration time, you need to have `root` privileges and use [`juicefs rmr`](../reference/command_reference.mdx#rmr) or the system's built-in `rm` command to delete the files in the `.trash` directory, so that storage space can be immediately released. For example, to permanently delete a directory in the trash: @@ -121,7 +121,7 @@ If you want to delete expired files more quickly, you can mount multiple mount p Apart from user deleted files, there's another type of data which also resides in Trash, which isn't directly visible from the `.trash` directory, they are stale slices created by file edits and overwrites. Read more in [How JuiceFS stores files](../introduction/architecture.md#how-juicefs-store-files). To sum up, if applications constantly delete or overwrite files, object storage usage will exceed file system usage. 
-Although stale slices cannot be browsed or manipulated, you can use [`juicefs status`](../reference/command_reference.md#status) to observe its scale: +Although stale slices cannot be browsed or manipulated, you can use [`juicefs status`](../reference/command_reference.mdx#status) to observe its scale: ```shell # The Trash Slices field displayed below is the number of stale slices diff --git a/docs/en/tutorials/juicefs_on_kubesphere.md b/docs/en/tutorials/juicefs_on_kubesphere.md index 2ba4d426e93a..159b31d7b6f0 100644 --- a/docs/en/tutorials/juicefs_on_kubesphere.md +++ b/docs/en/tutorials/juicefs_on_kubesphere.md @@ -39,7 +39,7 @@ Click in the workspace to enter the application management, select "App Reposito #### Method two: Application Template -Download the chart compressed package from the JuiceFS CSI Driver warehouse: . +Download the chart compressed package from the JuiceFS CSI Driver warehouse: [https://github.com/juicedata/juicefs-csi-driver/releases](https://github.com/juicedata/juicefs-csi-driver/releases). 
In the "Workspace", click to enter the "App Management", select "App Templates", click "create", upload the chart compression package: diff --git a/docs/zh_cn/administration/fault_diagnosis_and_analysis.md b/docs/zh_cn/administration/fault_diagnosis_and_analysis.md index dbe4dc507b47..ad5c781a9599 100644 --- a/docs/zh_cn/administration/fault_diagnosis_and_analysis.md +++ b/docs/zh_cn/administration/fault_diagnosis_and_analysis.md @@ -16,7 +16,7 @@ JuiceFS 客户端在运行过程中会输出日志用于故障诊断,日志等 ### 挂载点 -当挂载 JuiceFS 文件系统时加上了 [`-d` 选项](../reference/command_reference.md#mount)(表示后台运行),日志会同时输出到系统日志和本地日志文件,取决于挂载文件系统时的运行用户,本地日志文件的路径稍有区别。root 用户对应的日志文件路径是 `/var/log/juicefs.log`,非 root 用户的日志文件路径是 `$HOME/.juicefs/juicefs.log`,具体请参见 [`--log` 选项](../reference/command_reference.md#mount)。 +当挂载 JuiceFS 文件系统时加上了 [`-d` 选项](../reference/command_reference.mdx#mount)(表示后台运行),日志会同时输出到系统日志和本地日志文件,取决于挂载文件系统时的运行用户,本地日志文件的路径稍有区别。root 用户对应的日志文件路径是 `/var/log/juicefs.log`,非 root 用户的日志文件路径是 `$HOME/.juicefs/juicefs.log`,具体请参见 [`--log` 选项](../reference/command_reference.mdx#mount)。 取决于你使用的操作系统,你可以通过不同的命令获取系统日志或直接读取本地日志文件: @@ -118,7 +118,7 @@ kubectl -n kube-system exec juicefs-1.2.3.4-pvc-d4b8fb4f-2c0b-48e8-a2dc-53079943 ### S3 网关 -需要在启动 S3 网关时新增 [`--access-log` 选项](../reference/command_reference.md#gateway),指定访问日志输出的路径,默认 S3 网关不输出访问日志。 +需要在启动 S3 网关时新增 [`--access-log` 选项](../reference/command_reference.mdx#gateway),指定访问日志输出的路径,默认 S3 网关不输出访问日志。 ### Hadoop Java SDK @@ -175,7 +175,7 @@ JuiceFS 客户端提供 `profile` 和 `stats` 两个子命令来对性能数据 ### `juicefs profile` {#profile} -[`juicefs profile`](../reference/command_reference.md#profile) 会对[「文件系统访问日志」](#access-log)进行汇总,运行 `juicefs profile MOUNTPOINT` 命令,便能看到根据最新访问日志获取的各个文件系统操作的实时统计信息: +[`juicefs profile`](../reference/command_reference.mdx#profile) 会对[「文件系统访问日志」](#access-log)进行汇总,运行 `juicefs profile MOUNTPOINT` 命令,便能看到根据最新访问日志获取的各个文件系统操作的实时统计信息: ![JuiceFS-profiling](../images/juicefs-profiling.gif) @@ -199,7 +199,7 @@ juicefs profile /tmp/juicefs.accesslog --uid 12345 ### 
`juicefs stats` {#stats} -[`juicefs stats`](../reference/command_reference.md#stats) 命令通过读取 JuiceFS 客户端的监控数据,以类似 Linux `dstat` 工具的形式实时打印各个指标的每秒变化情况: +[`juicefs stats`](../reference/command_reference.mdx#stats) 命令通过读取 JuiceFS 客户端的监控数据,以类似 Linux `dstat` 工具的形式实时打印各个指标的每秒变化情况: ![juicefs_stats_watcher](../images/juicefs_stats_watcher.png) @@ -209,7 +209,7 @@ juicefs profile /tmp/juicefs.accesslog --uid 12345 - `cpu`:进程的 CPU 使用率。 - `mem`:进程的物理内存使用量。 -- `buf`:进程已使用的[读写缓冲区](../guide/cache.md#buffer-size)大小,如果该数值逼近甚至超过客户端所设置的 [`--buffer-size`](../reference/command_reference.md#mount),说明读写缓冲区空间不足,需要视情况扩大,或者降低应用读写负载。 +- `buf`:进程已使用的[读写缓冲区](../guide/cache.md#buffer-size)大小,如果该数值逼近甚至超过客户端所设置的 [`--buffer-size`](../reference/command_reference.mdx#mount-data-cache-options),说明读写缓冲区空间不足,需要视情况扩大,或者降低应用读写负载。 - `cache`:内部指标,无需关注。 #### `fuse` @@ -274,7 +274,7 @@ curl 'http://localhost:/debug/pprof/heap' > juicefs.heap.pb.gz juicefs debug /mnt/jfs ``` -关于 `juicefs debug` 命令的更多信息,请查看[命令参考](../reference/command_reference.md#debug)。 +关于 `juicefs debug` 命令的更多信息,请查看[命令参考](../reference/command_reference.mdx#debug)。 ::: 如果你安装了 `go` 命令,那么可以通过 `go tool pprof` 命令直接分析,例如分析 CPU 性能统计: diff --git a/docs/zh_cn/administration/metadata/redis_best_practices.md b/docs/zh_cn/administration/metadata/redis_best_practices.md index 6e1cb9cd7536..520e66512bed 100644 --- a/docs/zh_cn/administration/metadata/redis_best_practices.md +++ b/docs/zh_cn/administration/metadata/redis_best_practices.md @@ -27,7 +27,7 @@ used_memory_dataset: 13439673592 used_memory_dataset_perc: 70.12% ``` -其中 `used_memory_rss` 是 Redis 实际使用的总内存大小,这里既包含了存储在 Redis 中的数据大小(也就是上面的 `used_memory_dataset`),也包含了一些 Redis 的[系统开销](https://redis.io/commands/memory-stats)(也就是上面的 `used_memory_overhead`)。前面提到每个文件的元数据大约占用 300 字节是通过 `used_memory_dataset` 来计算的,如果你发现你的 JuiceFS 文件系统中单个文件元数据占用空间远大于 300 字节,可以尝试运行 [`juicefs gc`](../../reference/command_reference.md#gc) 命令来清理可能存在的冗余数据。 +其中 `used_memory_rss` 是 Redis 实际使用的总内存大小,这里既包含了存储在 Redis 中的数据大小(也就是上面的 
`used_memory_dataset`),也包含了一些 Redis 的[系统开销](https://redis.io/commands/memory-stats)(也就是上面的 `used_memory_overhead`)。前面提到每个文件的元数据大约占用 300 字节是通过 `used_memory_dataset` 来计算的,如果你发现你的 JuiceFS 文件系统中单个文件元数据占用空间远大于 300 字节,可以尝试运行 [`juicefs gc`](../../reference/command_reference.mdx#gc) 命令来清理可能存在的冗余数据。 ## 数据可用性 @@ -124,7 +124,7 @@ Redis 对数据备份非常友好,因为您可以在数据库运行时复制 R 如果 AOF 和 RDB 同时开启,Redis 启动时会优先使用 AOF 文件来恢复数据,因为 AOF 保证是最完整的数据。 -在恢复完 Redis 数据以后,可以继续通过新的 Redis 地址使用 JuiceFS 文件系统。建议运行 [`juicefs fsck`](../../reference/command_reference.md#fsck) 命令检查文件系统数据的完整性。 +在恢复完 Redis 数据以后,可以继续通过新的 Redis 地址使用 JuiceFS 文件系统。建议运行 [`juicefs fsck`](../../reference/command_reference.mdx#fsck) 命令检查文件系统数据的完整性。 ## 推荐的 Redis 托管服务 diff --git a/docs/zh_cn/administration/metadata_dump_load.md b/docs/zh_cn/administration/metadata_dump_load.md index 973c20c4e666..e2eb02b965c0 100644 --- a/docs/zh_cn/administration/metadata_dump_load.md +++ b/docs/zh_cn/administration/metadata_dump_load.md @@ -10,7 +10,7 @@ slug: /metadata_dump_load - JuiceFS v1.0.4 开始支持通过 `load` 命令恢复加密的元数据备份 ::: -JuiceFS 支持[多种元数据引擎](../reference/how_to_set_up_metadata_engine.md),且各引擎内部的数据管理格式各有不同。为了便于管理,JuiceFS 提供了 [`dump`](../reference/command_reference.md#dump) 命令允许将所有元数据以统一格式写入到 JSON 文件进行备份。同时,JuiceFS 也提供了 [`load`](../reference/command_reference.md#load) 命令,允许将备份恢复或迁移到任意元数据存储引擎。这个导出导入流程也可以用来将 JuiceFS 社区版文件系统迁移到企业版(参考[企业版文档](https://juicefs.com/docs/zh/cloud/metadata_dump_load)),反之亦然。 +JuiceFS 支持[多种元数据引擎](../reference/how_to_set_up_metadata_engine.md),且各引擎内部的数据管理格式各有不同。为了便于管理,JuiceFS 提供了 [`dump`](../reference/command_reference.mdx#dump) 命令允许将所有元数据以统一格式写入到 JSON 文件进行备份。同时,JuiceFS 也提供了 [`load`](../reference/command_reference.mdx#load) 命令,允许将备份恢复或迁移到任意元数据存储引擎。这个导出导入流程也可以用来将 JuiceFS 社区版文件系统迁移到企业版(参考[企业版文档](https://juicefs.com/docs/zh/cloud/metadata_dump_load)),反之亦然。 ## 元数据备份 {#backup} @@ -81,7 +81,7 @@ JuiceFS 会按照以下规则定期清理备份: ## 元数据恢复与迁移 {#recovery-and-migration} -使用 [`load`](../reference/command_reference.md#load) 命令可以将 `dump` 
命令导出的元数据恢复到一个空数据库中,比如: +使用 [`load`](../reference/command_reference.mdx#load) 命令可以将 `dump` 命令导出的元数据恢复到一个空数据库中,比如: ```shell juicefs load redis://192.168.1.6:6379/1 meta-dump.json @@ -111,7 +111,7 @@ juicefs load redis://192.168.1.6:6379/1 meta-dump.json juicefs dump redis://192.168.1.6:6379/1 | juicefs load mysql://user:password@(192.168.1.6:3306)/juicefs ``` -需要注意的是,由于 `dump` 导出的备份中默认排除了对象存储的 API 访问密钥,不论恢复还是迁移元数据,完成操作后都需要使用 [`juicefs config`](../reference/command_reference.md#config) 命令把文件系统关联的对象存储的认证信息再添加回去,例如: +需要注意的是,由于 `dump` 导出的备份中默认排除了对象存储的 API 访问密钥,不论恢复还是迁移元数据,完成操作后都需要使用 [`juicefs config`](../reference/command_reference.mdx#config) 命令把文件系统关联的对象存储的认证信息再添加回去,例如: ```shell juicefs config --secret-key xxxxx mysql://user:password@(192.168.1.6:3306)/juicefs diff --git a/docs/zh_cn/administration/monitoring.md b/docs/zh_cn/administration/monitoring.md index 16a50c01bdcd..02932938c3f4 100644 --- a/docs/zh_cn/administration/monitoring.md +++ b/docs/zh_cn/administration/monitoring.md @@ -90,7 +90,7 @@ Grafana 仪表盘如下图: ### FUSE 挂载 {#mount-point} -当通过 [`juicefs mount`](../reference/command_reference.md#mount) 命令挂载 JuiceFS 文件系统后,可以通过 `http://localhost:9567/metrics` 这个地址收集监控指标,你也可以通过 `--metrics` 选项自定义。如: +当通过 [`juicefs mount`](../reference/command_reference.mdx#mount) 命令挂载 JuiceFS 文件系统后,可以通过 `http://localhost:9567/metrics` 这个地址收集监控指标,你也可以通过 `--metrics` 选项自定义。如: ```shell juicefs mount --metrics localhost:9567 ... 
diff --git a/docs/zh_cn/administration/troubleshooting.md b/docs/zh_cn/administration/troubleshooting.md index ff6602e81962..149957b02fb7 100644 --- a/docs/zh_cn/administration/troubleshooting.md +++ b/docs/zh_cn/administration/troubleshooting.md @@ -98,8 +98,8 @@ $ ls -l /usr/bin/fusermount 首先,在网速慢的时候,JuiceFS 客户端上传/下载文件容易超时(类似上方的错误日志),这种情况下可以考虑: -* 降低上传并发度,比如 [`--max-uploads=1`](../reference/command_reference.md#mount),避免上传超时。 -* 降低读写缓冲区大小,比如 [`--buffer-size=64`](../reference/command_reference.md#mount) 或者更小。当带宽充裕时,增大读写缓冲区能提升并发性能。但在低带宽场景下使用过大的读写缓冲区,`flush` 的上传时间会很长,因此容易超时。 +* 降低上传并发度,比如 [`--max-uploads=1`](../reference/command_reference.mdx#mount-data-storage-options),避免上传超时。 +* 降低读写缓冲区大小,比如 [`--buffer-size=64`](../reference/command_reference.mdx#mount-data-cache-options) 或者更小。当带宽充裕时,增大读写缓冲区能提升并发性能。但在低带宽场景下使用过大的读写缓冲区,`flush` 的上传时间会很长,因此容易超时。 * 默认 GET/PUT 请求超时时间为 60 秒,因此增大 `--get-timeout` 以及 `--put-timeout`,可以改善读写超时的情况。 此外,低带宽环境下需要慎用[「客户端写缓存」](../guide/cache.md#client-write-cache)特性。先简单介绍一下 JuiceFS 的后台任务设计:每个 JuiceFS 客户端默认都启用后台任务,后台任务中会执行碎片合并(compaction)、异步删除等工作,而如果节点网络状况太差,则会降低系统整体性能。更糟的是如果该节点还启用了客户端写缓存,则容易出现碎片合并后上传缓慢,导致其他节点无法读取该文件的危险情况: @@ -111,7 +111,7 @@ $ ls -l /usr/bin/fusermount : fail to read sliceId 1771585458 (off:4194304, size:4194304, clen: 37746372): get chunks/0/0/1_0_4194304: oss: service returned error: StatusCode=404, ErrorCode=NoSuchKey, ErrorMessage="The specified key does not exist.", RequestId=62E8FB058C0B5C3134CB80B6 ``` -为了避免此类问题,我们推荐在低带宽节点上禁用后台任务,也就是为挂载命令添加 [`--no-bgjob`](../reference/command_reference.md#mount) 参数。 +为了避免此类问题,我们推荐在低带宽节点上禁用后台任务,也就是为挂载命令添加 [`--no-bgjob`](../reference/command_reference.mdx#mount-metadata-options) 参数。 ### 警告日志:找不到对象存储块 {#warning-log-block-not-found-in-object-storage} diff --git a/docs/zh_cn/benchmark/performance_evaluation_guide.md b/docs/zh_cn/benchmark/performance_evaluation_guide.md index 7b3dbcfa6f9d..de8771a8c79c 100644 --- a/docs/zh_cn/benchmark/performance_evaluation_guide.md +++ 
b/docs/zh_cn/benchmark/performance_evaluation_guide.md @@ -34,7 +34,7 @@ JuiceFS v1.0+ 默认启用了回收站,基准测试会在文件系统中创建 ### `juicefs bench` -[`juicefs bench`](../reference/command_reference.md#bench) 命令可以帮助你快速完成单机性能测试,通过测试结果判断环境配置和性能表现是否正常。假设你已经把 JuiceFS 挂载到了测试机器的 `/mnt/jfs` 位置(如果在 JuiceFS 初始化、挂载方面需要帮助,请参考[创建文件系统](../getting-started/standalone.md#juicefs-format)),执行以下命令即可(推荐 `-p` 参数设置为测试机器的 CPU 核数): +[`juicefs bench`](../reference/command_reference.mdx#bench) 命令可以帮助你快速完成单机性能测试,通过测试结果判断环境配置和性能表现是否正常。假设你已经把 JuiceFS 挂载到了测试机器的 `/mnt/jfs` 位置(如果在 JuiceFS 初始化、挂载方面需要帮助,请参考[创建文件系统](../getting-started/standalone.md#juicefs-format)),执行以下命令即可(推荐 `-p` 参数设置为测试机器的 CPU 核数): ```bash juicefs bench /mnt/jfs -p 4 @@ -79,7 +79,7 @@ Amazon EFS 的性能与容量线性相关([参考官方文档](https://docs.aw ### `juicefs objbench` -[`juicefs objbench`](../reference/command_reference.md#objbench) 命令可以运行一些关于对象存储的测试,用以评估其作为 JuiceFS 的后端存储时的运行情况。以测试 Amazon S3 为例: +[`juicefs objbench`](../reference/command_reference.mdx#objbench) 命令可以运行一些关于对象存储的测试,用以评估其作为 JuiceFS 的后端存储时的运行情况。以测试 Amazon S3 为例: ```bash juicefs objbench \ diff --git a/docs/zh_cn/deployment/production_deployment_recommendations.md b/docs/zh_cn/deployment/production_deployment_recommendations.md index 5bb31de332a2..3520c1b4ef6e 100644 --- a/docs/zh_cn/deployment/production_deployment_recommendations.md +++ b/docs/zh_cn/deployment/production_deployment_recommendations.md @@ -90,4 +90,4 @@ logrotate -d /etc/logrotate.d/juicefs ## 命令行自动补全 -JuiceFS 为 Bash 和 Zsh 提供了命令行自动补全脚本,方便在命令行中使用 `juicefs` 命令,具体请参考[文档](../reference/command_reference.md#auto-completion)。 +JuiceFS 为 Bash 和 Zsh 提供了命令行自动补全脚本,方便在命令行中使用 `juicefs` 命令,具体请参考[文档](../reference/command_reference.mdx#auto-completion)。 diff --git a/docs/zh_cn/deployment/s3_gateway.md b/docs/zh_cn/deployment/s3_gateway.md index 5f25ebbb3840..d299e5d92ab9 100644 --- a/docs/zh_cn/deployment/s3_gateway.md +++ b/docs/zh_cn/deployment/s3_gateway.md @@ -32,7 +32,7 @@ juicefs gateway redis://localhost:6379 localhost:9000 
以上三条命令中,前两条命令用于设置环境变量。注意,`MINIO_ROOT_USER` 的长度至少 3 个字符, `MINIO_ROOT_PASSWORD` 的长度至少 8 个字符(Windows 用户请改用 `set` 命令设置环境变量,例如:`set MINIO_ROOT_USER=admin`)。 -最后一条命令用于启用 S3 网关,`gateway` 子命令至少需要提供两个参数,第一个是存储元数据的数据库 URL,第二个是 S3 网关监听的地址和端口。你可以根据需要在 `gateway` 子命令中添加[其他选项](../reference/command_reference.md#gateway)优化 S3 网关,比如,可以将默认的本地缓存设置为 20 GiB。 +最后一条命令用于启用 S3 网关,`gateway` 子命令至少需要提供两个参数,第一个是存储元数据的数据库 URL,第二个是 S3 网关监听的地址和端口。你可以根据需要在 `gateway` 子命令中添加[其他选项](../reference/command_reference.mdx#gateway)优化 S3 网关,比如,可以将默认的本地缓存设置为 20 GiB。 ```shell juicefs gateway --cache-size 20480 redis://localhost:6379 localhost:9000 @@ -140,7 +140,7 @@ kubectl -n ${NAMESPACE} create secret generic juicefs-secret \ - 默认使用 `juicedata/juicefs-csi-driver` 最新版镜像,其中已经集成了最新版 JuiceFS 客户端,具体集成的 JuiceFS 客户端版本请查看[这里](https://github.com/juicedata/juicefs-csi-driver/releases)。 - `Deployment` 的 `initContainers` 会先尝试格式化 JuiceFS 文件系统,如果你已经提前格式化完毕,这一步不会影响现有 JuiceFS 文件系统。 - S3 网关默认监听的端口号为 9000 -- S3 网关[启动选项](../reference/command_reference.md#gateway)均为默认值,请根据实际需求调整。 +- S3 网关[启动选项](../reference/command_reference.mdx#gateway)均为默认值,请根据实际需求调整。 - `MINIO_ROOT_USER` 环境变量的值为 Secret 中的 `access-key`,`MINIO_ROOT_PASSWORD` 环境变量的值为 Secret 中的 `secret-key`。 ```shell diff --git a/docs/zh_cn/development/internals.md b/docs/zh_cn/development/internals.md index 1dfd05179ca5..3fc7318e0259 100644 --- a/docs/zh_cn/development/internals.md +++ b/docs/zh_cn/development/internals.md @@ -181,7 +181,7 @@ type Attr struct { 其中几个需要说明的字段: -- Atime/Atimensec:参考 [`--atime-mode`](../reference/command_reference.md#mount) +- Atime/Atimensec:参考 [`--atime-mode`](../reference/command_reference.mdx#mount-metadata-options) - Nlink: - 目录文件:初始值为 2('.' 
和 '..'),每有一个子目录 Nlink 值加 1 - 其他文件:初始值为 1,每创建一个硬链接 Nlink 值加 1 diff --git a/docs/zh_cn/faq.md b/docs/zh_cn/faq.md index 43bd2483c4a3..5417b0ccde09 100644 --- a/docs/zh_cn/faq.md +++ b/docs/zh_cn/faq.md @@ -47,14 +47,14 @@ slug: /faq 第一个原因是你可能开启了回收站特性。为了保证数据安全回收站默认开启,删除的文件其实被放到了回收站,实际并没有被删除,所以对象存储大小不会变化。回收站的保留时间可以通过 `juicefs format` 指定或者通过 `juicefs config` 修改。请参考[「回收站」](security/trash.md)文档了解更多信息。 -第二个原因是 JuiceFS 是异步删除对象存储中的数据,所以对象存储的空间变化会慢一点。如果你需要立即清理对象存储中需要被删除的数据,可以尝试运行 [`juicefs gc`](reference/command_reference.md#gc) 命令。 +第二个原因是 JuiceFS 是异步删除对象存储中的数据,所以对象存储的空间变化会慢一点。如果你需要立即清理对象存储中需要被删除的数据,可以尝试运行 [`juicefs gc`](reference/command_reference.mdx#gc) 命令。 ### 为什么文件系统数据量与对象存储占用空间存在差异? {#size-inconsistency} -* [JuiceFS 随机写](#random-write)会产生文件碎片,因此对象存储的占用空间大部分情况下是大于等于实际大小的,尤其是短时间内进行大量的覆盖写产生许多文件碎片后,这些碎片仍旧占用着对象存储的空间。不过也不必担心,因为在每次读/写文件的时候都会检查,并在后台任务进行该文件相关碎片的整理工作。你可以通过 [`juicefs gc —-compact -—delete`](./reference/command_reference.md#gc) 命令手动触发合并与回收。 +* [JuiceFS 随机写](#random-write)会产生文件碎片,因此对象存储的占用空间大部分情况下是大于等于实际大小的,尤其是短时间内进行大量的覆盖写产生许多文件碎片后,这些碎片仍旧占用着对象存储的空间。不过也不必担心,因为在每次读/写文件的时候都会检查,并在后台任务进行该文件相关碎片的整理工作。你可以通过 [`juicefs gc —-compact -—delete`](./reference/command_reference.mdx#gc) 命令手动触发合并与回收。 * 如果开启了[「回收站」](./security/trash.md)功能,被删除的文件不会立刻清理,而是在回收站内保留指定时间后,才进行清理删除。 * 碎片被合并以后,失效的旧碎片也会在回收站中进行保留(但对用户不可见),过期时间也遵循回收站的设置。如果想要清理这些碎片,阅读[回收站和文件碎片](./security/trash.md#gc)。 -* 如果文件系统开启了压缩功能(也就是 [`format`](./reference/command_reference.md#format) 命令的 `--compress` 参数,默认不开启),那么对象存储上存储的对象有可能比实际文件大小更小(取决于不同类型文件的压缩比)。 +* 如果文件系统开启了压缩功能(也就是 [`format`](./reference/command_reference.mdx#format) 命令的 `--compress` 参数,默认不开启),那么对象存储上存储的对象有可能比实际文件大小更小(取决于不同类型文件的压缩比)。 * 根据所使用对象存储的[存储类型](reference/how_to_set_up_object_storage.md#storage-class)不同,云服务商可能会针对某些存储类型设置最小计量单位。例如阿里云 OSS 低频访问存储的[最小计量单位](https://help.aliyun.com/document_detail/173534.html)是 64KB,如果单个文件小于 64KB 也会按照 64KB 计算。 * 对于自建对象存储,例如 MinIO,实际占用大小也受到[存储级别](https://github.com/minio/minio/blob/master/docs/erasure/storage-class/README.md)设置的影响。 
@@ -68,7 +68,7 @@ slug: /faq ### 一个文件系统可以绑定多个不同的对象存储吗(比如同时用 Amazon S3、GCS 和 OSS 组成一个文件系统)? -不支持。但在创建文件系统时可以设定关联同一个对象存储的多个 bucket,从而解决单个 bucket 对象数量限制的问题,例如,可以为一个文件系统关联多个 S3 Bucket。具体请参考 [`--shards`](./reference/command_reference.md#format) 选项的说明。 +不支持。但在创建文件系统时可以设定关联同一个对象存储的多个 bucket,从而解决单个 bucket 对象数量限制的问题,例如,可以为一个文件系统关联多个 S3 Bucket。具体请参考 [`--shards`](./reference/command_reference.mdx#format) 选项的说明。 ## 性能相关问题 @@ -80,7 +80,7 @@ JuiceFS 内置多级缓存(主动失效),一旦缓存预热好,访问的 ### JuiceFS 支持随机读写吗?原理如何? {#random-write} -支持,包括通过 mmap 等进行的随机读写。目前 JuiceFS 主要是对顺序读写进行了大量优化,对随机读写的优化也在进行中。如果想要更好的随机读性能,建议关闭压缩([`--compress none`](reference/command_reference.md#format))。 +支持,包括通过 mmap 等进行的随机读写。目前 JuiceFS 主要是对顺序读写进行了大量优化,对随机读写的优化也在进行中。如果想要更好的随机读性能,建议关闭压缩([`--compress none`](reference/command_reference.mdx#format))。 JuiceFS 不将原始文件存入对象存储,而是将其按照某个大小(默认为 4MiB)拆分为 N 个数据块(Block)后,上传到对象存储,然后将数据块的 ID 存入元数据引擎。随机写的时候,逻辑上是要覆盖原本的内容,实际上是把**要覆盖的数据块**的元数据标记为旧数据,同时只上传随机写时产生的**新数据块**到对象存储,并将**新数据块**对应的元数据更新到元数据引擎中。 @@ -90,7 +90,7 @@ JuiceFS 不将原始文件存入对象存储,而是将其按照某个大小( ### 怎么快速地拷贝大量小文件到 JuiceFS? 
-请在挂载时加上 [`--writeback` 选项](reference/command_reference.md#mount),它会先把数据写入本机的缓存,然后再异步上传到对象存储,会比直接上传到对象存储快很多倍。 +请在挂载时加上 [`--writeback` 选项](reference/command_reference.mdx#mount-data-cache-options),它会先把数据写入本机的缓存,然后再异步上传到对象存储,会比直接上传到对象存储快很多倍。 请查看[「客户端写缓存」](guide/cache.md#client-write-cache)了解更多信息。 diff --git a/docs/zh_cn/getting-started/standalone.md b/docs/zh_cn/getting-started/standalone.md index ec7086f1d684..2df7977eeeab 100644 --- a/docs/zh_cn/getting-started/standalone.md +++ b/docs/zh_cn/getting-started/standalone.md @@ -23,7 +23,7 @@ curl -sSL https://d.juicefs.com/install | sh - ### 基本概念 -创建文件系统使用客户端提供的 [`format`](../reference/command_reference.md#format) 命令,一般格式为: +创建文件系统使用客户端提供的 [`format`](../reference/command_reference.mdx#format) 命令,一般格式为: ```shell juicefs format [command options] META-URL NAME @@ -67,7 +67,7 @@ juicefs format sqlite3://myjfs.db myjfs ### 基本概念 -挂载文件系统使用客户端提供的 [`mount`](../reference/command_reference.md#mount) 命令,一般格式为: +挂载文件系统使用客户端提供的 [`mount`](../reference/command_reference.mdx#mount) 命令,一般格式为: ```shell juicefs mount [command options] META-URL MOUNTPOINT diff --git a/docs/zh_cn/guide/cache.md b/docs/zh_cn/guide/cache.md index 6950e605ed20..68042852bc7c 100644 --- a/docs/zh_cn/guide/cache.md +++ b/docs/zh_cn/guide/cache.md @@ -21,7 +21,7 @@ JuiceFS 提供包括元数据缓存、数据读写缓存等多种缓存机制。 对于对象存储,JuiceFS 将文件分成一个个数据块(默认 4MiB),赋予唯一 ID 并上传至对象存储服务。文件的任何修改操作都将生成新的数据块,原有块保持不变,所以不用担心数据缓存的一致性问题,因为一旦文件被修改过了,JuiceFS 会从对象存储读取新的数据块。而老的失效数据块,也会随着[回收站](../security/trash.md)或碎片合并机制被删除,避免对象存储泄露。 -[本地数据缓存](#client-read-cache)缓存也是以对象存储的数据块做为最小单元。一旦文件数据被下载到缓存盘,一致性就和缓存盘可靠性相关,如果磁盘数据发生了篡改,客户端也会读取到错误的数据。对于这种担忧,可以配置合适的 [`--verify-cache-checksum`](../reference/command_reference.md#mount) 策略,确保缓存盘数据完整性。 +[本地数据缓存](#client-read-cache)缓存也是以对象存储的数据块做为最小单元。一旦文件数据被下载到缓存盘,一致性就和缓存盘可靠性相关,如果磁盘数据发生了篡改,客户端也会读取到错误的数据。对于这种担忧,可以配置合适的 [`--verify-cache-checksum`](../reference/command_reference.mdx#mount-data-cache-options) 策略,确保缓存盘数据完整性。 ## 元数据缓存 {#metadata-cache} @@ -54,7 +54,7 @@ JuiceFS 客户端在 
`open` 操作即打开一个文件时,其文件属性会 为保证「关闭再打开(close-to-open)」一致性,`open` 操作默认需要直接访问元数据引擎,不会利用缓存。也就是说,客户端 A 的修改在客户端 B 不一定能立即看到。但是,一旦这个文件在 A 写入完成并关闭,之后在任何一个客户端重新打开该文件都可以保证能访问到最新写入的数据,不论是否在同一个节点。文件的属性缓存也不一定要通过 `open` 操作建立,比如 `tail -f` 会不断查询文件属性,在这种情况下无需重新打开文件,也能获得最新文件变动。 -如果要利用上客户端内存的元数据缓存,需要设置 [`--open-cache`](../reference/command_reference.md#mount),指定缓存的有效时长。在缓存有效期间执行的 `getattr` 和 `open` 操作会从内存缓存中立即返回 slice 信息。有了这些信息,就能省去每次打开文件都重新访问元数据服务的开销。 +如果要利用上客户端内存的元数据缓存,需要设置 [`--open-cache`](../reference/command_reference.mdx#mount-metadata-cache-options),指定缓存的有效时长。在缓存有效期间执行的 `getattr` 和 `open` 操作会从内存缓存中立即返回 slice 信息。有了这些信息,就能省去每次打开文件都重新访问元数据服务的开销。 使用 `--open-cache` 选项设置了缓存时间以后,文件系统就不再满足 close-to-open 一致性了,不过与内核元数据类似,发起修改的客户端同样能享受到客户端内存元数据缓存主动失效,其他客户端就只能等待缓存自然过期。因此为了保证文件系统语义,`--open-cache` 默认关闭。如果文件很少发生修改,或者只读场景下(例如 AI 模型训练),则推荐根据情况设置 `--open-cache`,进一步提高读性能。 @@ -72,7 +72,7 @@ JuiceFS 客户端在 `open` 操作即打开一个文件时,其文件属性会 ## 读写缓冲区 {#buffer-size} -读写缓冲区是分配给 JuiceFS 客户端进程的一块内存,通过 [`--buffer-size`](../reference/command_reference.md#mount) 控制着大小,默认 300(单位 MiB)。读和写产生的数据,都会途经这个缓冲区。所以缓冲区的作用非常重要,在大规模场景下遇到性能不足时,提升缓冲区大小也是常见的优化方式。 +读写缓冲区是分配给 JuiceFS 客户端进程的一块内存,通过 [`--buffer-size`](../reference/command_reference.mdx#mount-data-cache-options) 控制着大小,默认 300(单位 MiB)。读和写产生的数据,都会途经这个缓冲区。所以缓冲区的作用非常重要,在大规模场景下遇到性能不足时,提升缓冲区大小也是常见的优化方式。 ### 预读和预取 {#readahead-prefetch} @@ -88,7 +88,7 @@ JuiceFS 客户端在 `open` 操作即打开一个文件时,其文件属性会 ![prefetch](../images/buffer-prefetch.svg) -预取的设计是基于「假如文件的某一小段被应用读取,那么文件附近的区域也很可能会被读取」的假设,对于不同的应用场景,这样的假设未必成立——如果应用对大文件进行偏移极大的、稀疏的随机读,那么不难想象,prefetch 会带来明显的读放大。因此如果你已经对应用场景的读取模式有深入了解,确认并不需要 prefetch,可以通过 [`--prefetch=0`](../reference/command_reference.md#mount-data-cache-options) 禁用该行为。 +预取的设计是基于「假如文件的某一小段被应用读取,那么文件附近的区域也很可能会被读取」的假设,对于不同的应用场景,这样的假设未必成立——如果应用对大文件进行偏移极大的、稀疏的随机读,那么不难想象,prefetch 会带来明显的读放大。因此如果你已经对应用场景的读取模式有深入了解,确认并不需要 prefetch,可以通过 [`--prefetch=0`](../reference/command_reference.mdx#mount-data-cache-options) 禁用该行为。 
预读和预取分别优化了顺序读、随机读性能,也会带来一定程度的读放大,阅读[「读放大」](../administration/troubleshooting.md#read-amplification)了解更多信息。 @@ -114,9 +114,7 @@ JuiceFS 客户端在 `open` 操作即打开一个文件时,其文件属性会 如果希望增加顺序读速度,可以增加 `--buffer-size`,来放大预读窗口,窗口内尚未下载到本地的数据块,会并发地异步下载。同时注意,单个文件的预读不会把整个缓冲区用完,限制为 1/4 到 1/2。因此如果在优化单个大文件的顺序读时发现 `juicefs stats` 中 `buf` 用量已经接近一半,说明该文件的预读额度已满,此时虽然缓冲区还有空闲,但也需要继续增加 `--buffer-size` 才能进一步提升单个大文件的预读性能。 -挂载参数 [`--buffer-size`](../reference/command_reference.md#mount-data-storage-options) 控制着 JuiceFS 的读写缓冲区大小,默认 300(单位 MiB)。读写缓冲区的大小决定了读取文件以及预读(readahead)的内存数据量,同时也控制着写缓存(pending page)的大小。因此在面对高并发读写场景的时候,我们推荐对 `--buffer-size` 进行相应的扩容,能有效提升性能。 - -如果你希望增加写入速度,通过调整 [`--max-uploads`](../reference/command_reference.md#mount-data-storage-options) 增大了上传并发度,但并没有观察到上行带宽用量有明显增加,那么此时可能就需要相应地调大 `--buffer-size`,让并发线程更容易申请到内存来工作。这个排查原理反之亦然:如果增大 `--buffer-size` 却没有观察到上行带宽占用提升,也可以考虑增大 `--max-uploads` 来提升上传并发度。 +如果你希望增加写入速度,通过调整 [`--max-uploads`](../reference/command_reference.mdx#mount-data-storage-options) 增大了上传并发度,但并没有观察到上行带宽用量有明显增加,那么此时可能就需要相应地调大 `--buffer-size`,让并发线程更容易申请到内存来工作。这个排查原理反之亦然:如果增大 `--buffer-size` 却没有观察到上行带宽占用提升,也可以考虑增大 `--max-uploads` 来提升上传并发度。 可想而知,`--buffer-size` 也控制着每次 `flush` 操作的上传数据量大小,因此如果客户端处在一个低带宽的网络环境下,可能反而需要降低 `--buffer-size` 来避免 `flush` 超时。关于低带宽场景排查请详见[「与对象存储通信不畅」](../administration/troubleshooting.md#io-error-object-storage)。 @@ -144,11 +142,11 @@ JuiceFS 客户端会跟踪所有最近被打开的文件,要重复打开相同 客户端会根据应用读数据的模式,自动做预读和缓存操作以提高顺序读的性能。数据会缓存到本地文件系统中,可以是基于硬盘、SSD 或者内存的任意本地文件系统。 -JuiceFS 客户端会把从对象存储下载的数据,以及新上传的小于 1 个 block 大小的数据写入到缓存目录中,不做压缩和加密。如果希望保证应用程序首次访问数据的时候就能获得已缓存的性能,可以使用 [`juicefs warmup`](../reference/command_reference.md#warmup) 命令来对缓存数据进行预热。 +JuiceFS 客户端会把从对象存储下载的数据,以及新上传的小于 1 个 block 大小的数据写入到缓存目录中,不做压缩和加密。如果希望保证应用程序首次访问数据的时候就能获得已缓存的性能,可以使用 [`juicefs warmup`](../reference/command_reference.mdx#warmup) 命令来对缓存数据进行预热。 如果缓存目录所在的文件系统无法正常工作时 JuiceFS 客户端能立刻返回错误,并降级成直接访问对象存储。这对于本地盘而言通常是成立的,但如果缓存目录所在的文件系统异常时体现为读操作卡死(如某些内核态的网络文件系统),那么 JuiceFS 也会随之一起卡住,这就要求你对缓存目录底层的文件系统行为进行调优,做到快速失败。 
-以下是缓存配置的关键参数(完整参数列表见 [`juicefs mount`](../reference/command_reference.md#mount)): +以下是缓存配置的关键参数(完整参数列表见 [`juicefs mount`](../reference/command_reference.mdx#mount)): * `--prefetch` @@ -182,7 +180,7 @@ JuiceFS 客户端会把从对象存储下载的数据,以及新上传的小于 由于默认的写入流程是「先上传,再提交」,可想而知,大量小文件写入时,这样的流程将影响写入性能。启用客户端写缓存以后,写入流程将改为「先提交,再异步上传」,写文件不会等待数据上传到对象存储,而是写入到本地缓存目录并提交到元数据服务后就立即返回,本地缓存目录中的文件数据会在后台异步上传至对象存储。 -如果你的场景需要写入大量临时文件,不需要持久化和分布式访问,也可以用 [`--upload-delay`](../reference/command_reference.md#mount) 参数来设置延缓数据上传到对象存储,如果在等待的时间内数据被应用删除,则无需再上传到对象存储,既提升了性能也节省了成本。相较于本地硬盘而言,JuiceFS 提供了后端保障,在缓存目录容量不足时依然会自动将数据上传,确保在应用侧不会因此而感知到错误。 +如果你的场景需要写入大量临时文件,不需要持久化和分布式访问,也可以用 [`--upload-delay`](../reference/command_reference.mdx#mount-data-cache-options) 参数来设置延缓数据上传到对象存储,如果在等待的时间内数据被应用删除,则无需再上传到对象存储,既提升了性能也节省了成本。相较于本地硬盘而言,JuiceFS 提供了后端保障,在缓存目录容量不足时依然会自动将数据上传,确保在应用侧不会因此而感知到错误。 挂载时加入 `--writeback` 参数,便能开启客户端写缓存,但在该模式下请注意: diff --git a/docs/zh_cn/guide/clone.md b/docs/zh_cn/guide/clone.md index af329874ab78..5d19aefb12db 100644 --- a/docs/zh_cn/guide/clone.md +++ b/docs/zh_cn/guide/clone.md @@ -30,4 +30,4 @@ juicefs clone /mnt/jfs/dir1 /mnt/jfs/dir2 - 对于目录:`clone` 命令对目录的原子性没有保证。在克隆过程中,如果源目录发生变化,则目标目录与源目录可能不一致。 - 同时往同一个位置创建克隆时,只会有一个成功,失败请求的会清理掉临时创建的目录树。 -克隆操作是在挂载进程中进行,如果克隆命令意外退出,克隆操作可能完成或者被中断。失败或者被中断的克隆操作,`mount` 进程会尝试清理已创建好的子树,如果清理子树也失败(元数据不可用或者`mount`进程意外退出),则会导致元数据泄露和可能的对象存储泄露。此时如果源对象被删除了,则会导致其对象存储上的数据不会被释放(因为被未挂载的的子树所引用),直到使用 [`juicefs gc --delete`](../reference/command_reference.md#gc) 命令清理。 +克隆操作是在挂载进程中进行,如果克隆命令意外退出,克隆操作可能完成或者被中断。失败或者被中断的克隆操作,`mount` 进程会尝试清理已创建好的子树,如果清理子树也失败(元数据不可用或者`mount`进程意外退出),则会导致元数据泄露和可能的对象存储泄露。此时如果源对象被删除了,则会导致其对象存储上的数据不会被释放(因为被未挂载的的子树所引用),直到使用 [`juicefs gc --delete`](../reference/command_reference.mdx#gc) 命令清理。 diff --git a/docs/zh_cn/guide/gateway.md b/docs/zh_cn/guide/gateway.md index e21ab54f41da..fdbef41b1dc2 100644 --- a/docs/zh_cn/guide/gateway.md +++ b/docs/zh_cn/guide/gateway.md @@ -46,7 +46,7 @@ JuiceFS S3 网关的常见的使用场景有: `gateway` 子命令至少需要提供两个参数,第一个是元数据引擎的 URL,第二个是 S3 
网关监听的地址和端口。JuiceFS v1.2 开始支持后台启动,可以使用 `--background` 或 `-d` 选项将 S3 网关作为后台服务运行。 - S3 Gateway 默认没有启用[多桶支持](#多桶支持),可以添加 `--multi-buckets` 选项开启。还可以添加[其他选项](../reference/command_reference.md#gateway)优化 S3 网关,比如,可以将默认的本地缓存设置为 20 GiB。 + S3 Gateway 默认没有启用[多桶支持](#多桶支持),可以添加 `--multi-buckets` 选项开启。还可以添加[其他选项](../reference/command_reference.mdx#gateway)优化 S3 网关,比如,可以将默认的本地缓存设置为 20 GiB。 ```shell juicefs gateway --cache-size 20480 redis://localhost:6379/1 localhost:9000 @@ -149,8 +149,8 @@ juicefs gateway redis://localhost:6379/1 localhost:9000 --multi-buckets ### 启用虚拟主机风格请求 -默认情况下,S3 网关支持格式为 的路径类型请求。 -`MINIO_DOMAIN` 环境变量被用来启用虚拟主机类型请求。如果请求的`Host`头信息匹配 `(.+).mydomain.com`,则匹配的模式 `$1` 被用作 bucket,并且路径被用作 object. +默认情况下,S3 网关支持格式为 `http://mydomain.com/bucket/object` 的路径类型请求。`MINIO_DOMAIN` 环境变量被用来启用虚拟主机类型请求。如果请求的 `Host` 头信息匹配 `(.+).mydomain.com`,则匹配的模式 `$1` 被用作 bucket,并且路径被用作 object. + 示例: ```shell diff --git a/docs/zh_cn/guide/quota.md b/docs/zh_cn/guide/quota.md index 417cecc8fd08..c51ae4e3af52 100644 --- a/docs/zh_cn/guide/quota.md +++ b/docs/zh_cn/guide/quota.md @@ -206,7 +206,7 @@ JuiceFS 允许自由地设置各级目录配额,实际使用的时候会递归 ### 子目录挂载 {#subdirectory-mount} -JuiceFS 支持使用 [`--subdir`](../reference/command_reference.md#mount) 挂载任意子目录。如果挂载的子目录设置了目录配额,则可以使用系统自带的 `df` 命令查看目录配额和当前使用量。比如文件系统配额为 1PiB 和 10M 个 inode,而 `/test` 目录的配额为 1GiB 和 400 个 inode。使用根目录挂载时 `df` 命令的输出为: +JuiceFS 支持使用 [`--subdir`](../reference/command_reference.mdx#mount-metadata-options) 挂载任意子目录。如果挂载的子目录设置了目录配额,则可以使用系统自带的 `df` 命令查看目录配额和当前使用量。比如文件系统配额为 1PiB 和 10M 个 inode,而 `/test` 目录的配额为 1GiB 和 400 个 inode。使用根目录挂载时 `df` 命令的输出为: ```shell $ df -h diff --git a/docs/zh_cn/guide/sync.md b/docs/zh_cn/guide/sync.md index ee918279bc1c..8817e1257547 100644 --- a/docs/zh_cn/guide/sync.md +++ b/docs/zh_cn/guide/sync.md @@ -3,7 +3,7 @@ title: 数据同步 sidebar_position: 7 --- -[`juicefs sync`](../reference/command_reference.md#sync) 是强大的数据同步工具,可以在所有支持的存储之间并发同步或迁移数据,包括对象存储、JuiceFS、本地文件系统,你可以在这三者之间以任意方向和搭配进行数据同步。除此之外,还支持同步通过 SSH 访问远程目录、HDFS、WebDAV 
等,同时提供增量同步、模式匹配(类似 rsync)、分布式同步等高级功能。 +[`juicefs sync`](../reference/command_reference.mdx#sync) 是强大的数据同步工具,可以在所有支持的存储之间并发同步或迁移数据,包括对象存储、JuiceFS、本地文件系统,你可以在这三者之间以任意方向和搭配进行数据同步。除此之外,还支持同步通过 SSH 访问远程目录、HDFS、WebDAV 等,同时提供增量同步、模式匹配(类似 rsync)、分布式同步等高级功能。 ## 基本用法 @@ -17,7 +17,7 @@ juicefs sync [command options] SRC DST - `SRC` 代表数据源地址及路径 - `DST` 代表目标地址及路径 -- `[command options]` 代表可选的同步选项,详情查看[命令参考](../reference/command_reference.md#sync)。 +- `[command options]` 代表可选的同步选项,详情查看[命令参考](../reference/command_reference.mdx#sync)。 地址格式均为: @@ -147,7 +147,7 @@ myfs=redis://10.10.0.8:6379/1 juicefs sync s3://ABCDEFG:HIJKLMN@aaa.s3.us-west-1 ### 增量同步与全量同步 {#incremental-and-full-synchronization} -`juicefs sync` 默认以增量同步方式工作,对于已存在的文件,仅在文件大小不一样时,才再次同步进行覆盖。在此基础上,还可以指定 [`--update`](../reference/command_reference.md#sync),在源文件 `mtime` 更新时进行覆盖。如果你的场景对正确性有着极致要求,可以指定 [`--check-new`](../reference/command_reference.md#sync) 或 [`--check-all`](../reference/command_reference.md#sync),来对两边的文件进行字节流比对,确保数据一致。 +`juicefs sync` 默认以增量同步方式工作,对于已存在的文件,仅在文件大小不一样时,才再次同步进行覆盖。在此基础上,还可以指定 [`--update`](../reference/command_reference.mdx#sync),在源文件 `mtime` 更新时进行覆盖。如果你的场景对正确性有着极致要求,可以指定 [`--check-new`](../reference/command_reference.mdx#sync) 或 [`--check-all`](../reference/command_reference.mdx#sync),来对两边的文件进行字节流比对,确保数据一致。 如需全量同步,即不论目标路径上是否存在相同的文件都重新同步,可以使用 `--force-update` 或 `-f`。例如,将 [对象存储 A](#required-storages) 的 `movies` 目录全量同步到 [JuiceFS 文件系统](#required-storages): @@ -243,8 +243,8 @@ juicefs sync --force-update s3://ABCDEFG:HIJKLMN@aaa.s3.us-west-1.amazonaws.com/ + 对于 `dir_name/***` 来说,它将匹配 dir_name 下的所有层次的文件。注意,每个子路径元素会自顶向下逐层,被访问因此 `include/exclude` 匹配模式会对每个子路径元素的全路径名进行递归 (例如,要包含 `/foo/bar/baz`,则`/foo`和`/foo/bar`必须不能被排除)。实际上,排除匹配模式在发现有文件要传输时,此文件所在目录层次的排除遍历会被短路。如果排除了某个父目录,则更深层次的 include 模式匹配将无效,这在使用尾随`*`时尤为重要。例如,下面的例子不会正常工作: ``` - --include='/some/path/this-file-will-not-be-found' - --include='/file-is-included' + --include='/some/path/this-file-will-not-be-found' + --include='/file-is-included' --exclude='*' 
``` diff --git a/docs/zh_cn/introduction/io_processing.md b/docs/zh_cn/introduction/io_processing.md index 743266e75cf5..3583c4e46da3 100644 --- a/docs/zh_cn/introduction/io_processing.md +++ b/docs/zh_cn/introduction/io_processing.md @@ -13,7 +13,7 @@ JuiceFS 对大文件会做多级拆分([JuiceFS 如何存储文件](../introdu ![internals-write](../images/internals-write.png) -用 [`juicefs stats`](../reference/command_reference.md#stats) 命令记录的指标图,可以直观地看到实时性能数据: +用 [`juicefs stats`](../reference/command_reference.mdx#stats) 命令记录的指标图,可以直观地看到实时性能数据: ![internals-stats](../images/internals-stats.png) @@ -32,7 +32,7 @@ JuiceFS 对大文件会做多级拆分([JuiceFS 如何存储文件](../introdu 由于写请求写入客户端内存缓冲区即可返回,因此通常来说 JuiceFS 的 Write 时延非常低(几十微秒级别),真正上传到对象存储的动作由内部自动触发,比如单个 Slice 过大,Slice 数量过多,或者仅仅是在缓冲区停留时间过长等,或应用主动触发,比如关闭文件、调用 `fsync` 等。 -缓冲区中的数据只有在被持久化后才能释放,因此当写入并发较大时,如果缓冲区大小不足(默认 300MiB,通过 [`--buffer-size`](../reference/command_reference.md#mount) 调节),或者对象存储性能不佳,读写缓冲区将持续被占用而导致写阻塞。缓冲区大小可以在指标图的 usage.buf 一列中看到。当使用量超过阈值时,JuiceFS Client 会主动为 Write 添加约 10ms 等待时间以减缓写入速度;若已用量超过阈值两倍,则会导致写入暂停直至缓冲区得到释放。因此,在观察到 Write 时延上升以及 Buffer 长时间超过阈值时,通常需要尝试设置更大的 `--buffer-size`。另外,增大上传并发度([`--max-uploads`](../reference/command_reference.md#mount),默认 20)也能提升写入到对象存储的带宽,从而加快缓冲区的释放。 +缓冲区中的数据只有在被持久化后才能释放,因此当写入并发较大时,如果缓冲区大小不足(默认 300MiB,通过 [`--buffer-size`](../reference/command_reference.mdx#mount-data-cache-options) 调节),或者对象存储性能不佳,读写缓冲区将持续被占用而导致写阻塞。缓冲区大小可以在指标图的 usage.buf 一列中看到。当使用量超过阈值时,JuiceFS Client 会主动为 Write 添加约 10ms 等待时间以减缓写入速度;若已用量超过阈值两倍,则会导致写入暂停直至缓冲区得到释放。因此,在观察到 Write 时延上升以及 Buffer 长时间超过阈值时,通常需要尝试设置更大的 `--buffer-size`。另外,增大上传并发度([`--max-uploads`](../reference/command_reference.mdx#mount-data-storage-options),默认 20)也能提升写入到对象存储的带宽,从而加快缓冲区的释放。 ### 随机写 {#random-write} @@ -52,7 +52,7 @@ JuiceFS 支持随机写,包括通过 mmap 等进行的随机写。 ## 读取流程 {#workflow-of-read} -JuiceFS 支持顺序读和随机读(包括基于 mmap 的随机读),在处理读请求时会通过对象存储的 `GetObject` 接口完整读取 Block 对应的对象,也有可能仅仅读取对象中一定范围的数据(比如通过 [S3 API](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) 的 `Range` 
参数限定读取范围)。与此同时异步地进行预读(通过 [`--prefetch`](../reference/command_reference.md#mount) 参数控制预读并发度),预读会将整个对象存储块下载到本地缓存目录,以备后用(如指标图中的第 2 阶段,blockcache 有很高的写入带宽)。显然,在顺序读时,这些提前获取的数据都会被后续的请求访问到,缓存命中率非常高,因此也能充分发挥出对象存储的读取性能。数据流如下图所示: +JuiceFS 支持顺序读和随机读(包括基于 mmap 的随机读),在处理读请求时会通过对象存储的 `GetObject` 接口完整读取 Block 对应的对象,也有可能仅仅读取对象中一定范围的数据(比如通过 [S3 API](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) 的 `Range` 参数限定读取范围)。与此同时异步地进行预读(通过 [`--prefetch`](../reference/command_reference.mdx#mount) 参数控制预读并发度),预读会将整个对象存储块下载到本地缓存目录,以备后用(如指标图中的第 2 阶段,blockcache 有很高的写入带宽)。显然,在顺序读时,这些提前获取的数据都会被后续的请求访问到,缓存命中率非常高,因此也能充分发挥出对象存储的读取性能。数据流如下图所示: ![internals-read](../images/internals-read.png) diff --git a/docs/zh_cn/reference/_common_options.mdx b/docs/zh_cn/reference/_common_options.mdx new file mode 100644 index 000000000000..5f3132d503a1 --- /dev/null +++ b/docs/zh_cn/reference/_common_options.mdx @@ -0,0 +1,69 @@ +#### 元数据相关参数 {#mount-metadata-options} + +|项 | 说明| +|-|-| +|`--subdir=value`|挂载指定的子目录,默认挂载整个文件系统。| +|`--backup-meta=3600`|自动备份元数据到对象存储的间隔时间;单位秒,默认 3600,设为 0 表示不备份。| +|`--backup-skip-trash` 1.2|备份元数据时跳过回收站中的文件和目录。| +|`--heartbeat=12`|发送心跳的间隔(单位秒),建议所有客户端使用相同的心跳值 (默认:12)| +|`--read-only`|启用只读模式挂载。| +|`--no-bgjob`|禁用后台任务,默认为 false,也就是说客户端会默认运行后台任务。后台任务包含:
  • 清理回收站中过期的文件(在 [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go) 中搜索 `cleanupDeletedFiles` 和 `cleanupTrash`)
  • 清理引用计数为 0 的 Slice(在 [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go) 中搜索 `cleanupSlices`)
  • 清理过期的客户端会话(在 [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go) 中搜索 `CleanStaleSessions`)
特别地,与[企业版](https://juicefs.com/docs/zh/cloud/guide/background-job)不同,社区版碎片合并(Compaction)不受该选项的影响,而是随着文件读写操作,自动判断是否需要合并,然后异步执行(以 Redis 为例,在 [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/redis.go) 中搜索 `compactChunk`)| +|`--atime-mode=noatime` 1.1|控制如何更新 atime(文件最后被访问的时间)。支持以下模式:
  • `noatime`(默认):仅在文件创建和主动调用 `SetAttr` 时设置,平时访问与修改文件不影响 atime 值。考虑到更新 atime 需要运行额外的事务,对性能有影响,因此默认关闭。
  • `relatime`:仅在 mtime(文件内容修改时间)或 ctime(文件元数据修改时间)比 atime 新,或者 atime 超过 24 小时没有更新时进行更新。
  • `strictatime`:持续更新 atime
| +|`--skip-dir-nlink=20` 1.1|跳过更新目录 nlink 前的重试次数 (仅用于 TKV, 0 代表永不跳过) (默认:20)| +|`--skip-dir-mtime=100ms` 1.2|如果 mtime 差异小于该值(默认值:100ms),则跳过更新目录的属性。| + +#### 元数据缓存参数 {#mount-metadata-cache-options} + +元数据缓存的介绍和使用,详见[「内核元数据缓存」](../guide/cache.md#kernel-metadata-cache)及[「客户端内存元数据缓存」](../guide/cache.md#client-memory-metadata-cache)。 + +|项 | 说明| +|-|-| +|`--attr-cache=1`|属性缓存过期时间;单位为秒,默认为 1。| +|`--entry-cache=1`|文件项缓存过期时间;单位为秒,默认为 1。| +|`--dir-entry-cache=1`|目录项缓存过期时间;单位为秒,默认为 1。| +|`--open-cache=0`|打开的文件的缓存过期时间,单位为秒,默认为 0,代表关闭该特性。| +|`--open-cache-limit=value` 1.1|允许缓存的最大文件个数 (软限制,0 代表不限制) (默认:10000)| + +#### 数据存储参数 {#mount-data-storage-options} + +|项 | 说明| +|-|-| +|`--storage=file`|对象存储类型 (例如 `s3`、`gs`、`oss`、`cos`) (默认:`"file"`,参考[文档](../reference/how_to_set_up_object_storage.md#supported-object-storage)查看所有支持的对象存储类型)| +|`--bucket=value`|为当前挂载点指定访问对象存储的 Endpoint。| +|`--storage-class value` 1.1|当前客户端写入数据的存储类型| +|`--get-timeout=60`|下载一个对象的超时时间;单位为秒 (默认:60)| +|`--put-timeout=60`|上传一个对象的超时时间;单位为秒 (默认:60)| +|`--io-retries=10`|网络异常时的重试次数,元数据请求的重试次数也由这个选项控制。如果超过重试次数将会返回 `EIO Input/output error` 错误。(默认:10)| +|`--max-uploads=20`|上传并发度,默认为 20。对于粒度为 4M 的写入模式,20 并发已经是很高的默认值,在这样的写入模式下,提高写并发往往需要伴随增大 `--buffer-size`, 详见「[读写缓冲区](../guide/cache.md#buffer-size)」。但面对百 K 级别的小随机写,并发量大的时候很容易产生阻塞等待,造成写入速度恶化。如果无法改善应用写模式,对其进行合并,那么需要考虑采用更高的写并发,避免排队等待。| +|`--max-stage-write=0` 1.2|异步写入数据块到缓存盘的最大并发数,如果达到最大并发数则会直接上传对象存储(此选项仅在启用[「客户端写缓存」](../guide/cache.md#client-write-cache)时有效)(默认值:0,即没有并发限制)| +|`--max-deletes=10`|删除对象的连接数 (默认:10)| +|`--upload-limit=0`|上传带宽限制,单位为 Mbps (默认:0)| +|`--download-limit=0`|下载带宽限制,单位为 Mbps (默认:0)| + +#### 数据缓存相关参数 {#mount-data-cache-options} + +|项 | 说明| +|-|-| +|`--buffer-size=300`|读写缓冲区的总大小;单位为 MiB (默认:300)。阅读[「读写缓冲区」](../guide/cache.md#buffer-size)了解更多。| +|`--prefetch=1`|并发预读 N 个块 (默认:1)。阅读[「客户端读缓存」](../guide/cache.md#client-read-cache)了解更多。| +|`--writeback`|后台异步上传对象,默认为 false。阅读[「客户端写缓存」](../guide/cache.md#client-write-cache)了解更多。| +|`--upload-delay=0`|启用 
`--writeback` 后,可以使用该选项控制数据延迟上传到对象存储,默认为 0 秒,相当于写入后立刻上传。该选项也支持 `s`(秒)、`m`(分)、`h`(时)这些单位。如果在等待的时间内数据被应用删除,则无需再上传到对象存储。如果数据只是临时落盘,可以考虑用该选项节约资源。阅读[「客户端写缓存」](../guide/cache.md#client-write-cache)了解更多。| +|`--upload-hours` 1.2|启用 `--writeback` 后,只在一天中指定的时间段上传数据块。参数的格式为 `<起始小时>,<结束小时>`(含「起始小时」,但是不含「结束小时」,「起始小时」必须小于或者大于「结束小时」),其中 `<小时>` 的取值范围为 0 到 23。例如 `0,6` 表示只在每天 0:00 至 5:59 之间上传数据块、`23,3` 表示只在每天 23:00 至第二天 2:59 之间上传数据块。| +|`--cache-dir=value`|本地缓存目录路径;使用 `:`(Linux、macOS)或 `;`(Windows)隔离多个路径 (默认:`$HOME/.juicefs/cache` 或 `/var/jfsCache`)。阅读[「客户端读缓存」](../guide/cache.md#client-read-cache)了解更多。| +|`--cache-mode value` 1.1|缓存块的文件权限 (默认:"0600")| +|`--cache-size=102400`|缓存对象的总大小;单位为 MiB (默认:102400)。阅读[「客户端读缓存」](../guide/cache.md#client-read-cache)了解更多。| +|`--free-space-ratio=0.1`|最小剩余空间比例,默认为 0.1。如果启用了[「客户端写缓存」](../guide/cache.md#client-write-cache),则该参数还控制着写缓存占用空间。阅读[「客户端读缓存」](../guide/cache.md#client-read-cache)了解更多。| +|`--cache-partial-only`|仅缓存随机小块读,默认为 false。阅读[「客户端读缓存」](../guide/cache.md#client-read-cache)了解更多。| +|`--verify-cache-checksum=full` 1.1|缓存数据一致性检查级别,启用 Checksum 校验后,生成缓存文件时会对数据切分做 Checksum 并记录于文件末尾,供读缓存时进行校验。支持以下级别:
  • `none`:禁用一致性检查,如果本地数据被篡改,将会读到错误数据;
  • `full`(默认):读完整数据块时才校验,适合顺序读场景;
  • `shrink`:对读范围内的切片数据进行校验,校验范围不包含读边界所在的切片(可以理解为开区间),适合随机读场景;
  • `extend`:对读范围内的切片数据进行校验,校验范围同时包含读边界所在的切片(可以理解为闭区间),因此将带来一定程度的读放大,适合对正确性有极致要求的随机读场景。
| +|`--cache-eviction=2-random` 1.1|缓存逐出策略(`none` 或 `2-random`)(默认值:2-random)| +|`--cache-scan-interval=1h` 1.1|扫描缓存目录重建内存索引的间隔(以秒为单位)(默认值:1h)| +|`--cache-expire=0` 1.2|超过设置的时间未被访问的缓存块将会被自动清除(即使 `--cache-eviction` 的值为 `none`,这些缓存块也会被删除),单位为秒,值为 0 表示永不过期(默认值:0)| + +#### 监控相关参数 {#mount-metrics-options} + +|项 | 说明| +|-|-| +|`--metrics=127.0.0.1:9567`|监控数据导出地址,默认为 `127.0.0.1:9567`。| +|`--custom-labels`|监控指标自定义标签,格式为 `key1:value1;key2:value2` (默认:"")| +|`--consul=127.0.0.1:8500`|Consul 注册中心地址,默认为 `127.0.0.1:8500`。| +|`--no-usage-report`|不发送使用量信息 (默认:false)| diff --git a/docs/zh_cn/reference/command_reference.md b/docs/zh_cn/reference/command_reference.mdx similarity index 79% rename from docs/zh_cn/reference/command_reference.md rename to docs/zh_cn/reference/command_reference.mdx index 23e20428e113..9bcdf3cb1ce0 100644 --- a/docs/zh_cn/reference/command_reference.md +++ b/docs/zh_cn/reference/command_reference.mdx @@ -8,6 +8,8 @@ description: JuiceFS 客户端的所有命令及选项的说明、用法和示 import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +import CommonOptions from './_common_options.mdx'; + 在终端输入 `juicefs` 并执行,就能看到所有可用的命令。在每个子命令后面添加 `-h/--help` 并运行,就能获得该命令的详细帮助信息,例如 `juicefs format -h`。 ``` @@ -18,7 +20,7 @@ USAGE: juicefs [global options] command [command options] [arguments...] 
VERSION: - 1.1.0 + 1.2.0 COMMANDS: ADMIN: @@ -168,7 +170,7 @@ juicefs format sqlite3://myjfs.db myjfs --trash-days=0 |项 | 说明| |-|-| -|`--block-size=4096`|块大小,单位为 KiB,默认 4096。4M 是一个较好的默认值,不少对象存储(比如 S3)都将 4M 设为内部的块大小,因此将 JuiceFS block size 设为相同大小,往往也能获得更好的性能。| +|`--block-size=4M`|块大小,单位为 KiB,默认 4M。4M 是一个较好的默认值,不少对象存储(比如 S3)都将 4M 设为内部的块大小,因此将 JuiceFS block size 设为相同大小,往往也能获得更好的性能。| |`--compress=none`|压缩算法,支持 `lz4`、`zstd`、`none`(默认),启用压缩将不可避免地对性能产生一定影响。这两种压缩算法中,`lz4` 提供更好的性能,但压缩比要逊于 `zstd`,他们的具体性能差别具体需要读者自行搜索了解。| |`--encrypt-rsa-key=value`|RSA 私钥的路径,查看[数据加密](../security/encryption.md)以了解更多。| |`--encrypt-algo=aes256gcm-rsa`|加密算法 (aes256gcm-rsa, chacha20-rsa) (默认:"aes256gcm-rsa")| @@ -233,11 +235,11 @@ juicefs config redis://localhost --min-client-version 1.0.0 --max-client-version |`--capacity value`|容量配额,单位为 GiB| |`--inodes value`|文件数配额| |`--trash-days value`|文件被自动清理前在回收站内保留的天数| +|`--enable-acl` 1.2|开启 [POSIX ACL](../security/posix_acl.md)(不支持关闭),同时允许连接的最小客户端版本会提升到 v1.2| |`--encrypt-secret`|如果密钥之前以原格式存储,则加密密钥 (默认值:false)| |`--min-client-version value` 1.1|允许连接的最小客户端版本| |`--max-client-version value` 1.1|允许连接的最大客户端版本| |`--dir-stats` 1.1|开启目录统计,这是快速汇总和目录配额所必需的 (默认值:false)| -|`--enable-acl` 1.2|开启 POSIX ACL(不支持关闭), 同时允许连接的最小客户端版本会提升到 v1.2| ### `juicefs quota` 1.1 {#quota} @@ -319,8 +321,8 @@ juicefs gc redis://localhost --delete |项 | 说明| |-|-| -|`--delete`|删除泄漏的对象,以及因不完整的 `clone` 命令而产生泄漏的元数据。| |`--compact`|对所有文件执行碎片合并。| +|`--delete`|删除泄漏的对象,以及因不完整的 `clone` 命令而产生泄漏的元数据。| |`--threads=10`|并发线程数,默认为 10。| ### `juicefs fsck` {#fsck} @@ -387,6 +389,7 @@ juicefs dump redis://localhost sub-meta-dump.json --subdir /dir/in/jfs |`FILE`|导出文件路径,如果不指定,则会导出到标准输出。如果文件名以 `.gz` 结尾,将会自动压缩。| |`--subdir=path`|只导出指定子目录的元数据。| |`--keep-secret-key` 1.1|导出对象存储认证信息,默认为 `false`。由于是明文导出,使用时注意数据安全。如果导出文件不包含对象存储认证信息,后续的导入完成后,需要用 [`juicefs config`](#config) 重新配置对象存储认证信息。| +|`--threads=10` 1.2|并发线程数,默认 10。| |`--fast` 1.2|使用更多内存来加速导出。| |`--skip-trash` 1.2|跳过回收站中的文件和目录。| @@ -470,7 +473,6 @@ juicefs profile 
/mnt/jfs # 重放访问日志 cat /mnt/jfs/.accesslog > /tmp/jfs.alog - # 一段时间后按 Ctrl-C 停止 "cat" 命令 juicefs profile /tmp/jfs.alog @@ -628,71 +630,18 @@ juicefs mount redis://localhost /mnt/jfs --backup-meta 0 |`--prefix-internal` 1.1|挂载 JuiceFS 后,挂载点下默认创建 `.stats`, `.accesslog` 等虚拟文件。如果这些内部文件和你的应用发生冲突,可以启用该选项,添加 `.jfs` 前缀到所有内部文件。| |`-o value`|其他 FUSE 选项,详见 [FUSE 挂载选项](../reference/fuse_mount_options.md)| -#### 元数据相关参数 {#mount-metadata-options} - -|项 | 说明| -|-|-| -|`--subdir=value`|挂载指定的子目录,默认挂载整个文件系统。| -|`--backup-meta=3600`|自动备份元数据到对象存储的间隔时间;单位秒,默认 3600,设为 0 表示不备份。| -|`--backup-skip-trash` 1.2|备份元数据时跳过回收站中的文件和目录。| -|`--heartbeat=12`|发送心跳的间隔(单位秒),建议所有客户端使用相同的心跳值 (默认:12)| -|`--read-only`|启用只读模式挂载。| -|`--no-bgjob`|禁用后台任务,默认为 false,也就是说客户端会默认运行后台任务。后台任务包含:
  • 清理回收站中过期的文件(在 [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go) 中搜索 `cleanupDeletedFiles` 和 `cleanupTrash`)
  • 清理引用计数为 0 的 Slice(在 [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go) 中搜索 `cleanupSlices`)
  • 清理过期的客户端会话(在 [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/base.go) 中搜索 `CleanStaleSessions`)
特别地,与[企业版](https://juicefs.com/docs/zh/cloud/guide/background-job)不同,社区版碎片合并(Compaction)不受该选项的影响,而是随着文件读写操作,自动判断是否需要合并,然后异步执行(以 Redis 为例,在 [`pkg/meta/base.go`](https://github.com/juicedata/juicefs/blob/main/pkg/meta/redis.go) 中搜索 `compactChunk`)| -|`--atime-mode=noatime` 1.1|控制如何更新 atime(文件最后被访问的时间)。支持以下模式:
  • `noatime`(默认):仅在文件创建和主动调用 `SetAttr` 时设置,平时访问与修改文件不影响 atime 值。考虑到更新 atime 需要运行额外的事务,对性能有影响,因此默认关闭。
  • `relatime`:仅在 mtime(文件内容修改时间)或 ctime(文件元数据修改时间)比 atime 新,或者 atime 超过 24 小时没有更新时进行更新。
  • `strictatime`:持续更新 atime
| -|`--skip-dir-nlink value` 1.1|跳过更新目录 nlink 前的重试次数 (仅用于 TKV, 0 代表永不跳过) (默认:20)| - -#### 元数据缓存参数 {#mount-metadata-cache-options} + -元数据缓存的介绍和使用,详见[「内核元数据缓存」](../guide/cache.md#kernel-metadata-cache)及[「客户端内存元数据缓存」](../guide/cache.md#client-memory-metadata-cache)。 + +
-|项 | 说明| -|-|-| -|`--attr-cache=1`|属性缓存过期时间;单位为秒,默认为 1。| -|`--entry-cache=1`|文件项缓存过期时间;单位为秒,默认为 1。| -|`--dir-entry-cache=1`|目录项缓存过期时间;单位为秒,默认为 1。| -|`--open-cache=0`|打开的文件的缓存过期时间,单位为秒,默认为 0,代表关闭该特性。| -|`--open-cache-limit=value` 1.1|允许缓存的最大文件个数 (软限制,0 代表不限制) (默认:10000)| - -#### 数据存储参数 {#mount-data-storage-options} - -|项 | 说明| -|-|-| -|`--storage=file`|对象存储类型 (例如 `s3`、`gs`、`oss`、`cos`) (默认:`"file"`,参考[文档](../reference/how_to_set_up_object_storage.md#supported-object-storage)查看所有支持的对象存储类型)| -|`--bucket=value`|为当前挂载点指定访问对象存储的 Endpoint。| -|`--storage-class value` 1.1|当前客户端写入数据的存储类型| -|`--get-timeout=60`|下载一个对象的超时时间;单位为秒 (默认:60)| -|`--put-timeout=60`|上传一个对象的超时时间;单位为秒 (默认:60)| -|`--io-retries=10`|网络异常时的重试次数 (默认:10)| -|`--max-uploads=20`|上传并发度,默认为 20。对于粒度为 4M 的写入模式,20 并发已经是很高的默认值,在这样的写入模式下,提高写并发往往需要伴随增大 `--buffer-size`, 详见「[读写缓冲区](../guide/cache.md#buffer-size)」。但面对百 K 级别的小随机写,并发量大的时候很容易产生阻塞等待,造成写入速度恶化。如果无法改善应用写模式,对其进行合并,那么需要考虑采用更高的写并发,避免排队等待。| -|`--max-deletes=10`|删除对象的连接数 (默认:10)| -|`--upload-limit=0`|上传带宽限制,单位为 Mbps (默认:0)| -|`--download-limit=0`|下载带宽限制,单位为 Mbps (默认:0)| +#### {#mount-metadata-options} +#### {#mount-metadata-cache-options} +#### {#mount-data-storage-options} +#### {#mount-data-cache-options} +#### {#mount-metrics-options} -#### 数据缓存相关参数 {#mount-data-cache-options} - -|项 | 说明| -|-|-| -|`--buffer-size=300`|读写缓冲区的总大小;单位为 MiB (默认:300)。阅读[「读写缓冲区」](../guide/cache.md#buffer-size)了解更多。| -|`--prefetch=1`|并发预读 N 个块 (默认:1)。阅读[「客户端读缓存」](../guide/cache.md#client-read-cache)了解更多。| -|`--writeback`|后台异步上传对象,默认为 false。阅读[「客户端写缓存」](../guide/cache.md#client-write-cache)了解更多。| -|`--upload-delay=0`|启用 `--writeback` 后,可以使用该选项控制数据延迟上传到对象存储,默认为 0 秒,相当于写入后立刻上传。该选项也支持 `s`(秒)、`m`(分)、`h`(时)这些单位。如果在等待的时间内数据被应用删除,则无需再上传到对象存储。如果数据只是临时落盘,可以考虑用该选项节约资源。阅读[「客户端写缓存」](../guide/cache.md#client-write-cache)了解更多。| -|`--cache-dir=value`|本地缓存目录路径;使用 `:`(Linux、macOS)或 `;`(Windows)隔离多个路径 (默认:`$HOME/.juicefs/cache` 或 `/var/jfsCache`)。阅读[「客户端读缓存」](../guide/cache.md#client-read-cache)了解更多。| 
-|`--cache-mode value` 1.1|缓存块的文件权限 (默认:"0600")| -|`--cache-size=102400`|缓存对象的总大小;单位为 MiB (默认:102400)。阅读[「客户端读缓存」](../guide/cache.md#client-read-cache)了解更多。| -|`--free-space-ratio=0.1`|最小剩余空间比例,默认为 0.1。如果启用了[「客户端写缓存」](../guide/cache.md#client-write-cache),则该参数还控制着写缓存占用空间。阅读[「客户端读缓存」](../guide/cache.md#client-read-cache)了解更多。| -|`--cache-partial-only`|仅缓存随机小块读,默认为 false。阅读[「客户端读缓存」](../guide/cache.md#client-read-cache)了解更多。| -|`--verify-cache-checksum=full` 1.1|缓存数据一致性检查级别,启用 Checksum 校验后,生成缓存文件时会对数据切分做 Checksum 并记录于文件末尾,供读缓存时进行校验。支持以下级别:
  • `none`:禁用一致性检查,如果本地数据被篡改,将会读到错误数据;
  • `full`(默认):读完整数据块时才校验,适合顺序读场景;
  • `shrink`:对读范围内的切片数据进行校验,校验范围不包含读边界所在的切片(可以理解为开区间),适合随机读场景;
  • `extend`:对读范围内的切片数据进行校验,校验范围同时包含读边界所在的切片(可以理解为闭区间),因此将带来一定程度的读放大,适合对正确性有极致要求的随机读场景。
| -|`--cache-eviction value` 1.1|缓存逐出策略 (none 或 2-random) (默认值:"2-random")| -|`--cache-scan-interval value` 1.1|扫描缓存目录重建内存索引的间隔 (以秒为单位) (默认:"3600")| - -#### 监控相关参数 {#mount-metrics-options} - -|项 | 说明| -|-|-| -|`--metrics=127.0.0.1:9567`|监控数据导出地址,默认为 `127.0.0.1:9567`。| -|`--custom-labels`|监控指标自定义标签,格式为 `key1:value1;key2:value2` (默认:"")| -|`--consul=127.0.0.1:8500`|Consul 注册中心地址,默认为 `127.0.0.1:8500`。| -|`--no-usage-report`|不发送使用量信息 (默认:false)| +
### `juicefs umount` {#umount} @@ -729,20 +678,22 @@ juicefs gateway redis://localhost localhost:9000 #### 参数 -除下方列出的参数,该命令还与 `juicefs mount` 共享参数,因此需要结合 [`mount`](#mount) 一起参考。 - |项 | 说明| |-|-| -| `--log value`1.2 | 网关日志路径 | |`META-URL`|用于元数据存储的数据库 URL,详情查看[「JuiceFS 支持的元数据引擎」](../reference/how_to_set_up_metadata_engine.md)。| -| `--background, -d`1.2 | 后台运行 (默认:false) | |`ADDRESS`|S3 网关地址和监听的端口,例如:`localhost:9000`| +|`--log value` 1.2|网关日志路径| |`--access-log=path`|访问日志的路径| -|`--no-banner`|禁用 MinIO 的启动信息 (默认:false)| -|`--multi-buckets`|使用第一级目录作为存储桶 (默认:false)| -|`--keep-etag`|保留对象上传时的 ETag (默认:false)| -|`--umask=022`|新文件和新目录的 umask 的八进制格式 (默认值:022)| -| `--domain value`1.2 |虚拟主机样式请求的域| +|`--background, -d` 1.2|后台运行(默认:false)| +|`--no-banner`| 禁用 MinIO 的启动信息(默认:false)| +|`--multi-buckets`|使用第一级目录作为存储桶(默认:false)| +|`--keep-etag`|保留对象上传时的 ETag(默认:false)| +|`--umask=022`|新文件和新目录的 umask 的八进制格式(默认值:022)| +|`--object-tag` 1.2|启用对象标签 API| +|`--domain value` 1.2|虚拟主机样式请求的域| +|`--refresh-iam-interval=5m` 1.2|重新加载网关 IAM 配置的间隔时间(默认值:5 分钟)| + + ### `juicefs webdav` {#webdav} @@ -758,8 +709,6 @@ juicefs webdav redis://localhost localhost:9007 #### 参数 -除下方列出的参数,该命令还与 `juicefs mount` 共享参数,因此需要结合 [`mount`](#mount) 参数一起参考。 - |项 | 说明| |-|-| |`META-URL`|用于元数据存储的数据库 URL,详情查看[「JuiceFS 支持的元数据引擎」](../reference/how_to_set_up_metadata_engine.md)。| @@ -768,9 +717,11 @@ juicefs webdav redis://localhost localhost:9007 |`--key-file` 1.1|HTTPS 密钥文件| |`--gzip`|通过 gzip 压缩提供的文件(默认值:false)| |`--disallowList`|禁止列出目录(默认值:false)| -| `--log value`1.2 | WebDAV 日志路径 | +|`--log value` 1.2|WebDAV 日志路径| |`--access-log=path`|访问日志的路径| -| `--background, -d`1.2 | 后台运行 (默认:false) | +|`--background, -d` 1.2|后台运行(默认:false)| + + ## 工具 {#tool} @@ -820,6 +771,7 @@ ACCESS_KEY=myAccessKey SECRET_KEY=mySecretKey juicefs objbench --storage=s3 http |`--storage=file`|对象存储类型 (例如 `s3`、`gs`、`oss`、`cos`) (默认:`file`,参考[文档](../reference/how_to_set_up_object_storage.md#supported-object-storage)查看所有支持的对象存储类型)| 
|`--access-key=value`|对象存储的 Access Key,也可通过环境变量 `ACCESS_KEY` 设置。查看[如何设置对象存储](../reference/how_to_set_up_object_storage.md#aksk)以了解更多。| |`--secret-key=value`|对象存储的 Secret Key,也可通过环境变量 `SECRET_KEY` 设置。查看[如何设置对象存储](../reference/how_to_set_up_object_storage.md#aksk)以了解更多。| +|`--session-token value` 1.0|对象存储的会话令牌| |`--block-size=4096`|每个 IO 块的大小(以 KiB 为单位)(默认值:4096)| |`--big-object-size=1024`|大文件的大小(以 MiB 为单位)(默认值:1024)| |`--small-object-size=128`|每个小文件的大小(以 KiB 为单位)(默认值:128)| @@ -855,6 +807,8 @@ juicefs warmup -f /tmp/filelist.txt |`--file=value, -f value`|指定一个包含一组路径的文件(每一行为一个文件路径)。| |`--threads=50, -p 50`|并发的工作线程数,默认 50。如果带宽不足导致下载失败,需要减少并发度,控制下载速度。| |`--background, -b`|后台运行(默认:false)| +|`--evict` 1.2|逐出已缓存的块| +|`--check` 1.2|检查数据块是否已缓存| ### `juicefs rmr` {#rmr} @@ -914,8 +868,14 @@ juicefs sync --include='a1/b1' --exclude='a*' --include='b2' --exclude='b?' s3:/ |项 | 说明| |-|-| |`--start=KEY, -s KEY, --end=KEY, -e KEY`|提供 KEY 范围,来指定对象存储的 List 范围。| +|`--end KEY, -e KEY`| 同步的最后一个 `KEY` | |`--exclude=PATTERN`|排除匹配 `PATTERN` 的 Key。| |`--include=PATTERN`|不排除匹配 `PATTERN` 的 Key,需要与 `--exclude` 选项配合使用。| +|`--match-full-path` 1.2|匹配完整路径(默认值:false)| +|`--max-size=SIZE` 1.2|跳过大小大于 `SIZE` 的文件,单位字节| +|`--min-size=SIZE` 1.2|跳过大小小于 `SIZE` 的文件,单位字节| +|`--max-age=DURATION` 1.2|跳过最后修改时间超过 `DURATION` 的文件,单位秒。例如 `--max-age=3600` 表示仅同步在 1 小时内被修改过的文件。| +|`--min-age=DURATION` 1.2|跳过最后修改时间不超过 `DURATION` 的文件,单位秒。例如 `--min-age=3600` 表示仅同步最后修改时间距离当前时间已经超过 1 小时的文件。| |`--limit=-1`|限制将要处理的对象的数量,默认为 -1 表示不限制| |`--update, -u`|当源文件更新时(`mtime` 更新),覆盖已存在的文件,默认为 false。| |`--force-update, -f`|强制覆盖已存在的文件,默认为 false。| @@ -929,6 +889,7 @@ juicefs sync --include='a1/b1' --exclude='a*' --include='b2' --exclude='b?' 
s3:/ |`--dirs`|同步目录(包括空目录)。| |`--perms`|保留权限设置,默认为 false。| |`--links, -l`|将符号链接复制为符号链接,默认为 false,此时会查找并同步符号链接所指向的文件。| +|`--inplace` 1.2|当源路径的文件被修改时,直接修改目标路径中的同名文件,而不是先在目标路径中写一个临时文件,再将这个临时文件原子重命名到真实的文件名。这个选项只有当 `--update` 选项开启,以及目标路径的存储系统支持原地修改文件(如 JuiceFS、HDFS、NFS)时才有意义,也就是说如果目标路径的存储系统是对象存储开启这个选项是无效的。(默认值:false)| |`--delete-src, --deleteSrc`|如果目标存储已经存在,删除源存储的对象。与 rsync 不同,为保数据安全,首次执行时不会删除源存储文件,只有拷贝成功后再次运行时,扫描确认目标存储已经存在相关文件,才会删除源存储文件。| |`--delete-dst, --deleteDst`|删除目标存储下的不相关对象。| |`--check-all`|校验源路径和目标路径中所有文件的数据完整性,默认为 false。校验方式是基于字节流对比,因此也将带来相应的开销。| @@ -953,6 +914,13 @@ juicefs sync --include='a1/b1' --exclude='a*' --include='b2' --exclude='b?' s3:/ |`--manager-addr=ADDR`| 分布式同步模式中,Manager 节点的监听地址,格式:`:[port]`,如果不写端口,则监听随机端口。如果没有该参数,则监听本机随机的 IPv4 地址与随机端口。| |`--worker=ADDR,ADDR`| 分布式同步模式中,工作节点列表,使用逗号分隔。| +#### 监控相关参数 {#sync-metrics-related-options} + +|项 | 说明| +|-|-| +|`--metrics value` 1.2|导出监控指标的地址(默认值:"127.0.0.1:9567")| +|`--consul value` 1.2|用于注册的 Consul 地址(默认值:"127.0.0.1:8500")| + ### `juicefs clone` 1.1 {#clone} 快速在同一挂载点下克隆目录或者文件,只拷贝元数据但不拷贝数据块,因此拷贝速度非常快。更多介绍详见[「克隆文件或目录」](../guide/clone.md)。 diff --git a/docs/zh_cn/reference/fuse_mount_options.md b/docs/zh_cn/reference/fuse_mount_options.md index 2b1836a113fc..22a9c25fb48f 100644 --- a/docs/zh_cn/reference/fuse_mount_options.md +++ b/docs/zh_cn/reference/fuse_mount_options.md @@ -8,7 +8,7 @@ JuiceFS 文件系统为用户提供多种访问方式,FUSE 是其中较为常 本指南介绍 JuiceFS 常用的 FUSE 挂载选项,有两种添加挂载选项的方式: -1. 手动执行 [`juicefs mount`](../reference/command_reference.md#mount) 命令时,通过 `-o` 选项指定,多个选项使用半角逗号分隔。 +1. 
手动执行 [`juicefs mount`](../reference/command_reference.mdx#mount) 命令时,通过 `-o` 选项指定,多个选项使用半角逗号分隔。 ```bash juicefs mount -d -o allow_other,writeback_cache sqlite3://myjfs.db ~/jfs diff --git a/docs/zh_cn/reference/how_to_set_up_object_storage.md b/docs/zh_cn/reference/how_to_set_up_object_storage.md index c38d940df6f9..f7e9f35dd556 100644 --- a/docs/zh_cn/reference/how_to_set_up_object_storage.md +++ b/docs/zh_cn/reference/how_to_set_up_object_storage.md @@ -34,7 +34,7 @@ juicefs format --storage s3 \ ## 配置数据分片(Sharding) {#enable-data-sharding} -创建文件系统时,可以通过 [`--shards`](../reference/command_reference.md#format-data-format-options) 选项定义多个 Bucket 作为文件系统的底层存储。这样一来,系统会根据文件名哈希值将文件分散到多个 Bucket 中。数据分片技术可以将大规模数据并发写的负载分散到多个 Bucket 中,从而提高写入性能。 +创建文件系统时,可以通过 [`--shards`](../reference/command_reference.mdx#format-data-format-options) 选项定义多个 Bucket 作为文件系统的底层存储。这样一来,系统会根据文件名哈希值将文件分散到多个 Bucket 中。数据分片技术可以将大规模数据并发写的负载分散到多个 Bucket 中,从而提高写入性能。 启用数据分片功能需要注意以下事项: @@ -116,7 +116,7 @@ JuiceFS 对这种区分内网外地址的对象存储服务也做了灵活的支 ## 存储类 1.1 {#storage-class} -对象存储通常支持多种存储类,如标准存储、低频访问存储、归档存储。不同的存储类会有不同的价格及服务可用性,你可以在创建 JuiceFS 文件系统时通过 [`--storage-class`](../reference/command_reference.md#format-data-storage-options) 选项设置默认的存储类,或者在挂载 JuiceFS 文件系统时通过 [`--storage-class`](../reference/command_reference.md#mount-data-storage-options) 选项设置一个新的存储类。请查阅你所使用的对象存储的用户手册了解应该如何设置 `--storage-class` 选项的值(如 [Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass))。 +对象存储通常支持多种存储类,如标准存储、低频访问存储、归档存储。不同的存储类会有不同的价格及服务可用性,你可以在创建 JuiceFS 文件系统时通过 [`--storage-class`](../reference/command_reference.mdx#format-data-storage-options) 选项设置默认的存储类,或者在挂载 JuiceFS 文件系统时通过 [`--storage-class`](../reference/command_reference.mdx#mount-data-storage-options) 选项设置一个新的存储类。请查阅你所使用的对象存储的用户手册了解应该如何设置 `--storage-class` 选项的值(如 [Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass))。 :::note 注意 
当使用某些存储类(如归档、深度归档)时,数据无法立即访问,需要提前恢复数据并等待一段时间之后才能访问。 diff --git a/docs/zh_cn/reference/posix_compatibility.md b/docs/zh_cn/reference/posix_compatibility.md index fa24eea31d5b..e566b4553370 100644 --- a/docs/zh_cn/reference/posix_compatibility.md +++ b/docs/zh_cn/reference/posix_compatibility.md @@ -38,7 +38,7 @@ Result: PASS - 支持传统 POSIX 记录锁(fcntl) :::note 注意 -POSIX 记录锁分为**传统锁**和 **OFD 锁**(Open file description locks)两类,它们的加锁操作命令分别为 `F_SETLK` 和 `F_OFD_SETLK`。受限于 FUSE 内核模块的实现,目前 JuiceFS 只支持传统类型的记录锁。更多细节可参见:。 +POSIX 记录锁分为**传统锁**和 **OFD 锁**(Open file description locks)两类,它们的加锁操作命令分别为 `F_SETLK` 和 `F_OFD_SETLK`。受限于 FUSE 内核模块的实现,目前 JuiceFS 只支持传统类型的记录锁。更多细节可参见:[https://man7.org/linux/man-pages/man2/fcntl.2.html](https://man7.org/linux/man-pages/man2/fcntl.2.html)。 ::: ## LTP diff --git a/docs/zh_cn/security/trash.md b/docs/zh_cn/security/trash.md index f63b4be9519c..de28fac6c38f 100644 --- a/docs/zh_cn/security/trash.md +++ b/docs/zh_cn/security/trash.md @@ -10,7 +10,7 @@ sidebar_position: 2 JuiceFS 默认开启回收站功能,你删除的文件会被保存在文件系统根目录下的 `.trash` 目录内,保留指定时间后才将数据真正清理。在清理到来之前,通过 `df -h` 命令看到的文件系统使用量并不会减少,对象存储中的对象也会依然存在。 -不论你正在用 `format` 命令初始化文件系统,还是用 `config` 命令调整已有的文件系统,都可以用 [`--trash-days`](../reference/command_reference.md#format) 参数来指定回收站保留时长: +不论你正在用 `format` 命令初始化文件系统,还是用 `config` 命令调整已有的文件系统,都可以用 [`--trash-days`](../reference/command_reference.mdx#format) 参数来指定回收站保留时长: ```shell # 初始化新的文件系统 @@ -23,7 +23,7 @@ juicefs config META-URL --trash-days=7 juicefs config META-URL --trash-days=0 ``` -另外,回收站自动清理依赖 JuiceFS 客户端的后台任务,为了保证后台任务能够正常执行,需要至少 1 个在线的挂载点,并且在挂载文件系统时不可以使用 [`--no-bgjob`](../reference/command_reference.md#mount) 参数。 +另外,回收站自动清理依赖 JuiceFS 客户端的后台任务,为了保证后台任务能够正常执行,需要至少 1 个在线的挂载点,并且在挂载文件系统时不可以使用 [`--no-bgjob`](../reference/command_reference.mdx#mount-metadata-options) 参数。 ## 恢复文件 {#recover} @@ -35,7 +35,7 @@ juicefs config META-URL --trash-days=0 mv .trash/2022-11-30-10/[parent inode]-[file inode]-[file name] . 
``` -被删除的文件会完全丢失其目录结构,在回收站中“平铺”存储,但会在文件名保留父目录的 inode,如果你确实忘记了被误删的文件名,可以使用 [`juicefs info`](../reference/command_reference.md#info) 命令先找出父目录的 inode,然后顺藤摸瓜地定位到误删文件。 +被删除的文件会完全丢失其目录结构,在回收站中“平铺”存储,但会在文件名保留父目录的 inode,如果你确实忘记了被误删的文件名,可以使用 [`juicefs info`](../reference/command_reference.mdx#info) 命令先找出父目录的 inode,然后顺藤摸瓜地定位到误删文件。 假设挂载点为 `/jfs`,你误删了 `/jfs/data/config.json`,但无法直接通过 `config.json` 文件名来操作恢复文件(因为你忘了),可以用下方流程反查父目录 inode,然后在回收站中定位文件: @@ -80,7 +80,7 @@ $ tree .trash/2023-08-14-05 └── 16-18-config.json ``` -正因如此,JuiceFS v1.1 提供了 [`restore`](../reference/command_reference.md#restore) 子命令来快速恢复大量误删的文件,以上方目录结构为例,恢复操作如下: +正因如此,JuiceFS v1.1 提供了 [`restore`](../reference/command_reference.mdx#restore) 子命令来快速恢复大量误删的文件,以上方目录结构为例,恢复操作如下: ```shell # 先运行 restore 命令,在回收站内重建目录结构 @@ -106,7 +106,7 @@ juicefs restore $META_URL 2023-08-14-05 --put-back 当回收站中的文件到了过期时间,会被自动清理。需要注意的是,文件清理由 JuiceFS 客户端的后台任务(background job,也称 bgjob)执行,默认每小时清理一次,因此面对大量文件过期时,对象存储的清理速度未必和你期望的一样快,可能需要一些时间才能看到存储容量变化。 -如果你希望在过期时间到来之前彻底删除文件,需要使用 root 用户身份,用 [`juicefs rmr`](../reference/command_reference.md#rmr) 或系统自带的 `rm` 命令来删除回收站目录 `.trash` 中的文件,这样就能立刻释放存储空间。 +如果你希望在过期时间到来之前彻底删除文件,需要使用 root 用户身份,用 [`juicefs rmr`](../reference/command_reference.mdx#rmr) 或系统自带的 `rm` 命令来删除回收站目录 `.trash` 中的文件,这样就能立刻释放存储空间。 例如,彻底删除回收站中某个目录: @@ -120,7 +120,7 @@ juicefs rmr .trash/2022-11-30-10/ 在回收站里,除了因用户操作而产生的文件,还存在另一类对用户不可见的数据——覆写产生的文件碎片。关于文件碎片是怎么产生的,可以详细阅读[「JuiceFS 如何存储文件」](../introduction/architecture.md#how-juicefs-store-files)。总而言之,如果应用经常删除文件或者频繁覆盖写文件,会导致对象存储使用量远大于文件系统用量。 -虽然失效的文件碎片不能直接浏览、操作,但你可以通过 [`juicefs status`](../reference/command_reference.md#status) 命令来简单观测其规模: +虽然失效的文件碎片不能直接浏览、操作,但你可以通过 [`juicefs status`](../reference/command_reference.mdx#status) 命令来简单观测其规模: ```shell # 下方 Trash Slices 就是失效的文件碎片统计 diff --git a/docs/zh_cn/tutorials/juicefs_on_kubesphere.md b/docs/zh_cn/tutorials/juicefs_on_kubesphere.md index 3237b32e9b0d..7d379a64c45d 100644 --- a/docs/zh_cn/tutorials/juicefs_on_kubesphere.md +++ 
b/docs/zh_cn/tutorials/juicefs_on_kubesphere.md @@ -40,7 +40,7 @@ KubeSphere 提供了运维友好的向导式操作界面,即便是 Kubernetes #### 方法二:应用模板 -先在 JuiceFS CSI Driver 仓库下载 chart 压缩包: 。 +先在 JuiceFS CSI Driver 仓库下载 chart 压缩包:[https://github.com/juicedata/juicefs-csi-driver/releases](https://github.com/juicedata/juicefs-csi-driver/releases)。 在「企业空间」中点击进入「应用管理」,选择「应用模板」,点击「创建」,上传 chart 压缩包: diff --git a/package-lock.json b/package-lock.json index 3ccb865b6509..ebd2cb95ffbf 100644 --- a/package-lock.json +++ b/package-lock.json @@ -13,7 +13,7 @@ "markdownlint-rule-enhanced-proper-names": "^0.0.1", "markdownlint-rule-no-trailing-slash-in-links": "^0.0.1", "remark-cli": "^11.0.0", - "remark-validate-links": "^12.1.0", + "remark-validate-links": "^13.0.1", "remark-validate-links-heading-id": "^0.0.3" } }, @@ -30,11 +30,12 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.24.7", + "resolved": "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", "dependencies": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" }, "engines": { "node": ">=6.9.0" @@ -365,9 +366,9 @@ } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmmirror.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", + "version": "7.24.7", + "resolved": "https://registry.npmmirror.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + "integrity": 
"sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", "engines": { "node": ">=6.9.0" } @@ -408,13 +409,14 @@ } }, "node_modules/@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmmirror.com/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.24.7", + "resolved": "https://registry.npmmirror.com/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", "dependencies": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" + "@babel/helper-validator-identifier": "^7.24.7", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" }, "engines": { "node": ">=6.9.0" @@ -1785,6 +1787,22 @@ "node": ">=8" } }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmmirror.com/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, "node_modules/@jridgewell/gen-mapping": { "version": "0.1.1", "resolved": "https://registry.npmmirror.com/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz", @@ -1923,14 +1941,82 @@ "node": ">=10" } }, + "node_modules/@npmcli/git": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@npmcli/git/-/git-5.0.8.tgz", + "integrity": "sha512-liASfw5cqhjNW9UFd+ruwwdEf/lbOAQjLL2XY2dFW/bkJheXDYZgOyul/4gVvEV4BWkTXjYGmDqMw9uegdbJNQ==", + "dependencies": { + "@npmcli/promise-spawn": "^7.0.0", + "ini": 
"^4.1.3", + "lru-cache": "^10.0.1", + "npm-pick-manifest": "^9.0.0", + "proc-log": "^4.0.0", + "promise-inflight": "^1.0.1", + "promise-retry": "^2.0.1", + "semver": "^7.3.5", + "which": "^4.0.0" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/@npmcli/git/node_modules/ini": { + "version": "4.1.3", + "resolved": "https://registry.npmmirror.com/ini/-/ini-4.1.3.tgz", + "integrity": "sha512-X7rqawQBvfdjS10YU1y1YVreA3SsLrW9dX2CewP2EbBJM4ypVNLDkO5y04gejPwKIY9lR+7r9gn3rFPt/kmWFg==", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@npmcli/git/node_modules/isexe": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/isexe/-/isexe-3.1.1.tgz", + "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==", + "engines": { + "node": ">=16" + } + }, + "node_modules/@npmcli/git/node_modules/proc-log": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/proc-log/-/proc-log-4.2.0.tgz", + "integrity": "sha512-g8+OnU/L2v+wyiVK+D5fA34J7EH8jZ8DDlvwhRCMxmMj7UCBvxiO1mGeN+36JXIKF4zevU4kRBd8lVgG9vLelA==", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@npmcli/git/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@npmcli/git/node_modules/which": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/which/-/which-4.0.0.tgz", + "integrity": "sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==", + "dependencies": { + "isexe": "^3.1.1" + }, + "bin": { + "node-which": "bin/which.js" + }, + "engines": { + "node": "^16.13.0 || >=18.0.0" + } + }, 
"node_modules/@npmcli/map-workspaces": { - "version": "3.0.1", - "resolved": "https://registry.npmmirror.com/@npmcli/map-workspaces/-/map-workspaces-3.0.1.tgz", - "integrity": "sha512-QXwE2p5zRTP6X8Irgf/swYwwdQEalSA1GBm0IGE/86R5EJbUGgKMOP0kOjaJWJxaWPkSqyhM8N50SPxFHTfkNg==", + "version": "3.0.6", + "resolved": "https://registry.npmmirror.com/@npmcli/map-workspaces/-/map-workspaces-3.0.6.tgz", + "integrity": "sha512-tkYs0OYnzQm6iIRdfy+LcLBjcKuQCeE5YLb8KnrIlutJfheNaPvPpgoFEyEFgbjzl5PLZ3IA/BWAwRU0eHuQDA==", "dependencies": { "@npmcli/name-from-folder": "^2.0.0", - "glob": "^8.0.1", - "minimatch": "^5.0.1", + "glob": "^10.2.2", + "minimatch": "^9.0.0", "read-package-json-fast": "^3.0.0" }, "engines": { @@ -1946,29 +2032,36 @@ } }, "node_modules/@npmcli/map-workspaces/node_modules/glob": { - "version": "8.0.3", - "resolved": "https://registry.npmmirror.com/glob/-/glob-8.0.3.tgz", - "integrity": "sha512-ull455NHSHI/Y1FqGaaYFaLGkNMMJbavMrEGFXG/PGrg6y7sutWHUHrz6gy6WEBH6akM1M414dWKCNs+IhKdiQ==", + "version": "10.4.5", + "resolved": "https://registry.npmmirror.com/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^5.0.1", - "once": "^1.3.0" + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" }, - "engines": { - "node": ">=12" + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/@npmcli/map-workspaces/node_modules/minimatch": { - "version": "5.1.1", - "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-5.1.1.tgz", - "integrity": "sha512-362NP+zlprccbEt/SkxKfRMHnNY85V74mVnpUpNyr3F35covl09Kec7/sEFLt3RA4oXmewtoaanoIf67SE5Y5g==", + "version": "9.0.5", + "resolved": 
"https://registry.npmmirror.com/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", "dependencies": { "brace-expansion": "^2.0.1" }, "engines": { - "node": ">=10" + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/@npmcli/name-from-folder": { @@ -1979,6 +2072,133 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, + "node_modules/@npmcli/package-json": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/@npmcli/package-json/-/package-json-5.2.0.tgz", + "integrity": "sha512-qe/kiqqkW0AGtvBjL8TJKZk/eBBSpnJkUWvHdQ9jM2lKHXRYYJuyNpJPlJw3c8QjC2ow6NZYiLExhUaeJelbxQ==", + "dependencies": { + "@npmcli/git": "^5.0.0", + "glob": "^10.2.2", + "hosted-git-info": "^7.0.0", + "json-parse-even-better-errors": "^3.0.0", + "normalize-package-data": "^6.0.0", + "proc-log": "^4.0.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/@npmcli/package-json/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@npmcli/package-json/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmmirror.com/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + 
"node_modules/@npmcli/package-json/node_modules/json-parse-even-better-errors": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.2.tgz", + "integrity": "sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ==", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@npmcli/package-json/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@npmcli/package-json/node_modules/proc-log": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/proc-log/-/proc-log-4.2.0.tgz", + "integrity": "sha512-g8+OnU/L2v+wyiVK+D5fA34J7EH8jZ8DDlvwhRCMxmMj7UCBvxiO1mGeN+36JXIKF4zevU4kRBd8lVgG9vLelA==", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@npmcli/package-json/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@npmcli/promise-spawn": { + "version": "7.0.2", + "resolved": "https://registry.npmmirror.com/@npmcli/promise-spawn/-/promise-spawn-7.0.2.tgz", + "integrity": "sha512-xhfYPXoV5Dy4UkY0D+v2KkwvnDfiA/8Mt3sWCGI/hM03NsYIH8ZaG6QzS9x7pje5vHZBZJ2v6VRFVTWACnqcmQ==", + "dependencies": { + "which": "^4.0.0" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/@npmcli/promise-spawn/node_modules/isexe": 
{ + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/isexe/-/isexe-3.1.1.tgz", + "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==", + "engines": { + "node": ">=16" + } + }, + "node_modules/@npmcli/promise-spawn/node_modules/which": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/which/-/which-4.0.0.tgz", + "integrity": "sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==", + "dependencies": { + "isexe": "^3.1.1" + }, + "bin": { + "node-which": "bin/which.js" + }, + "engines": { + "node": "^16.13.0 || >=18.0.0" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmmirror.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "optional": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@svgr/babel-plugin-add-jsx-attribute": { "version": "6.5.1", "resolved": "https://registry.npmmirror.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", @@ -2221,6 +2441,19 @@ "resolved": "https://registry.npmmirror.com/@types/estree/-/estree-0.0.51.tgz", "integrity": "sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ==" }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/hosted-git-info": { + "version": "3.0.5", + "resolved": "https://registry.npmmirror.com/@types/hosted-git-info/-/hosted-git-info-3.0.5.tgz", + "integrity": "sha512-Dmngh7U003cOHPhKGyA7LWqrnvcTyILNgNPmNCxlx7j8MIi54iBliiT8XqVLIQ3GchoOjVAyBzNJVyuaJjqokg==" + }, 
"node_modules/@types/is-empty": { "version": "1.2.1", "resolved": "https://registry.npmmirror.com/@types/is-empty/-/is-empty-1.2.1.tgz", @@ -2269,6 +2502,11 @@ "resolved": "https://registry.npmmirror.com/@types/unist/-/unist-2.0.6.tgz", "integrity": "sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==" }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" + }, "node_modules/@webassemblyjs/ast": { "version": "1.11.1", "resolved": "https://registry.npmmirror.com/@webassemblyjs/ast/-/ast-1.11.1.tgz", @@ -2675,6 +2913,20 @@ "node": ">=6.0" } }, + "node_modules/ci-info": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/ci-info/-/ci-info-4.0.0.tgz", + "integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", @@ -2754,6 +3006,19 @@ "node": ">= 6" } }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmmirror.com/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/css-select": { "version": "4.3.0", "resolved": "https://registry.npmmirror.com/css-select/-/css-select-4.3.0.tgz", @@ -2837,6 +3102,18 @@ "node": ">=6" } }, + "node_modules/devlop": { + "version": "1.1.0", + 
"resolved": "https://registry.npmmirror.com/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/diff": { "version": "5.1.0", "resolved": "https://registry.npmmirror.com/diff/-/diff-5.1.0.tgz", @@ -2940,6 +3217,11 @@ "node": ">=0.12" } }, + "node_modules/err-code": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/err-code/-/err-code-2.0.3.tgz", + "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==" + }, "node_modules/error-ex": { "version": "1.3.2", "resolved": "https://registry.npmmirror.com/error-ex/-/error-ex-1.3.2.tgz", @@ -3119,6 +3401,21 @@ "node": ">=8" } }, + "node_modules/foreground-child": { + "version": "3.2.1", + "resolved": "https://registry.npmmirror.com/foreground-child/-/foreground-child-3.2.1.tgz", + "integrity": "sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA==", + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/format": { "version": "0.2.2", "resolved": "https://registry.npmmirror.com/format/-/format-0.2.2.tgz", @@ -3290,14 +3587,14 @@ } }, "node_modules/hosted-git-info": { - "version": "5.2.1", - "resolved": "https://registry.npmmirror.com/hosted-git-info/-/hosted-git-info-5.2.1.tgz", - "integrity": "sha512-xIcQYMnhcx2Nr4JTjsFmwwnr9vldugPy9uVm0o87bjqqWMv9GaqsTeT+i99wTl0mk1uLxJtHxLb8kymqTENQsw==", + "version": "7.0.2", + "resolved": "https://registry.npmmirror.com/hosted-git-info/-/hosted-git-info-7.0.2.tgz", + "integrity": "sha512-puUZAUKT5m8Zzvs72XWy3HtvVbTWljRE66cP60bxJzAqf2DgICo7lYTY2IHUmLnNpjYvw5bvmoHvPc0QO2a62w==", "dependencies": { - 
"lru-cache": "^7.5.1" + "lru-cache": "^10.0.1" }, "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + "node": "^16.14.0 || >=18.0.0" } }, "node_modules/ignore": { @@ -3408,6 +3705,14 @@ "node": ">=0.10.0" } }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmmirror.com/is-glob/-/is-glob-4.0.3.tgz", @@ -3435,6 +3740,25 @@ "node": ">=12" } }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmmirror.com/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, "node_modules/jest-worker": { "version": "27.5.1", "resolved": "https://registry.npmmirror.com/jest-worker/-/jest-worker-27.5.1.tgz", @@ -3601,12 +3925,9 @@ "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==" }, "node_modules/lru-cache": { - "version": "7.14.1", - "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-7.14.1.tgz", - "integrity": "sha512-ysxwsnTKdAx96aTRdhDOCQfDgbHnt8SK0KY8SEjO0wHinhWOFTESbjVCMPbU1uGXg/ch4lifqx0wfjOawU2+WA==", - "engines": { - "node": ">=12" - } + "version": "10.4.3", + "resolved": 
"https://registry.npmmirror.com/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==" }, "node_modules/markdown-it": { "version": "13.0.1", @@ -3706,6 +4027,201 @@ "uvu": "^0.5.0" } }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.0", + "resolved": "https://registry.npmmirror.com/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast/node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmmirror.com/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/mdast-util-to-hast/node_modules/@types/unist": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/@types/unist/-/unist-3.0.2.tgz", + "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" + }, + "node_modules/mdast-util-to-hast/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + 
"type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/mdast-util-to-hast/node_modules/micromark-util-encode": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz", + "integrity": "sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-to-hast/node_modules/micromark-util-sanitize-uri": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz", + "integrity": "sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/mdast-util-to-hast/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-to-hast/node_modules/micromark-util-types": { + "version": "2.0.0", + "resolved": 
"https://registry.npmmirror.com/micromark-util-types/-/micromark-util-types-2.0.0.tgz", + "integrity": "sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-to-hast/node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast/node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast/node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast/node_modules/unist-util-visit-parents": { + "version": "6.0.1", + "resolved": 
"https://registry.npmmirror.com/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast/node_modules/vfile": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/vfile/-/vfile-6.0.2.tgz", + "integrity": "sha512-zND7NlS8rJYb/sPqkb13ZvbbUoExdbi4w3SfRrMq6R3FvnLQmmfpajJNITuuYm6AZ5uao9vy4BAos3EXBPf2rg==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast/node_modules/vfile-message": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/mdast-util-to-markdown": { "version": "1.4.0", "resolved": "https://registry.npmmirror.com/mdast-util-to-markdown/-/mdast-util-to-markdown-1.4.0.tgz", @@ -4007,6 +4523,14 @@ "resolved": "https://registry.npmmirror.com/minimist/-/minimist-1.2.7.tgz", "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==" }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmmirror.com/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "engines": { 
+ "node": ">=16 || 14 >=14.17" + } + }, "node_modules/mri": { "version": "1.2.0", "resolved": "https://registry.npmmirror.com/mri/-/mri-1.2.0.tgz", @@ -4031,9 +4555,9 @@ "integrity": "sha512-EJ3rzxL9pTWPjk5arA0s0dgXpnyiAbJDE6wHT62g7VsgrgQgmmZ+Ru++M1BFofncWja+Pnn3rEr3fieRySAdKQ==" }, "node_modules/nopt": { - "version": "7.0.0", - "resolved": "https://registry.npmmirror.com/nopt/-/nopt-7.0.0.tgz", - "integrity": "sha512-e6Qw1rcrGoSxEH0hQ4GBSdUjkMOtXGhGFXdNT/3ZR0S37eR9DMj5za3dEDWE6o1T3/DP8ZOsPP4MIiky0c3QeA==", + "version": "7.2.1", + "resolved": "https://registry.npmmirror.com/nopt/-/nopt-7.2.1.tgz", + "integrity": "sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w==", "dependencies": { "abbrev": "^2.0.0" }, @@ -4044,14 +4568,60 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmmirror.com/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "node_modules/normalize-package-data": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/normalize-package-data/-/normalize-package-data-6.0.2.tgz", + "integrity": "sha512-V6gygoYb/5EmNI+MEGrWkC+e6+Rr7mTmfHrxDbLzxQogBkgzo76rkok0Am6thgSF7Mv2nLOajAJj5vDJZEFn7g==", + "dependencies": { + "hosted-git-info": "^7.0.0", + "semver": "^7.3.5", + "validate-npm-package-license": "^3.0.4" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/normalize-package-data/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": 
"https://registry.npmmirror.com/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", "engines": { "node": ">=0.10.0" } }, + "node_modules/npm-install-checks": { + "version": "6.3.0", + "resolved": "https://registry.npmmirror.com/npm-install-checks/-/npm-install-checks-6.3.0.tgz", + "integrity": "sha512-W29RiK/xtpCGqn6f3ixfRYGk+zRyr+Ew9F2E20BfXxT5/euLdA/Nm7fO7OeTGuAmTs30cpgInyJ0cYe708YTZw==", + "dependencies": { + "semver": "^7.1.1" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/npm-install-checks/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/npm-normalize-package-bin": { "version": "3.0.0", "resolved": "https://registry.npmmirror.com/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.0.tgz", @@ -4060,6 +4630,64 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, + "node_modules/npm-package-arg": { + "version": "11.0.2", + "resolved": "https://registry.npmmirror.com/npm-package-arg/-/npm-package-arg-11.0.2.tgz", + "integrity": "sha512-IGN0IAwmhDJwy13Wc8k+4PEbTPhpJnMtfR53ZbOyjkvmEcLS4nCwp6mvMWjS5sUjeiW3mpx6cHmuhKEu9XmcQw==", + "dependencies": { + "hosted-git-info": "^7.0.0", + "proc-log": "^4.0.0", + "semver": "^7.3.5", + "validate-npm-package-name": "^5.0.0" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/npm-package-arg/node_modules/proc-log": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/proc-log/-/proc-log-4.2.0.tgz", + "integrity": "sha512-g8+OnU/L2v+wyiVK+D5fA34J7EH8jZ8DDlvwhRCMxmMj7UCBvxiO1mGeN+36JXIKF4zevU4kRBd8lVgG9vLelA==", + "engines": { + "node": "^14.17.0 || ^16.13.0 
|| >=18.0.0" + } + }, + "node_modules/npm-package-arg/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/npm-pick-manifest": { + "version": "9.1.0", + "resolved": "https://registry.npmmirror.com/npm-pick-manifest/-/npm-pick-manifest-9.1.0.tgz", + "integrity": "sha512-nkc+3pIIhqHVQr085X9d2JzPzLyjzQS96zbruppqC9aZRm/x8xx6xhI98gHtsfELP2bE+loHq8ZaHFHhe+NauA==", + "dependencies": { + "npm-install-checks": "^6.0.0", + "npm-normalize-package-bin": "^3.0.0", + "npm-package-arg": "^11.0.0", + "semver": "^7.3.5" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/npm-pick-manifest/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/nth-check": { "version": "2.1.1", "resolved": "https://registry.npmmirror.com/nth-check/-/nth-check-2.1.1.tgz", @@ -4076,6 +4704,11 @@ "wrappy": "1" } }, + "node_modules/package-json-from-dist": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/package-json-from-dist/-/package-json-from-dist-1.0.0.tgz", + "integrity": "sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw==" + }, "node_modules/parent-module": { "version": "1.0.1", "resolved": "https://registry.npmmirror.com/parent-module/-/parent-module-1.0.1.tgz", @@ -4109,11 +4742,34 @@ "node": ">=0.10.0" } }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/path-key/-/path-key-3.1.1.tgz", + "integrity": 
"sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, "node_modules/path-parse": { "version": "1.0.7", "resolved": "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmmirror.com/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/path-type": { "version": "4.0.0", "resolved": "https://registry.npmmirror.com/path-type/-/path-type-4.0.0.tgz", @@ -4143,6 +4799,23 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, + "node_modules/promise-inflight": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/promise-inflight/-/promise-inflight-1.0.1.tgz", + "integrity": "sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==" + }, + "node_modules/promise-retry": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/promise-retry/-/promise-retry-2.0.1.tgz", + "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", + "dependencies": { + "err-code": "^2.0.2", + "retry": "^0.12.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/propose": { "version": "0.0.5", "resolved": "https://registry.npmmirror.com/propose/-/propose-0.0.5.tgz", @@ -4340,21 +5013,25 @@ } }, "node_modules/remark-validate-links": { - "version": "12.1.0", - "resolved": 
"https://registry.npmmirror.com/remark-validate-links/-/remark-validate-links-12.1.0.tgz", - "integrity": "sha512-+QhcQmu4WhUhxSduRbSInrFAMAFyNVX7QP0OW5AX8C6NzxMweJnwPBsCfWsV77ivIpC5L6sPbZfMLoW85UoMHQ==", - "dependencies": { - "@types/mdast": "^3.0.0", - "github-slugger": "^1.0.0", - "hosted-git-info": "^5.0.0", - "mdast-util-to-string": "^3.0.0", + "version": "13.0.1", + "resolved": "https://registry.npmmirror.com/remark-validate-links/-/remark-validate-links-13.0.1.tgz", + "integrity": "sha512-GWDZWJAQU0+Fsm1GCLNeJoVcE9L3XTVrWCgQZOYREfXqRFIYaSoIBbARZizLm/vBESq+a3GwEBnIflSCNw26tw==", + "dependencies": { + "@types/hosted-git-info": "^3.0.0", + "@types/mdast": "^4.0.0", + "github-slugger": "^2.0.0", + "hosted-git-info": "^7.0.0", + "mdast-util-to-hast": "^13.0.0", + "mdast-util-to-string": "^4.0.0", "propose": "0.0.5", - "to-vfile": "^7.0.0", "trough": "^2.0.0", - "unified": "^10.0.0", - "unified-engine": "^10.0.1", - "unist-util-visit": "^4.0.0", - "vfile": "^5.0.0" + "unified-engine": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, "node_modules/remark-validate-links-heading-id": { @@ -4366,6 +5043,390 @@ "unist-util-visit": "^4.1.1" } }, + "node_modules/remark-validate-links/node_modules/@npmcli/config": { + "version": "8.3.4", + "resolved": "https://registry.npmmirror.com/@npmcli/config/-/config-8.3.4.tgz", + "integrity": "sha512-01rtHedemDNhUXdicU7s+QYz/3JyV5Naj84cvdXGH4mgCdL+agmSYaLF4LUG4vMCLzhBO8YtS0gPpH1FGvbgAw==", + "dependencies": { + "@npmcli/map-workspaces": "^3.0.2", + "@npmcli/package-json": "^5.1.1", + "ci-info": "^4.0.0", + "ini": "^4.1.2", + "nopt": "^7.2.1", + "proc-log": "^4.2.0", + "semver": "^7.3.5", + "walk-up-path": "^3.0.1" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/remark-validate-links/node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": 
"https://registry.npmmirror.com/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/remark-validate-links/node_modules/@types/node": { + "version": "20.14.11", + "resolved": "https://registry.npmmirror.com/@types/node/-/node-20.14.11.tgz", + "integrity": "sha512-kprQpL8MMeszbz6ojB5/tU8PLN4kesnN8Gjzw349rDlNgsSzg90lAVj3llK99Dh7JON+t9AuscPPFW6mPbTnSA==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/remark-validate-links/node_modules/@types/unist": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/@types/unist/-/unist-3.0.2.tgz", + "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" + }, + "node_modules/remark-validate-links/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/remark-validate-links/node_modules/emoji-regex": { + "version": "10.3.0", + "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-10.3.0.tgz", + "integrity": "sha512-QpLs9D9v9kArv4lfDEgg1X/gN5XLnf/A6l9cs8SPZLRZR3ZkY9+kwIQTxm+fsSej5UMYGE8fdoaZVIBlqG0XTw==" + }, + "node_modules/remark-validate-links/node_modules/github-slugger": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/github-slugger/-/github-slugger-2.0.0.tgz", + "integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==" + }, + "node_modules/remark-validate-links/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmmirror.com/glob/-/glob-10.4.5.tgz", + "integrity": 
"sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/remark-validate-links/node_modules/import-meta-resolve": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/import-meta-resolve/-/import-meta-resolve-4.1.0.tgz", + "integrity": "sha512-I6fiaX09Xivtk+THaMfAwnA3MVA5Big1WHF1Dfx9hFuvNIWpXnorlkzhcQf6ehrqQiiZECRt1poOAkPmer3ruw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-validate-links/node_modules/ini": { + "version": "4.1.3", + "resolved": "https://registry.npmmirror.com/ini/-/ini-4.1.3.tgz", + "integrity": "sha512-X7rqawQBvfdjS10YU1y1YVreA3SsLrW9dX2CewP2EbBJM4ypVNLDkO5y04gejPwKIY9lR+7r9gn3rFPt/kmWFg==", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/remark-validate-links/node_modules/json-parse-even-better-errors": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.2.tgz", + "integrity": "sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ==", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/remark-validate-links/node_modules/lines-and-columns": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/lines-and-columns/-/lines-and-columns-2.0.4.tgz", + "integrity": "sha512-wM1+Z03eypVAVUCE7QdSqpVIvelbOakn1M0bPDoA4SGWPx3sNDVUiMo3L6To6WWGClB7VyXnhQ4Sn7gxiJbE6A==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/remark-validate-links/node_modules/load-plugin": { + 
"version": "6.0.3", + "resolved": "https://registry.npmmirror.com/load-plugin/-/load-plugin-6.0.3.tgz", + "integrity": "sha512-kc0X2FEUZr145odl68frm+lMJuQ23+rTXYmR6TImqPtbpmXC4vVXbWKDQ9IzndA0HfyQamWfKLhzsqGSTxE63w==", + "dependencies": { + "@npmcli/config": "^8.0.0", + "import-meta-resolve": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-validate-links/node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/remark-validate-links/node_modules/parse-json": { + "version": "7.1.1", + "resolved": "https://registry.npmmirror.com/parse-json/-/parse-json-7.1.1.tgz", + "integrity": "sha512-SgOTCX/EZXtZxBE5eJ97P4yGM5n37BwRU+YMsH4vNzFqJV/oWFXXCmwFlgWUM4PrakybVOueJJ6pwHqSVhTFDw==", + "dependencies": { + "@babel/code-frame": "^7.21.4", + "error-ex": "^1.3.2", + "json-parse-even-better-errors": "^3.0.0", + "lines-and-columns": "^2.0.3", + "type-fest": "^3.8.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/remark-validate-links/node_modules/proc-log": { + "version": "4.2.0", + "resolved": 
"https://registry.npmmirror.com/proc-log/-/proc-log-4.2.0.tgz", + "integrity": "sha512-g8+OnU/L2v+wyiVK+D5fA34J7EH8jZ8DDlvwhRCMxmMj7UCBvxiO1mGeN+36JXIKF4zevU4kRBd8lVgG9vLelA==", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/remark-validate-links/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/remark-validate-links/node_modules/string-width": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-6.1.0.tgz", + "integrity": "sha512-k01swCJAgQmuADB0YIc+7TuatfNvTBVOoaUWJjTB9R4VJzR5vNWzf5t42ESVZFPS8xTySF7CAdV4t/aaIm3UnQ==", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^10.2.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/remark-validate-links/node_modules/supports-color": { + "version": "9.4.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-9.4.0.tgz", + "integrity": "sha512-VL+lNrEoIXww1coLPOmiEmK/0sGigko5COxI09KzHc2VJXJsQ37UaQ+8quuxjDeA7+KnLGTWRyOXSLLR2Wb4jw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/remark-validate-links/node_modules/unified-engine": { + "version": "11.2.1", + "resolved": "https://registry.npmmirror.com/unified-engine/-/unified-engine-11.2.1.tgz", + "integrity": "sha512-xBAdZ8UY2X4R9Hm6X6kMne4Nz0PlpOc1oE6DPeqJnewr5Imkb8uT5Eyvy1h7xNekPL3PSWh3ZJyNrMW6jnNQBg==", + "dependencies": { + "@types/concat-stream": "^2.0.0", + "@types/debug": "^4.0.0", + "@types/is-empty": "^1.0.0", + "@types/node": "^20.0.0", + "@types/unist": 
"^3.0.0", + "concat-stream": "^2.0.0", + "debug": "^4.0.0", + "extend": "^3.0.0", + "glob": "^10.0.0", + "ignore": "^5.0.0", + "is-empty": "^1.0.0", + "is-plain-obj": "^4.0.0", + "load-plugin": "^6.0.0", + "parse-json": "^7.0.0", + "trough": "^2.0.0", + "unist-util-inspect": "^8.0.0", + "vfile": "^6.0.0", + "vfile-message": "^4.0.0", + "vfile-reporter": "^8.0.0", + "vfile-statistics": "^3.0.0", + "yaml": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/unist-util-inspect": { + "version": "8.1.0", + "resolved": "https://registry.npmmirror.com/unist-util-inspect/-/unist-util-inspect-8.1.0.tgz", + "integrity": "sha512-mOlg8Mp33pR0eeFpo5d2902ojqFFOKMMG2hF8bmH7ZlhnmjFgh0NI3/ZDwdaBJNbvrS7LZFVrBVtIE9KZ9s7vQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/unist-util-visit": { + "version": 
"5.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/vfile": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/vfile/-/vfile-6.0.2.tgz", + "integrity": "sha512-zND7NlS8rJYb/sPqkb13ZvbbUoExdbi4w3SfRrMq6R3FvnLQmmfpajJNITuuYm6AZ5uao9vy4BAos3EXBPf2rg==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/vfile-message": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/vfile-reporter": { + 
"version": "8.1.1", + "resolved": "https://registry.npmmirror.com/vfile-reporter/-/vfile-reporter-8.1.1.tgz", + "integrity": "sha512-qxRZcnFSQt6pWKn3PAk81yLK2rO2i7CDXpy8v8ZquiEOMLSnPw6BMSi9Y1sUCwGGl7a9b3CJT1CKpnRF7pp66g==", + "dependencies": { + "@types/supports-color": "^8.0.0", + "string-width": "^6.0.0", + "supports-color": "^9.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile": "^6.0.0", + "vfile-message": "^4.0.0", + "vfile-sort": "^4.0.0", + "vfile-statistics": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/vfile-sort": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/vfile-sort/-/vfile-sort-4.0.0.tgz", + "integrity": "sha512-lffPI1JrbHDTToJwcq0rl6rBmkjQmMuXkAxsZPRS9DXbaJQvc642eCg6EGxcX2i1L+esbuhq+2l9tBll5v8AeQ==", + "dependencies": { + "vfile": "^6.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/vfile-statistics": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/vfile-statistics/-/vfile-statistics-3.0.0.tgz", + "integrity": "sha512-/qlwqwWBWFOmpXujL/20P+Iuydil0rZZNglR+VNm6J0gpLHwuVM5s7g2TfVoswbXjZ4HuIhLMySEyIw5i7/D8w==", + "dependencies": { + "vfile": "^6.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-validate-links/node_modules/walk-up-path": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/walk-up-path/-/walk-up-path-3.0.1.tgz", + "integrity": "sha512-9YlCL/ynK3CTlrSRrDxZvUauLzAswPCrsaCgilqFevUYpeEW0/3ScEjaa3kbW/T0ghhkEr7mv+fpjqn1Y1YuTA==" + }, "node_modules/resolve": { "version": "1.22.1", "resolved": "https://registry.npmmirror.com/resolve/-/resolve-1.22.1.tgz", @@ -4392,6 +5453,14 @@ "resolved": 
"https://registry.npmmirror.com/resolve-pathname/-/resolve-pathname-3.0.0.tgz", "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" }, + "node_modules/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmmirror.com/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==", + "engines": { + "node": ">= 4" + } + }, "node_modules/reusify": { "version": "1.0.4", "resolved": "https://registry.npmmirror.com/reusify/-/reusify-1.0.4.tgz", @@ -4466,6 +5535,25 @@ "randombytes": "^2.1.0" } }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, "node_modules/shelljs": { "version": "0.8.5", "resolved": "https://registry.npmmirror.com/shelljs/-/shelljs-0.8.5.tgz", @@ -4482,6 +5570,17 @@ "node": ">=4" } }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/slash": { "version": "4.0.0", "resolved": "https://registry.npmmirror.com/slash/-/slash-4.0.0.tgz", @@ -4507,6 +5606,34 @@ "source-map": "^0.6.0" } }, + 
"node_modules/spdx-correct": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/spdx-correct/-/spdx-correct-3.2.0.tgz", + "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-exceptions": { + "version": "2.5.0", + "resolved": "https://registry.npmmirror.com/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", + "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==" + }, + "node_modules/spdx-expression-parse": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", + "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-license-ids": { + "version": "3.0.18", + "resolved": "https://registry.npmmirror.com/spdx-license-ids/-/spdx-license-ids-3.0.18.tgz", + "integrity": "sha512-xxRs31BqRYHwiMzudOrpSiHtZ8i/GeionCBDSilhYRj+9gIcI8wCZTlXZKu9vZIVqViP3dcp9qE5G6AlIaD+TQ==" + }, "node_modules/sprintf-js": { "version": "1.0.3", "resolved": "https://registry.npmmirror.com/sprintf-js/-/sprintf-js-1.0.3.tgz", @@ -4539,6 +5666,44 @@ "node": ">=12" } }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": 
"https://registry.npmmirror.com/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/strip-ansi": { "version": "7.0.1", "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-7.0.1.tgz", @@ -4550,6 +5715,26 @@ "node": ">=12" } }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, "node_modules/strip-bom-string": { "version": "1.0.0", "resolved": "https://registry.npmmirror.com/strip-bom-string/-/strip-bom-string-1.0.0.tgz", @@ -4702,6 +5887,15 @@ "vfile": "^5.1.0" } }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": 
"https://registry.npmmirror.com/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/trough": { "version": "2.1.0", "resolved": "https://registry.npmmirror.com/trough/-/trough-2.1.0.tgz", @@ -4712,6 +5906,17 @@ "resolved": "https://registry.npmmirror.com/tslib/-/tslib-2.4.1.tgz", "integrity": "sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA==" }, + "node_modules/type-fest": { + "version": "3.13.1", + "resolved": "https://registry.npmmirror.com/type-fest/-/type-fest-3.13.1.tgz", + "integrity": "sha512-tLq3bSNx+xSpwvAJnzrK0Ep5CLNWjvFTOp71URMaAEWBfRb9nnJiBoUe0tF8bI4ZFO3omgBR6NvnbzVUT3Ly4g==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/typedarray": { "version": "0.0.6", "resolved": "https://registry.npmmirror.com/typedarray/-/typedarray-0.0.6.tgz", @@ -4722,6 +5927,11 @@ "resolved": "https://registry.npmmirror.com/uc.micro/-/uc.micro-1.0.6.tgz", "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmmirror.com/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, "node_modules/unicode-canonical-property-names-ecmascript": { "version": "2.0.0", "resolved": "https://registry.npmmirror.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", @@ -4902,6 +6112,23 @@ "resolved": "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-5.1.1.tgz", "integrity": 
"sha512-F5CZ68eYzuSvJjGhCLPL3cYx45IxkqXSetCcRgUXtbcm50X2L9oOWQlfUfDdAf+6Pd27YDblBfdtmsThXmwpbQ==" }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position/node_modules/@types/unist": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/@types/unist/-/unist-3.0.2.tgz", + "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" + }, "node_modules/unist-util-stringify-position": { "version": "3.0.2", "resolved": "https://registry.npmmirror.com/unist-util-stringify-position/-/unist-util-stringify-position-3.0.2.tgz", @@ -5004,6 +6231,23 @@ "node": ">=8" } }, + "node_modules/validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/validate-npm-package-name": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/validate-npm-package-name/-/validate-npm-package-name-5.0.1.tgz", + "integrity": "sha512-OljLrQ9SQdOUqTaQxqL5dEfZWrXExyyWsozYlAWFawPVNuD83igl7uJD2RTkNMbniIYgt8l81eCJGIdQF7avLQ==", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, "node_modules/vfile": { "version": "5.3.6", "resolved": "https://registry.npmmirror.com/vfile/-/vfile-5.3.6.tgz", @@ -5128,6 +6372,101 @@ "node": ">=10.13.0" } }, + "node_modules/which": { + 
"version": "2.0.2", + "resolved": "https://registry.npmmirror.com/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": 
"https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmmirror.com/wrappy/-/wrappy-1.0.2.tgz", @@ -5163,11 +6502,12 @@ } }, "@babel/code-frame": { - "version": "7.18.6", - "resolved": "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.18.6.tgz", - "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", + "version": "7.24.7", + "resolved": "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", "requires": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" } }, "@babel/compat-data": { @@ -5410,9 +6750,9 @@ "integrity": "sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw==" }, 
"@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmmirror.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==" + "version": "7.24.7", + "resolved": "https://registry.npmmirror.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==" }, "@babel/helper-validator-option": { "version": "7.18.6", @@ -5441,13 +6781,14 @@ } }, "@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmmirror.com/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.24.7", + "resolved": "https://registry.npmmirror.com/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", "requires": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" + "@babel/helper-validator-identifier": "^7.24.7", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" }, "dependencies": { "ansi-styles": { @@ -6349,6 +7690,19 @@ } } }, + "@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmmirror.com/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "requires": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + } + }, "@jridgewell/gen-mapping": { "version": "0.1.1", "resolved": 
"https://registry.npmmirror.com/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz", @@ -6458,14 +7812,60 @@ } } }, + "@npmcli/git": { + "version": "5.0.8", + "resolved": "https://registry.npmmirror.com/@npmcli/git/-/git-5.0.8.tgz", + "integrity": "sha512-liASfw5cqhjNW9UFd+ruwwdEf/lbOAQjLL2XY2dFW/bkJheXDYZgOyul/4gVvEV4BWkTXjYGmDqMw9uegdbJNQ==", + "requires": { + "@npmcli/promise-spawn": "^7.0.0", + "ini": "^4.1.3", + "lru-cache": "^10.0.1", + "npm-pick-manifest": "^9.0.0", + "proc-log": "^4.0.0", + "promise-inflight": "^1.0.1", + "promise-retry": "^2.0.1", + "semver": "^7.3.5", + "which": "^4.0.0" + }, + "dependencies": { + "ini": { + "version": "4.1.3", + "resolved": "https://registry.npmmirror.com/ini/-/ini-4.1.3.tgz", + "integrity": "sha512-X7rqawQBvfdjS10YU1y1YVreA3SsLrW9dX2CewP2EbBJM4ypVNLDkO5y04gejPwKIY9lR+7r9gn3rFPt/kmWFg==" + }, + "isexe": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/isexe/-/isexe-3.1.1.tgz", + "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==" + }, + "proc-log": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/proc-log/-/proc-log-4.2.0.tgz", + "integrity": "sha512-g8+OnU/L2v+wyiVK+D5fA34J7EH8jZ8DDlvwhRCMxmMj7UCBvxiO1mGeN+36JXIKF4zevU4kRBd8lVgG9vLelA==" + }, + "semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==" + }, + "which": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/which/-/which-4.0.0.tgz", + "integrity": "sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==", + "requires": { + "isexe": "^3.1.1" + } + } + } + }, "@npmcli/map-workspaces": { - "version": "3.0.1", - "resolved": "https://registry.npmmirror.com/@npmcli/map-workspaces/-/map-workspaces-3.0.1.tgz", - "integrity": 
"sha512-QXwE2p5zRTP6X8Irgf/swYwwdQEalSA1GBm0IGE/86R5EJbUGgKMOP0kOjaJWJxaWPkSqyhM8N50SPxFHTfkNg==", + "version": "3.0.6", + "resolved": "https://registry.npmmirror.com/@npmcli/map-workspaces/-/map-workspaces-3.0.6.tgz", + "integrity": "sha512-tkYs0OYnzQm6iIRdfy+LcLBjcKuQCeE5YLb8KnrIlutJfheNaPvPpgoFEyEFgbjzl5PLZ3IA/BWAwRU0eHuQDA==", "requires": { "@npmcli/name-from-folder": "^2.0.0", - "glob": "^8.0.1", - "minimatch": "^5.0.1", + "glob": "^10.2.2", + "minimatch": "^9.0.0", "read-package-json-fast": "^3.0.0" }, "dependencies": { @@ -6478,21 +7878,22 @@ } }, "glob": { - "version": "8.0.3", - "resolved": "https://registry.npmmirror.com/glob/-/glob-8.0.3.tgz", - "integrity": "sha512-ull455NHSHI/Y1FqGaaYFaLGkNMMJbavMrEGFXG/PGrg6y7sutWHUHrz6gy6WEBH6akM1M414dWKCNs+IhKdiQ==", + "version": "10.4.5", + "resolved": "https://registry.npmmirror.com/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^5.0.1", - "once": "^1.3.0" + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" } }, "minimatch": { - "version": "5.1.1", - "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-5.1.1.tgz", - "integrity": "sha512-362NP+zlprccbEt/SkxKfRMHnNY85V74mVnpUpNyr3F35covl09Kec7/sEFLt3RA4oXmewtoaanoIf67SE5Y5g==", + "version": "9.0.5", + "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", "requires": { "brace-expansion": "^2.0.1" } @@ -6504,6 +7905,95 @@ "resolved": "https://registry.npmmirror.com/@npmcli/name-from-folder/-/name-from-folder-2.0.0.tgz", "integrity": 
"sha512-pwK+BfEBZJbKdNYpHHRTNBwBoqrN/iIMO0AiGvYsp3Hoaq0WbgGSWQR6SCldZovoDpY3yje5lkFUe6gsDgJ2vg==" }, + "@npmcli/package-json": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/@npmcli/package-json/-/package-json-5.2.0.tgz", + "integrity": "sha512-qe/kiqqkW0AGtvBjL8TJKZk/eBBSpnJkUWvHdQ9jM2lKHXRYYJuyNpJPlJw3c8QjC2ow6NZYiLExhUaeJelbxQ==", + "requires": { + "@npmcli/git": "^5.0.0", + "glob": "^10.2.2", + "hosted-git-info": "^7.0.0", + "json-parse-even-better-errors": "^3.0.0", + "normalize-package-data": "^6.0.0", + "proc-log": "^4.0.0", + "semver": "^7.5.3" + }, + "dependencies": { + "brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "requires": { + "balanced-match": "^1.0.0" + } + }, + "glob": { + "version": "10.4.5", + "resolved": "https://registry.npmmirror.com/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "requires": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + } + }, + "json-parse-even-better-errors": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.2.tgz", + "integrity": "sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ==" + }, + "minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "requires": { + "brace-expansion": "^2.0.1" + } + }, + "proc-log": { + "version": "4.2.0", + "resolved": 
"https://registry.npmmirror.com/proc-log/-/proc-log-4.2.0.tgz", + "integrity": "sha512-g8+OnU/L2v+wyiVK+D5fA34J7EH8jZ8DDlvwhRCMxmMj7UCBvxiO1mGeN+36JXIKF4zevU4kRBd8lVgG9vLelA==" + }, + "semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==" + } + } + }, + "@npmcli/promise-spawn": { + "version": "7.0.2", + "resolved": "https://registry.npmmirror.com/@npmcli/promise-spawn/-/promise-spawn-7.0.2.tgz", + "integrity": "sha512-xhfYPXoV5Dy4UkY0D+v2KkwvnDfiA/8Mt3sWCGI/hM03NsYIH8ZaG6QzS9x7pje5vHZBZJ2v6VRFVTWACnqcmQ==", + "requires": { + "which": "^4.0.0" + }, + "dependencies": { + "isexe": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/isexe/-/isexe-3.1.1.tgz", + "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==" + }, + "which": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/which/-/which-4.0.0.tgz", + "integrity": "sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==", + "requires": { + "isexe": "^3.1.1" + } + } + } + }, + "@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmmirror.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "optional": true + }, "@svgr/babel-plugin-add-jsx-attribute": { "version": "6.5.1", "resolved": "https://registry.npmmirror.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", @@ -6675,6 +8165,19 @@ "resolved": "https://registry.npmmirror.com/@types/estree/-/estree-0.0.51.tgz", "integrity": "sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ==" }, + "@types/hast": { + "version": "3.0.4", + "resolved": 
"https://registry.npmmirror.com/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "requires": { + "@types/unist": "*" + } + }, + "@types/hosted-git-info": { + "version": "3.0.5", + "resolved": "https://registry.npmmirror.com/@types/hosted-git-info/-/hosted-git-info-3.0.5.tgz", + "integrity": "sha512-Dmngh7U003cOHPhKGyA7LWqrnvcTyILNgNPmNCxlx7j8MIi54iBliiT8XqVLIQ3GchoOjVAyBzNJVyuaJjqokg==" + }, "@types/is-empty": { "version": "1.2.1", "resolved": "https://registry.npmmirror.com/@types/is-empty/-/is-empty-1.2.1.tgz", @@ -6723,6 +8226,11 @@ "resolved": "https://registry.npmmirror.com/@types/unist/-/unist-2.0.6.tgz", "integrity": "sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==" }, + "@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" + }, "@webassemblyjs/ast": { "version": "1.11.1", "resolved": "https://registry.npmmirror.com/@webassemblyjs/ast/-/ast-1.11.1.tgz", @@ -7063,6 +8571,11 @@ "resolved": "https://registry.npmmirror.com/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz", "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==" }, + "ci-info": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/ci-info/-/ci-info-4.0.0.tgz", + "integrity": "sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg==" + }, "color-convert": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz", @@ -7129,6 +8642,16 @@ } } }, + "cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmmirror.com/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": 
"sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "requires": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + } + }, "css-select": { "version": "4.3.0", "resolved": "https://registry.npmmirror.com/css-select/-/css-select-4.3.0.tgz", @@ -7189,6 +8712,14 @@ "resolved": "https://registry.npmmirror.com/dequal/-/dequal-2.0.3.tgz", "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==" }, + "devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "requires": { + "dequal": "^2.0.0" + } + }, "diff": { "version": "5.1.0", "resolved": "https://registry.npmmirror.com/diff/-/diff-5.1.0.tgz", @@ -7276,6 +8807,11 @@ "resolved": "https://registry.npmmirror.com/entities/-/entities-3.0.1.tgz", "integrity": "sha512-WiyBqoomrwMdFG1e0kqvASYfnlb0lp8M5o5Fw2OFq1hNZxxcNk8Ik0Xm7LxzBhuidnZB/UtBqVCgUz3kBOP51Q==" }, + "err-code": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/err-code/-/err-code-2.0.3.tgz", + "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==" + }, "error-ex": { "version": "1.3.2", "resolved": "https://registry.npmmirror.com/error-ex/-/error-ex-1.3.2.tgz", @@ -7411,6 +8947,15 @@ "to-regex-range": "^5.0.1" } }, + "foreground-child": { + "version": "3.2.1", + "resolved": "https://registry.npmmirror.com/foreground-child/-/foreground-child-3.2.1.tgz", + "integrity": "sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA==", + "requires": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + } + }, "format": { "version": "0.2.2", "resolved": "https://registry.npmmirror.com/format/-/format-0.2.2.tgz", @@ -7544,11 +9089,11 @@ "integrity": 
"sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" }, "hosted-git-info": { - "version": "5.2.1", - "resolved": "https://registry.npmmirror.com/hosted-git-info/-/hosted-git-info-5.2.1.tgz", - "integrity": "sha512-xIcQYMnhcx2Nr4JTjsFmwwnr9vldugPy9uVm0o87bjqqWMv9GaqsTeT+i99wTl0mk1uLxJtHxLb8kymqTENQsw==", + "version": "7.0.2", + "resolved": "https://registry.npmmirror.com/hosted-git-info/-/hosted-git-info-7.0.2.tgz", + "integrity": "sha512-puUZAUKT5m8Zzvs72XWy3HtvVbTWljRE66cP60bxJzAqf2DgICo7lYTY2IHUmLnNpjYvw5bvmoHvPc0QO2a62w==", "requires": { - "lru-cache": "^7.5.1" + "lru-cache": "^10.0.1" } }, "ignore": { @@ -7635,6 +9180,11 @@ "resolved": "https://registry.npmmirror.com/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==" }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" + }, "is-glob": { "version": "4.0.3", "resolved": "https://registry.npmmirror.com/is-glob/-/is-glob-4.0.3.tgz", @@ -7653,6 +9203,20 @@ "resolved": "https://registry.npmmirror.com/is-plain-obj/-/is-plain-obj-4.1.0.tgz", "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==" }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmmirror.com/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "requires": { + "@isaacs/cliui": "^8.0.2", + 
"@pkgjs/parseargs": "^0.11.0" + } + }, "jest-worker": { "version": "27.5.1", "resolved": "https://registry.npmmirror.com/jest-worker/-/jest-worker-27.5.1.tgz", @@ -7783,9 +9347,9 @@ "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==" }, "lru-cache": { - "version": "7.14.1", - "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-7.14.1.tgz", - "integrity": "sha512-ysxwsnTKdAx96aTRdhDOCQfDgbHnt8SK0KY8SEjO0wHinhWOFTESbjVCMPbU1uGXg/ch4lifqx0wfjOawU2+WA==" + "version": "10.4.3", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==" }, "markdown-it": { "version": "13.0.1", @@ -7866,6 +9430,125 @@ "uvu": "^0.5.0" } }, + "mdast-util-to-hast": { + "version": "13.2.0", + "resolved": "https://registry.npmmirror.com/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "requires": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "dependencies": { + "@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmmirror.com/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "requires": { + "@types/unist": "*" + } + }, + "@types/unist": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/@types/unist/-/unist-3.0.2.tgz", + "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" + }, + "micromark-util-character": { + "version": "2.1.0", + "resolved": 
"https://registry.npmmirror.com/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "requires": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "micromark-util-encode": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz", + "integrity": "sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA==" + }, + "micromark-util-sanitize-uri": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz", + "integrity": "sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw==", + "requires": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==" + }, + "micromark-util-types": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/micromark-util-types/-/micromark-util-types-2.0.0.tgz", + "integrity": "sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w==" + }, + "unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "requires": { + "@types/unist": "^3.0.0" + } + }, + "unist-util-stringify-position": { + "version": "4.0.0", + "resolved": 
"https://registry.npmmirror.com/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "requires": { + "@types/unist": "^3.0.0" + } + }, + "unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "requires": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + } + }, + "unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "requires": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + } + }, + "vfile": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/vfile/-/vfile-6.0.2.tgz", + "integrity": "sha512-zND7NlS8rJYb/sPqkb13ZvbbUoExdbi4w3SfRrMq6R3FvnLQmmfpajJNITuuYm6AZ5uao9vy4BAos3EXBPf2rg==", + "requires": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + } + }, + "vfile-message": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "requires": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + } + } + } + }, "mdast-util-to-markdown": { "version": "1.4.0", "resolved": "https://registry.npmmirror.com/mdast-util-to-markdown/-/mdast-util-to-markdown-1.4.0.tgz", @@ -8152,6 +9835,11 @@ "resolved": "https://registry.npmmirror.com/minimist/-/minimist-1.2.7.tgz", "integrity": 
"sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==" }, + "minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmmirror.com/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==" + }, "mri": { "version": "1.2.0", "resolved": "https://registry.npmmirror.com/mri/-/mri-1.2.0.tgz", @@ -8173,23 +9861,96 @@ "integrity": "sha512-EJ3rzxL9pTWPjk5arA0s0dgXpnyiAbJDE6wHT62g7VsgrgQgmmZ+Ru++M1BFofncWja+Pnn3rEr3fieRySAdKQ==" }, "nopt": { - "version": "7.0.0", - "resolved": "https://registry.npmmirror.com/nopt/-/nopt-7.0.0.tgz", - "integrity": "sha512-e6Qw1rcrGoSxEH0hQ4GBSdUjkMOtXGhGFXdNT/3ZR0S37eR9DMj5za3dEDWE6o1T3/DP8ZOsPP4MIiky0c3QeA==", + "version": "7.2.1", + "resolved": "https://registry.npmmirror.com/nopt/-/nopt-7.2.1.tgz", + "integrity": "sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w==", "requires": { "abbrev": "^2.0.0" } }, + "normalize-package-data": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/normalize-package-data/-/normalize-package-data-6.0.2.tgz", + "integrity": "sha512-V6gygoYb/5EmNI+MEGrWkC+e6+Rr7mTmfHrxDbLzxQogBkgzo76rkok0Am6thgSF7Mv2nLOajAJj5vDJZEFn7g==", + "requires": { + "hosted-git-info": "^7.0.0", + "semver": "^7.3.5", + "validate-npm-package-license": "^3.0.4" + }, + "dependencies": { + "semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==" + } + } + }, "normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmmirror.com/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==" }, + "npm-install-checks": { + "version": "6.3.0", + "resolved": 
"https://registry.npmmirror.com/npm-install-checks/-/npm-install-checks-6.3.0.tgz", + "integrity": "sha512-W29RiK/xtpCGqn6f3ixfRYGk+zRyr+Ew9F2E20BfXxT5/euLdA/Nm7fO7OeTGuAmTs30cpgInyJ0cYe708YTZw==", + "requires": { + "semver": "^7.1.1" + }, + "dependencies": { + "semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==" + } + } + }, "npm-normalize-package-bin": { "version": "3.0.0", "resolved": "https://registry.npmmirror.com/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.0.tgz", "integrity": "sha512-g+DPQSkusnk7HYXr75NtzkIP4+N81i3RPsGFidF3DzHd9MT9wWngmqoeg/fnHFz5MNdtG4w03s+QnhewSLTT2Q==" }, + "npm-package-arg": { + "version": "11.0.2", + "resolved": "https://registry.npmmirror.com/npm-package-arg/-/npm-package-arg-11.0.2.tgz", + "integrity": "sha512-IGN0IAwmhDJwy13Wc8k+4PEbTPhpJnMtfR53ZbOyjkvmEcLS4nCwp6mvMWjS5sUjeiW3mpx6cHmuhKEu9XmcQw==", + "requires": { + "hosted-git-info": "^7.0.0", + "proc-log": "^4.0.0", + "semver": "^7.3.5", + "validate-npm-package-name": "^5.0.0" + }, + "dependencies": { + "proc-log": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/proc-log/-/proc-log-4.2.0.tgz", + "integrity": "sha512-g8+OnU/L2v+wyiVK+D5fA34J7EH8jZ8DDlvwhRCMxmMj7UCBvxiO1mGeN+36JXIKF4zevU4kRBd8lVgG9vLelA==" + }, + "semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==" + } + } + }, + "npm-pick-manifest": { + "version": "9.1.0", + "resolved": "https://registry.npmmirror.com/npm-pick-manifest/-/npm-pick-manifest-9.1.0.tgz", + "integrity": "sha512-nkc+3pIIhqHVQr085X9d2JzPzLyjzQS96zbruppqC9aZRm/x8xx6xhI98gHtsfELP2bE+loHq8ZaHFHhe+NauA==", + "requires": { + "npm-install-checks": "^6.0.0", + "npm-normalize-package-bin": "^3.0.0", 
+ "npm-package-arg": "^11.0.0", + "semver": "^7.3.5" + }, + "dependencies": { + "semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==" + } + } + }, "nth-check": { "version": "2.1.1", "resolved": "https://registry.npmmirror.com/nth-check/-/nth-check-2.1.1.tgz", @@ -8206,6 +9967,11 @@ "wrappy": "1" } }, + "package-json-from-dist": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/package-json-from-dist/-/package-json-from-dist-1.0.0.tgz", + "integrity": "sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw==" + }, "parent-module": { "version": "1.0.1", "resolved": "https://registry.npmmirror.com/parent-module/-/parent-module-1.0.1.tgz", @@ -8230,11 +9996,25 @@ "resolved": "https://registry.npmmirror.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==" }, + "path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==" + }, "path-parse": { "version": "1.0.7", "resolved": "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz", "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, + "path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmmirror.com/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "requires": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + } + }, "path-type": { "version": "4.0.0", "resolved": 
"https://registry.npmmirror.com/path-type/-/path-type-4.0.0.tgz", @@ -8255,6 +10035,20 @@ "resolved": "https://registry.npmmirror.com/proc-log/-/proc-log-3.0.0.tgz", "integrity": "sha512-++Vn7NS4Xf9NacaU9Xq3URUuqZETPsf8L4j5/ckhaRYsfPeRyzGw+iDjFhV/Jr3uNmTvvddEJFWh5R1gRgUH8A==" }, + "promise-inflight": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/promise-inflight/-/promise-inflight-1.0.1.tgz", + "integrity": "sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==" + }, + "promise-retry": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/promise-retry/-/promise-retry-2.0.1.tgz", + "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", + "requires": { + "err-code": "^2.0.2", + "retry": "^0.12.0" + } + }, "propose": { "version": "0.0.5", "resolved": "https://registry.npmmirror.com/propose/-/propose-0.0.5.tgz", @@ -8423,21 +10217,300 @@ } }, "remark-validate-links": { - "version": "12.1.0", - "resolved": "https://registry.npmmirror.com/remark-validate-links/-/remark-validate-links-12.1.0.tgz", - "integrity": "sha512-+QhcQmu4WhUhxSduRbSInrFAMAFyNVX7QP0OW5AX8C6NzxMweJnwPBsCfWsV77ivIpC5L6sPbZfMLoW85UoMHQ==", - "requires": { - "@types/mdast": "^3.0.0", - "github-slugger": "^1.0.0", - "hosted-git-info": "^5.0.0", - "mdast-util-to-string": "^3.0.0", + "version": "13.0.1", + "resolved": "https://registry.npmmirror.com/remark-validate-links/-/remark-validate-links-13.0.1.tgz", + "integrity": "sha512-GWDZWJAQU0+Fsm1GCLNeJoVcE9L3XTVrWCgQZOYREfXqRFIYaSoIBbARZizLm/vBESq+a3GwEBnIflSCNw26tw==", + "requires": { + "@types/hosted-git-info": "^3.0.0", + "@types/mdast": "^4.0.0", + "github-slugger": "^2.0.0", + "hosted-git-info": "^7.0.0", + "mdast-util-to-hast": "^13.0.0", + "mdast-util-to-string": "^4.0.0", "propose": "0.0.5", - "to-vfile": "^7.0.0", "trough": "^2.0.0", - "unified": "^10.0.0", - "unified-engine": "^10.0.1", - "unist-util-visit": 
"^4.0.0", - "vfile": "^5.0.0" + "unified-engine": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "dependencies": { + "@npmcli/config": { + "version": "8.3.4", + "resolved": "https://registry.npmmirror.com/@npmcli/config/-/config-8.3.4.tgz", + "integrity": "sha512-01rtHedemDNhUXdicU7s+QYz/3JyV5Naj84cvdXGH4mgCdL+agmSYaLF4LUG4vMCLzhBO8YtS0gPpH1FGvbgAw==", + "requires": { + "@npmcli/map-workspaces": "^3.0.2", + "@npmcli/package-json": "^5.1.1", + "ci-info": "^4.0.0", + "ini": "^4.1.2", + "nopt": "^7.2.1", + "proc-log": "^4.2.0", + "semver": "^7.3.5", + "walk-up-path": "^3.0.1" + } + }, + "@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmmirror.com/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "requires": { + "@types/unist": "*" + } + }, + "@types/node": { + "version": "20.14.11", + "resolved": "https://registry.npmmirror.com/@types/node/-/node-20.14.11.tgz", + "integrity": "sha512-kprQpL8MMeszbz6ojB5/tU8PLN4kesnN8Gjzw349rDlNgsSzg90lAVj3llK99Dh7JON+t9AuscPPFW6mPbTnSA==", + "requires": { + "undici-types": "~5.26.4" + } + }, + "@types/unist": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/@types/unist/-/unist-3.0.2.tgz", + "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" + }, + "brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "requires": { + "balanced-match": "^1.0.0" + } + }, + "emoji-regex": { + "version": "10.3.0", + "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-10.3.0.tgz", + "integrity": "sha512-QpLs9D9v9kArv4lfDEgg1X/gN5XLnf/A6l9cs8SPZLRZR3ZkY9+kwIQTxm+fsSej5UMYGE8fdoaZVIBlqG0XTw==" + }, + "github-slugger": { + 
"version": "2.0.0", + "resolved": "https://registry.npmmirror.com/github-slugger/-/github-slugger-2.0.0.tgz", + "integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==" + }, + "glob": { + "version": "10.4.5", + "resolved": "https://registry.npmmirror.com/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "requires": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + } + }, + "import-meta-resolve": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/import-meta-resolve/-/import-meta-resolve-4.1.0.tgz", + "integrity": "sha512-I6fiaX09Xivtk+THaMfAwnA3MVA5Big1WHF1Dfx9hFuvNIWpXnorlkzhcQf6ehrqQiiZECRt1poOAkPmer3ruw==" + }, + "ini": { + "version": "4.1.3", + "resolved": "https://registry.npmmirror.com/ini/-/ini-4.1.3.tgz", + "integrity": "sha512-X7rqawQBvfdjS10YU1y1YVreA3SsLrW9dX2CewP2EbBJM4ypVNLDkO5y04gejPwKIY9lR+7r9gn3rFPt/kmWFg==" + }, + "json-parse-even-better-errors": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.2.tgz", + "integrity": "sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ==" + }, + "lines-and-columns": { + "version": "2.0.4", + "resolved": "https://registry.npmmirror.com/lines-and-columns/-/lines-and-columns-2.0.4.tgz", + "integrity": "sha512-wM1+Z03eypVAVUCE7QdSqpVIvelbOakn1M0bPDoA4SGWPx3sNDVUiMo3L6To6WWGClB7VyXnhQ4Sn7gxiJbE6A==" + }, + "load-plugin": { + "version": "6.0.3", + "resolved": "https://registry.npmmirror.com/load-plugin/-/load-plugin-6.0.3.tgz", + "integrity": "sha512-kc0X2FEUZr145odl68frm+lMJuQ23+rTXYmR6TImqPtbpmXC4vVXbWKDQ9IzndA0HfyQamWfKLhzsqGSTxE63w==", + "requires": { + "@npmcli/config": "^8.0.0", + "import-meta-resolve": 
"^4.0.0" + } + }, + "mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "requires": { + "@types/mdast": "^4.0.0" + } + }, + "minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmmirror.com/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "requires": { + "brace-expansion": "^2.0.1" + } + }, + "parse-json": { + "version": "7.1.1", + "resolved": "https://registry.npmmirror.com/parse-json/-/parse-json-7.1.1.tgz", + "integrity": "sha512-SgOTCX/EZXtZxBE5eJ97P4yGM5n37BwRU+YMsH4vNzFqJV/oWFXXCmwFlgWUM4PrakybVOueJJ6pwHqSVhTFDw==", + "requires": { + "@babel/code-frame": "^7.21.4", + "error-ex": "^1.3.2", + "json-parse-even-better-errors": "^3.0.0", + "lines-and-columns": "^2.0.3", + "type-fest": "^3.8.0" + } + }, + "proc-log": { + "version": "4.2.0", + "resolved": "https://registry.npmmirror.com/proc-log/-/proc-log-4.2.0.tgz", + "integrity": "sha512-g8+OnU/L2v+wyiVK+D5fA34J7EH8jZ8DDlvwhRCMxmMj7UCBvxiO1mGeN+36JXIKF4zevU4kRBd8lVgG9vLelA==" + }, + "semver": { + "version": "7.6.3", + "resolved": "https://registry.npmmirror.com/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==" + }, + "string-width": { + "version": "6.1.0", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-6.1.0.tgz", + "integrity": "sha512-k01swCJAgQmuADB0YIc+7TuatfNvTBVOoaUWJjTB9R4VJzR5vNWzf5t42ESVZFPS8xTySF7CAdV4t/aaIm3UnQ==", + "requires": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^10.2.1", + "strip-ansi": "^7.0.1" + } + }, + "supports-color": { + "version": "9.4.0", + "resolved": "https://registry.npmmirror.com/supports-color/-/supports-color-9.4.0.tgz", + 
"integrity": "sha512-VL+lNrEoIXww1coLPOmiEmK/0sGigko5COxI09KzHc2VJXJsQ37UaQ+8quuxjDeA7+KnLGTWRyOXSLLR2Wb4jw==" + }, + "unified-engine": { + "version": "11.2.1", + "resolved": "https://registry.npmmirror.com/unified-engine/-/unified-engine-11.2.1.tgz", + "integrity": "sha512-xBAdZ8UY2X4R9Hm6X6kMne4Nz0PlpOc1oE6DPeqJnewr5Imkb8uT5Eyvy1h7xNekPL3PSWh3ZJyNrMW6jnNQBg==", + "requires": { + "@types/concat-stream": "^2.0.0", + "@types/debug": "^4.0.0", + "@types/is-empty": "^1.0.0", + "@types/node": "^20.0.0", + "@types/unist": "^3.0.0", + "concat-stream": "^2.0.0", + "debug": "^4.0.0", + "extend": "^3.0.0", + "glob": "^10.0.0", + "ignore": "^5.0.0", + "is-empty": "^1.0.0", + "is-plain-obj": "^4.0.0", + "load-plugin": "^6.0.0", + "parse-json": "^7.0.0", + "trough": "^2.0.0", + "unist-util-inspect": "^8.0.0", + "vfile": "^6.0.0", + "vfile-message": "^4.0.0", + "vfile-reporter": "^8.0.0", + "vfile-statistics": "^3.0.0", + "yaml": "^2.0.0" + } + }, + "unist-util-inspect": { + "version": "8.1.0", + "resolved": "https://registry.npmmirror.com/unist-util-inspect/-/unist-util-inspect-8.1.0.tgz", + "integrity": "sha512-mOlg8Mp33pR0eeFpo5d2902ojqFFOKMMG2hF8bmH7ZlhnmjFgh0NI3/ZDwdaBJNbvrS7LZFVrBVtIE9KZ9s7vQ==", + "requires": { + "@types/unist": "^3.0.0" + } + }, + "unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "requires": { + "@types/unist": "^3.0.0" + } + }, + "unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "requires": { + "@types/unist": "^3.0.0" + } + }, + "unist-util-visit": { + "version": "5.0.0", + "resolved": 
"https://registry.npmmirror.com/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "requires": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + } + }, + "unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "requires": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + } + }, + "vfile": { + "version": "6.0.2", + "resolved": "https://registry.npmmirror.com/vfile/-/vfile-6.0.2.tgz", + "integrity": "sha512-zND7NlS8rJYb/sPqkb13ZvbbUoExdbi4w3SfRrMq6R3FvnLQmmfpajJNITuuYm6AZ5uao9vy4BAos3EXBPf2rg==", + "requires": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + } + }, + "vfile-message": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "requires": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + } + }, + "vfile-reporter": { + "version": "8.1.1", + "resolved": "https://registry.npmmirror.com/vfile-reporter/-/vfile-reporter-8.1.1.tgz", + "integrity": "sha512-qxRZcnFSQt6pWKn3PAk81yLK2rO2i7CDXpy8v8ZquiEOMLSnPw6BMSi9Y1sUCwGGl7a9b3CJT1CKpnRF7pp66g==", + "requires": { + "@types/supports-color": "^8.0.0", + "string-width": "^6.0.0", + "supports-color": "^9.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile": "^6.0.0", + "vfile-message": "^4.0.0", + "vfile-sort": "^4.0.0", + "vfile-statistics": "^3.0.0" + } + }, + "vfile-sort": { + "version": "4.0.0", + "resolved": 
"https://registry.npmmirror.com/vfile-sort/-/vfile-sort-4.0.0.tgz", + "integrity": "sha512-lffPI1JrbHDTToJwcq0rl6rBmkjQmMuXkAxsZPRS9DXbaJQvc642eCg6EGxcX2i1L+esbuhq+2l9tBll5v8AeQ==", + "requires": { + "vfile": "^6.0.0", + "vfile-message": "^4.0.0" + } + }, + "vfile-statistics": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/vfile-statistics/-/vfile-statistics-3.0.0.tgz", + "integrity": "sha512-/qlwqwWBWFOmpXujL/20P+Iuydil0rZZNglR+VNm6J0gpLHwuVM5s7g2TfVoswbXjZ4HuIhLMySEyIw5i7/D8w==", + "requires": { + "vfile": "^6.0.0", + "vfile-message": "^4.0.0" + } + }, + "walk-up-path": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/walk-up-path/-/walk-up-path-3.0.1.tgz", + "integrity": "sha512-9YlCL/ynK3CTlrSRrDxZvUauLzAswPCrsaCgilqFevUYpeEW0/3ScEjaa3kbW/T0ghhkEr7mv+fpjqn1Y1YuTA==" + } } }, "remark-validate-links-heading-id": { @@ -8469,6 +10542,11 @@ "resolved": "https://registry.npmmirror.com/resolve-pathname/-/resolve-pathname-3.0.0.tgz", "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" }, + "retry": { + "version": "0.12.0", + "resolved": "https://registry.npmmirror.com/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==" + }, "reusify": { "version": "1.0.4", "resolved": "https://registry.npmmirror.com/reusify/-/reusify-1.0.4.tgz", @@ -8527,6 +10605,19 @@ "randombytes": "^2.1.0" } }, + "shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "requires": { + "shebang-regex": "^3.0.0" + } + }, + "shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": 
"sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==" + }, "shelljs": { "version": "0.8.5", "resolved": "https://registry.npmmirror.com/shelljs/-/shelljs-0.8.5.tgz", @@ -8537,6 +10628,11 @@ "rechoir": "^0.6.2" } }, + "signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==" + }, "slash": { "version": "4.0.0", "resolved": "https://registry.npmmirror.com/slash/-/slash-4.0.0.tgz", @@ -8556,6 +10652,34 @@ "source-map": "^0.6.0" } }, + "spdx-correct": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/spdx-correct/-/spdx-correct-3.2.0.tgz", + "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", + "requires": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-exceptions": { + "version": "2.5.0", + "resolved": "https://registry.npmmirror.com/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", + "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==" + }, + "spdx-expression-parse": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", + "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "requires": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-license-ids": { + "version": "3.0.18", + "resolved": "https://registry.npmmirror.com/spdx-license-ids/-/spdx-license-ids-3.0.18.tgz", + "integrity": "sha512-xxRs31BqRYHwiMzudOrpSiHtZ8i/GeionCBDSilhYRj+9gIcI8wCZTlXZKu9vZIVqViP3dcp9qE5G6AlIaD+TQ==" + }, "sprintf-js": { "version": "1.0.3", "resolved": "https://registry.npmmirror.com/sprintf-js/-/sprintf-js-1.0.3.tgz", @@ -8584,6 +10708,36 
@@ "strip-ansi": "^7.0.1" } }, + "string-width-cjs": { + "version": "npm:string-width@4.2.3", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "requires": { + "ansi-regex": "^5.0.1" + } + } + } + }, "strip-ansi": { "version": "7.0.1", "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-7.0.1.tgz", @@ -8592,6 +10746,21 @@ "ansi-regex": "^6.0.1" } }, + "strip-ansi-cjs": { + "version": "npm:strip-ansi@6.0.1", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "requires": { + "ansi-regex": "^5.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" + } + } + }, "strip-bom-string": { "version": "1.0.0", "resolved": 
"https://registry.npmmirror.com/strip-bom-string/-/strip-bom-string-1.0.0.tgz", @@ -8696,6 +10865,11 @@ "vfile": "^5.1.0" } }, + "trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==" + }, "trough": { "version": "2.1.0", "resolved": "https://registry.npmmirror.com/trough/-/trough-2.1.0.tgz", @@ -8706,6 +10880,11 @@ "resolved": "https://registry.npmmirror.com/tslib/-/tslib-2.4.1.tgz", "integrity": "sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA==" }, + "type-fest": { + "version": "3.13.1", + "resolved": "https://registry.npmmirror.com/type-fest/-/type-fest-3.13.1.tgz", + "integrity": "sha512-tLq3bSNx+xSpwvAJnzrK0Ep5CLNWjvFTOp71URMaAEWBfRb9nnJiBoUe0tF8bI4ZFO3omgBR6NvnbzVUT3Ly4g==" + }, "typedarray": { "version": "0.0.6", "resolved": "https://registry.npmmirror.com/typedarray/-/typedarray-0.0.6.tgz", @@ -8716,6 +10895,11 @@ "resolved": "https://registry.npmmirror.com/uc.micro/-/uc.micro-1.0.6.tgz", "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" }, + "undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmmirror.com/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, "unicode-canonical-property-names-ecmascript": { "version": "2.0.0", "resolved": "https://registry.npmmirror.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", @@ -8870,6 +11054,21 @@ "resolved": "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-5.1.1.tgz", "integrity": "sha512-F5CZ68eYzuSvJjGhCLPL3cYx45IxkqXSetCcRgUXtbcm50X2L9oOWQlfUfDdAf+6Pd27YDblBfdtmsThXmwpbQ==" }, + "unist-util-position": { + "version": "5.0.0", + "resolved": 
"https://registry.npmmirror.com/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "requires": { + "@types/unist": "^3.0.0" + }, + "dependencies": { + "@types/unist": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/@types/unist/-/unist-3.0.2.tgz", + "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" + } + } + }, "unist-util-stringify-position": { "version": "3.0.2", "resolved": "https://registry.npmmirror.com/unist-util-stringify-position/-/unist-util-stringify-position-3.0.2.tgz", @@ -8945,6 +11144,20 @@ "sade": "^1.7.3" } }, + "validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "requires": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "validate-npm-package-name": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/validate-npm-package-name/-/validate-npm-package-name-5.0.1.tgz", + "integrity": "sha512-OljLrQ9SQdOUqTaQxqL5dEfZWrXExyyWsozYlAWFawPVNuD83igl7uJD2RTkNMbniIYgt8l81eCJGIdQF7avLQ==" + }, "vfile": { "version": "5.3.6", "resolved": "https://registry.npmmirror.com/vfile/-/vfile-5.3.6.tgz", @@ -9051,6 +11264,71 @@ "resolved": "https://registry.npmmirror.com/webpack-sources/-/webpack-sources-3.2.3.tgz", "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==" }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "requires": { + "isexe": "^2.0.0" + } + }, + 
"wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "requires": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "dependencies": { + "ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==" + } + } + }, + "wrap-ansi-cjs": { + "version": "npm:wrap-ansi@7.0.0", + "resolved": "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "dependencies": { + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==" + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + }, + "strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": 
"sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "requires": { + "ansi-regex": "^5.0.1" + } + } + } + }, "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmmirror.com/wrappy/-/wrappy-1.0.2.tgz", diff --git a/package.json b/package.json index 7d30a629c608..3769b162d05d 100644 --- a/package.json +++ b/package.json @@ -16,7 +16,7 @@ "markdownlint-rule-enhanced-proper-names": "^0.0.1", "markdownlint-rule-no-trailing-slash-in-links": "^0.0.1", "remark-cli": "^11.0.0", - "remark-validate-links": "^12.1.0", + "remark-validate-links": "^13.0.1", "remark-validate-links-heading-id": "^0.0.3" }, "remarkConfig": {