toggling of block comments (#4718)
Parent: f46a09ab4f
Commit: 26b3dc29be
11 changed files with 568 additions and 29 deletions
@@ -12,6 +12,7 @@
- [Match mode](#match-mode)
- [Window mode](#window-mode)
- [Space mode](#space-mode)
- [Comment mode](#comment-mode)
- [Popup](#popup)
- [Unimpaired](#unimpaired)
- [Insert mode](#insert-mode)

@@ -289,6 +290,9 @@ This layer is a kludge of mappings, mostly pickers.
| `h` | Select symbol references (**LSP**) | `select_references_to_symbol_under_cursor` |
| `'` | Open last fuzzy picker | `last_picker` |
| `w` | Enter [window mode](#window-mode) | N/A |
| `c` | Comment/uncomment selections | `toggle_comments` |
| `C` | Block comment/uncomment selections | `toggle_block_comments` |
| `Alt-c` | Line comment/uncomment selections | `toggle_line_comments` |
| `p` | Paste system clipboard after selections | `paste_clipboard_after` |
| `P` | Paste system clipboard before selections | `paste_clipboard_before` |
| `y` | Yank selections to clipboard | `yank_to_clipboard` |
@@ -42,7 +42,7 @@ name = "mylang"
scope = "source.mylang"
injection-regex = "mylang"
file-types = ["mylang", "myl"]
comment-token = "#"
comment-tokens = "#"
indent = { tab-width = 2, unit = "  " }
formatter = { command = "mylang-formatter" , args = ["--stdin"] }
language-servers = [ "mylang-lsp" ]

@@ -61,7 +61,8 @@ These configuration keys are available:
| `roots` | A set of marker files to look for when trying to find the workspace root. For example `Cargo.lock`, `yarn.lock` |
| `auto-format` | Whether to autoformat this language when saving |
| `diagnostic-severity` | Minimal severity of diagnostic for it to be displayed. (Allowed values: `Error`, `Warning`, `Info`, `Hint`) |
| `comment-token` | The token to use as a comment-token |
| `comment-tokens` | The tokens to use as comment tokens, either a single token `"//"` or an array `["//", "///", "//!"]` (the first token will be used for commenting). Also configurable as `comment-token` for backwards compatibility |
| `block-comment-tokens`| The start and end tokens for a multiline comment, either an array or a single table of `{ start = "/*", end = "*/" }`. The first set of tokens will be used for commenting; any pair in the array can be uncommented |
| `indent` | The indent to use. Has sub keys `unit` (the text inserted into the document when indenting; usually set to N spaces or `"\t"` for tabs) and `tab-width` (the number of spaces rendered for a tab) |
| `language-servers` | The Language Servers used for this language. See below for more information in the section [Configuring Language Servers for a language](#configuring-language-servers-for-a-language) |
| `grammar` | The tree-sitter grammar to use (defaults to the value of `name`) |
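Taken together, the two new keys documented above are set per language in `languages.toml`. A minimal sketch for the hypothetical `mylang` entry from the example (illustrative values, not taken from the diff):

```toml
[[language]]
name = "mylang"
# Either a single token or an array; the first entry is inserted when commenting.
comment-tokens = ["#", "##"]
# Either a single table or an array of { start, end } pairs; the first pair is
# used when commenting, and any pair is recognised when uncommenting.
block-comment-tokens = [
  { start = "###", end = "###" },
]
```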
@@ -1,9 +1,12 @@
//! This module contains the functionality to toggle comments on lines over the selection
//! using the comment character defined in the user's `languages.toml`

use smallvec::SmallVec;

use crate::{
    find_first_non_whitespace_char, Change, Rope, RopeSlice, Selection, Tendril, Transaction,
    syntax::BlockCommentToken, Change, Range, Rope, RopeSlice, Selection, Tendril, Transaction,
};
use helix_stdx::rope::RopeSliceExt;
use std::borrow::Cow;

/// Given text, a comment token, and a set of line indices, returns the following:

@@ -22,12 +25,12 @@ fn find_line_comment(
) -> (bool, Vec<usize>, usize, usize) {
    let mut commented = true;
    let mut to_change = Vec::new();
    let mut min = usize::MAX; // minimum col for find_first_non_whitespace_char
    let mut min = usize::MAX; // minimum col for first_non_whitespace_char
    let mut margin = 1;
    let token_len = token.chars().count();
    for line in lines {
        let line_slice = text.line(line);
        if let Some(pos) = find_first_non_whitespace_char(line_slice) {
        if let Some(pos) = line_slice.first_non_whitespace_char() {
            let len = line_slice.len_chars();

            if pos < min {
@@ -94,6 +97,222 @@ pub fn toggle_line_comments(doc: &Rope, selection: &Selection, token: Option<&st
    Transaction::change(doc, changes.into_iter())
}

#[derive(Debug, PartialEq, Eq)]
pub enum CommentChange {
    Commented {
        range: Range,
        start_pos: usize,
        end_pos: usize,
        start_margin: bool,
        end_margin: bool,
        start_token: String,
        end_token: String,
    },
    Uncommented {
        range: Range,
        start_pos: usize,
        end_pos: usize,
        start_token: String,
        end_token: String,
    },
    Whitespace {
        range: Range,
    },
}

pub fn find_block_comments(
    tokens: &[BlockCommentToken],
    text: RopeSlice,
    selection: &Selection,
) -> (bool, Vec<CommentChange>) {
    let mut commented = true;
    let mut only_whitespace = true;
    let mut comment_changes = Vec::with_capacity(selection.len());
    let default_tokens = tokens.first().cloned().unwrap_or_default();
    // TODO: check if this can be removed on MSRV bump
    #[allow(clippy::redundant_clone)]
    let mut start_token = default_tokens.start.clone();
    #[allow(clippy::redundant_clone)]
    let mut end_token = default_tokens.end.clone();

    let mut tokens = tokens.to_vec();
    // sort the tokens by length, so longer tokens will match first
    tokens.sort_by(|a, b| {
        if a.start.len() == b.start.len() {
            b.end.len().cmp(&a.end.len())
        } else {
            b.start.len().cmp(&a.start.len())
        }
    });
    for range in selection {
        let selection_slice = range.slice(text);
        if let (Some(start_pos), Some(end_pos)) = (
            selection_slice.first_non_whitespace_char(),
            selection_slice.last_non_whitespace_char(),
        ) {
            let mut line_commented = false;
            let mut after_start = 0;
            let mut before_end = 0;
            let len = (end_pos + 1) - start_pos;

            for BlockCommentToken { start, end } in &tokens {
                let start_len = start.chars().count();
                let end_len = end.chars().count();
                after_start = start_pos + start_len;
                before_end = end_pos.saturating_sub(end_len);

                if len >= start_len + end_len {
                    let start_fragment = selection_slice.slice(start_pos..after_start);
                    let end_fragment = selection_slice.slice(before_end + 1..end_pos + 1);

                    // block commented with these tokens
                    if start_fragment == start.as_str() && end_fragment == end.as_str() {
                        start_token = start.to_string();
                        end_token = end.to_string();
                        line_commented = true;
                        break;
                    }
                }
            }

            if !line_commented {
                comment_changes.push(CommentChange::Uncommented {
                    range: *range,
                    start_pos,
                    end_pos,
                    start_token: default_tokens.start.clone(),
                    end_token: default_tokens.end.clone(),
                });
                commented = false;
            } else {
                comment_changes.push(CommentChange::Commented {
                    range: *range,
                    start_pos,
                    end_pos,
                    start_margin: selection_slice
                        .get_char(after_start)
                        .map_or(false, |c| c == ' '),
                    end_margin: after_start != before_end
                        && selection_slice
                            .get_char(before_end)
                            .map_or(false, |c| c == ' '),
                    start_token: start_token.to_string(),
                    end_token: end_token.to_string(),
                });
            }
            only_whitespace = false;
        } else {
            comment_changes.push(CommentChange::Whitespace { range: *range });
        }
    }
    if only_whitespace {
        commented = false;
    }
    (commented, comment_changes)
}

#[must_use]
pub fn create_block_comment_transaction(
    doc: &Rope,
    selection: &Selection,
    commented: bool,
    comment_changes: Vec<CommentChange>,
) -> (Transaction, SmallVec<[Range; 1]>) {
    let mut changes: Vec<Change> = Vec::with_capacity(selection.len() * 2);
    let mut ranges: SmallVec<[Range; 1]> = SmallVec::with_capacity(selection.len());
    let mut offs = 0;
    for change in comment_changes {
        if commented {
            if let CommentChange::Commented {
                range,
                start_pos,
                end_pos,
                start_token,
                end_token,
                start_margin,
                end_margin,
            } = change
            {
                let from = range.from();
                changes.push((
                    from + start_pos,
                    from + start_pos + start_token.len() + start_margin as usize,
                    None,
                ));
                changes.push((
                    from + end_pos - end_token.len() - end_margin as usize + 1,
                    from + end_pos + 1,
                    None,
                ));
            }
        } else {
            // uncommented so manually map ranges through changes
            match change {
                CommentChange::Uncommented {
                    range,
                    start_pos,
                    end_pos,
                    start_token,
                    end_token,
                } => {
                    let from = range.from();
                    changes.push((
                        from + start_pos,
                        from + start_pos,
                        Some(Tendril::from(format!("{} ", start_token))),
                    ));
                    changes.push((
                        from + end_pos + 1,
                        from + end_pos + 1,
                        Some(Tendril::from(format!(" {}", end_token))),
                    ));

                    let offset = start_token.chars().count() + end_token.chars().count() + 2;
                    ranges.push(
                        Range::new(from + offs, from + offs + end_pos + 1 + offset)
                            .with_direction(range.direction()),
                    );
                    offs += offset;
                }
                CommentChange::Commented { range, .. } | CommentChange::Whitespace { range } => {
                    ranges.push(Range::new(range.from() + offs, range.to() + offs));
                }
            }
        }
    }
    (Transaction::change(doc, changes.into_iter()), ranges)
}

#[must_use]
pub fn toggle_block_comments(
    doc: &Rope,
    selection: &Selection,
    tokens: &[BlockCommentToken],
) -> Transaction {
    let text = doc.slice(..);
    let (commented, comment_changes) = find_block_comments(tokens, text, selection);
    let (mut transaction, ranges) =
        create_block_comment_transaction(doc, selection, commented, comment_changes);
    if !commented {
        transaction = transaction.with_selection(Selection::new(ranges, selection.primary_index()));
    }
    transaction
}

pub fn split_lines_of_selection(text: RopeSlice, selection: &Selection) -> Selection {
    let mut ranges = SmallVec::new();
    for range in selection.ranges() {
        let (line_start, line_end) = range.line_range(text.slice(..));
        let mut pos = text.line_to_char(line_start);
        for line in text.slice(pos..text.line_to_char(line_end + 1)).lines() {
            let start = pos;
            pos += line.len_chars();
            ranges.push(Range::new(start, pos));
        }
    }
    Selection::new(ranges, 0)
}

#[cfg(test)]
mod test {
    use super::*;
@@ -149,4 +368,49 @@ mod test {

        // TODO: account for uncommenting with uneven comment indentation
    }

    #[test]
    fn test_find_block_comments() {
        // three lines 5 characters.
        let mut doc = Rope::from("1\n2\n3");
        // select whole document
        let selection = Selection::single(0, doc.len_chars());

        let text = doc.slice(..);

        let res = find_block_comments(&[BlockCommentToken::default()], text, &selection);

        assert_eq!(
            res,
            (
                false,
                vec![CommentChange::Uncommented {
                    range: Range::new(0, 5),
                    start_pos: 0,
                    end_pos: 4,
                    start_token: "/*".to_string(),
                    end_token: "*/".to_string(),
                }]
            )
        );

        // comment
        let transaction = toggle_block_comments(&doc, &selection, &[BlockCommentToken::default()]);
        transaction.apply(&mut doc);

        assert_eq!(doc, "/* 1\n2\n3 */");

        // uncomment
        let selection = Selection::single(0, doc.len_chars());
        let transaction = toggle_block_comments(&doc, &selection, &[BlockCommentToken::default()]);
        transaction.apply(&mut doc);
        assert_eq!(doc, "1\n2\n3");

        // don't panic when there is just a space in comment
        doc = Rope::from("/* */");
        let selection = Selection::single(0, doc.len_chars());
        let transaction = toggle_block_comments(&doc, &selection, &[BlockCommentToken::default()]);
        transaction.apply(&mut doc);
        assert_eq!(doc, "");
    }
}
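A minimal usage sketch of the new public API, mirroring the test above (assumes a crate that depends on `helix-core`; `BlockCommentToken::default()` is `/*` / `*/`):

```rust
use helix_core::{comment, syntax::BlockCommentToken, Rope, Selection};

fn main() {
    let mut doc = Rope::from("fn main() {}\n");
    // Select the whole document and wrap it in the default block comment tokens.
    let selection = Selection::single(0, doc.len_chars());
    let tokens = [BlockCommentToken::default()]; // start: "/*", end: "*/"
    let transaction = comment::toggle_block_comments(&doc, &selection, &tokens);
    transaction.apply(&mut doc);
    assert_eq!(doc, "/* fn main() {} */\n");
    // A second toggle over a selection covering the commented text removes the tokens again.
}
```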
@@ -1,10 +1,10 @@
use std::{borrow::Cow, collections::HashMap};

use helix_stdx::rope::RopeSliceExt;
use tree_sitter::{Query, QueryCursor, QueryPredicateArg};

use crate::{
    chars::{char_is_line_ending, char_is_whitespace},
    find_first_non_whitespace_char,
    graphemes::{grapheme_width, tab_width_at},
    syntax::{IndentationHeuristic, LanguageConfiguration, RopeProvider, Syntax},
    tree_sitter::Node,

@@ -970,7 +970,7 @@ pub fn indent_for_newline(
    let mut num_attempts = 0;
    for line_idx in (0..=line_before).rev() {
        let line = text.line(line_idx);
        let first_non_whitespace_char = match find_first_non_whitespace_char(line) {
        let first_non_whitespace_char = match line.first_non_whitespace_char() {
            Some(i) => i,
            None => {
                continue;
@@ -37,9 +37,6 @@ pub mod unicode {

pub use helix_loader::find_workspace;

pub fn find_first_non_whitespace_char(line: RopeSlice) -> Option<usize> {
    line.chars().position(|ch| !ch.is_whitespace())
}
mod rope_reader;

pub use rope_reader::RopeReader;
@@ -99,7 +99,19 @@ pub struct LanguageConfiguration {
    pub shebangs: Vec<String>, // interpreter(s) associated with language
    #[serde(default)]
    pub roots: Vec<String>, // these indicate project roots <.git, Cargo.toml>
    pub comment_token: Option<String>,
    #[serde(
        default,
        skip_serializing,
        deserialize_with = "from_comment_tokens",
        alias = "comment-token"
    )]
    pub comment_tokens: Option<Vec<String>>,
    #[serde(
        default,
        skip_serializing,
        deserialize_with = "from_block_comment_tokens"
    )]
    pub block_comment_tokens: Option<Vec<BlockCommentToken>>,
    pub text_width: Option<usize>,
    pub soft_wrap: Option<SoftWrap>,

@@ -240,6 +252,59 @@ impl<'de> Deserialize<'de> for FileType {
    }
}

fn from_comment_tokens<'de, D>(deserializer: D) -> Result<Option<Vec<String>>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum CommentTokens {
        Multiple(Vec<String>),
        Single(String),
    }
    Ok(
        Option::<CommentTokens>::deserialize(deserializer)?.map(|tokens| match tokens {
            CommentTokens::Single(val) => vec![val],
            CommentTokens::Multiple(vals) => vals,
        }),
    )
}

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BlockCommentToken {
    pub start: String,
    pub end: String,
}

impl Default for BlockCommentToken {
    fn default() -> Self {
        BlockCommentToken {
            start: "/*".to_string(),
            end: "*/".to_string(),
        }
    }
}

fn from_block_comment_tokens<'de, D>(
    deserializer: D,
) -> Result<Option<Vec<BlockCommentToken>>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum BlockCommentTokens {
        Multiple(Vec<BlockCommentToken>),
        Single(BlockCommentToken),
    }
    Ok(
        Option::<BlockCommentTokens>::deserialize(deserializer)?.map(|tokens| match tokens {
            BlockCommentTokens::Single(val) => vec![val],
            BlockCommentTokens::Multiple(vals) => vals,
        }),
    )
}

#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
#[serde(rename_all = "kebab-case")]
pub enum LanguageServerFeature {
@@ -4,6 +4,7 @@ use helix_core::{
    syntax::{Configuration, Loader},
    Syntax,
};
use helix_stdx::rope::RopeSliceExt;
use ropey::Rope;
use std::{ops::Range, path::PathBuf, process::Command, sync::Arc};

@@ -211,7 +212,7 @@ fn test_treesitter_indent(
        if ignored_lines.iter().any(|range| range.contains(&(i + 1))) {
            continue;
        }
        if let Some(pos) = helix_core::find_first_non_whitespace_char(line) {
        if let Some(pos) = line.first_non_whitespace_char() {
            let tab_width: usize = 4;
            let suggested_indent = treesitter_indent_for_pos(
                indent_query,
@@ -14,6 +14,8 @@ pub trait RopeSliceExt<'a>: Sized {
        byte_range: R,
    ) -> RegexInput<RopeyCursor<'a>>;
    fn regex_input_at<R: RangeBounds<usize>>(self, char_range: R) -> RegexInput<RopeyCursor<'a>>;
    fn first_non_whitespace_char(self) -> Option<usize>;
    fn last_non_whitespace_char(self) -> Option<usize>;
}

impl<'a> RopeSliceExt<'a> for RopeSlice<'a> {

@@ -64,4 +66,13 @@ impl<'a> RopeSliceExt<'a> for RopeSlice<'a> {
        };
        input.range(byte_range)
    }
    fn first_non_whitespace_char(self) -> Option<usize> {
        self.chars().position(|ch| !ch.is_whitespace())
    }
    fn last_non_whitespace_char(self) -> Option<usize> {
        self.chars_at(self.len_chars())
            .reversed()
            .position(|ch| !ch.is_whitespace())
            .map(|pos| self.len_chars() - pos - 1)
    }
}
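A quick illustration of what the two new helpers return (a standalone sketch, not code from the diff; offsets are char indices):

```rust
use helix_stdx::rope::RopeSliceExt;
use ropey::Rope;

fn main() {
    let text = Rope::from("   let x = 1;   ");
    let slice = text.slice(..);
    // Char index of the first and last non-whitespace characters.
    assert_eq!(slice.first_non_whitespace_char(), Some(3));
    assert_eq!(slice.last_non_whitespace_char(), Some(12));
    // A slice containing only whitespace yields None for both helpers.
    assert_eq!(Rope::from(" \n ").slice(..).first_non_whitespace_char(), None);
}
```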
@@ -12,7 +12,7 @@ pub use typed::*;
use helix_core::{
    char_idx_at_visual_offset, comment,
    doc_formatter::TextFormat,
    encoding, find_first_non_whitespace_char, find_workspace, graphemes,
    encoding, find_workspace, graphemes,
    history::UndoKind,
    increment, indent,
    indent::IndentStyle,

@@ -23,7 +23,7 @@ use helix_core::{
    regex::{self, Regex},
    search::{self, CharMatcher},
    selection, shellwords, surround,
    syntax::LanguageServerFeature,
    syntax::{BlockCommentToken, LanguageServerFeature},
    text_annotations::TextAnnotations,
    textobject,
    tree_sitter::Node,

@@ -415,6 +415,8 @@ impl MappableCommand {
        completion, "Invoke completion popup",
        hover, "Show docs for item under cursor",
        toggle_comments, "Comment/uncomment selections",
        toggle_line_comments, "Line comment/uncomment selections",
        toggle_block_comments, "Block comment/uncomment selections",
        rotate_selections_forward, "Rotate selections forward",
        rotate_selections_backward, "Rotate selections backward",
        rotate_selection_contents_forward, "Rotate selection contents forward",

@@ -822,7 +824,7 @@ fn kill_to_line_start(cx: &mut Context) {
            let head = if anchor == first_char && line != 0 {
                // select until previous line
                line_end_char_index(&text, line - 1)
            } else if let Some(pos) = find_first_non_whitespace_char(text.line(line)) {
            } else if let Some(pos) = text.line(line).first_non_whitespace_char() {
                if first_char + pos < anchor {
                    // select until first non-blank in line if cursor is after it
                    first_char + pos

@@ -884,7 +886,7 @@ fn goto_first_nonwhitespace_impl(view: &mut View, doc: &mut Document, movement:
    let selection = doc.selection(view.id).clone().transform(|range| {
        let line = range.cursor_line(text);

        if let Some(pos) = find_first_non_whitespace_char(text.line(line)) {
        if let Some(pos) = text.line(line).first_non_whitespace_char() {
            let pos = pos + text.line_to_char(line);
            range.put_cursor(text, pos, movement == Movement::Extend)
        } else {

@@ -3087,11 +3089,11 @@ fn insert_with_indent(cx: &mut Context, cursor_fallback: IndentFallbackPos) {
    } else {
        // move cursor to the fallback position
        let pos = match cursor_fallback {
            IndentFallbackPos::LineStart => {
                find_first_non_whitespace_char(text.line(cursor_line))
                    .map(|ws_offset| ws_offset + cursor_line_start)
                    .unwrap_or(cursor_line_start)
            }
            IndentFallbackPos::LineStart => text
                .line(cursor_line)
                .first_non_whitespace_char()
                .map(|ws_offset| ws_offset + cursor_line_start)
                .unwrap_or(cursor_line_start),
            IndentFallbackPos::LineEnd => line_end_char_index(&text, cursor_line),
        };
@@ -4462,18 +4464,124 @@ pub fn completion(cx: &mut Context) {
}

// comments
fn toggle_comments(cx: &mut Context) {
type CommentTransactionFn = fn(
    line_token: Option<&str>,
    block_tokens: Option<&[BlockCommentToken]>,
    doc: &Rope,
    selection: &Selection,
) -> Transaction;

fn toggle_comments_impl(cx: &mut Context, comment_transaction: CommentTransactionFn) {
    let (view, doc) = current!(cx.editor);
    let token = doc
    let line_token: Option<&str> = doc
        .language_config()
        .and_then(|lc| lc.comment_token.as_ref())
        .map(|tc| tc.as_ref());
    let transaction = comment::toggle_line_comments(doc.text(), doc.selection(view.id), token);
        .and_then(|lc| lc.comment_tokens.as_ref())
        .and_then(|tc| tc.first())
        .map(|tc| tc.as_str());
    let block_tokens: Option<&[BlockCommentToken]> = doc
        .language_config()
        .and_then(|lc| lc.block_comment_tokens.as_ref())
        .map(|tc| &tc[..]);

    let transaction =
        comment_transaction(line_token, block_tokens, doc.text(), doc.selection(view.id));

    doc.apply(&transaction, view.id);
    exit_select_mode(cx);
}

/// commenting behavior:
/// 1. only line comment tokens -> line comment
/// 2. each line block commented -> uncomment all lines
/// 3. whole selection block commented -> uncomment selection
/// 4. all lines not commented and block tokens -> comment uncommented lines
/// 5. no comment tokens and not block commented -> line comment
fn toggle_comments(cx: &mut Context) {
    toggle_comments_impl(cx, |line_token, block_tokens, doc, selection| {
        let text = doc.slice(..);

        // only have line comment tokens
        if line_token.is_some() && block_tokens.is_none() {
            return comment::toggle_line_comments(doc, selection, line_token);
        }

        let split_lines = comment::split_lines_of_selection(text, selection);

        let default_block_tokens = &[BlockCommentToken::default()];
        let block_comment_tokens = block_tokens.unwrap_or(default_block_tokens);

        let (line_commented, line_comment_changes) =
            comment::find_block_comments(block_comment_tokens, text, &split_lines);

        // block commented by line would also be block commented so check this first
        if line_commented {
            return comment::create_block_comment_transaction(
                doc,
                &split_lines,
                line_commented,
                line_comment_changes,
            )
            .0;
        }

        let (block_commented, comment_changes) =
            comment::find_block_comments(block_comment_tokens, text, selection);

        // check if selection has block comments
        if block_commented {
            return comment::create_block_comment_transaction(
                doc,
                selection,
                block_commented,
                comment_changes,
            )
            .0;
        }

        // not commented and only have block comment tokens
        if line_token.is_none() && block_tokens.is_some() {
            return comment::create_block_comment_transaction(
                doc,
                &split_lines,
                line_commented,
                line_comment_changes,
            )
            .0;
        }

        // not block commented at all and don't have any tokens
        comment::toggle_line_comments(doc, selection, line_token)
    })
}

fn toggle_line_comments(cx: &mut Context) {
    toggle_comments_impl(cx, |line_token, block_tokens, doc, selection| {
        if line_token.is_none() && block_tokens.is_some() {
            let default_block_tokens = &[BlockCommentToken::default()];
            let block_comment_tokens = block_tokens.unwrap_or(default_block_tokens);
            comment::toggle_block_comments(
                doc,
                &comment::split_lines_of_selection(doc.slice(..), selection),
                block_comment_tokens,
            )
        } else {
            comment::toggle_line_comments(doc, selection, line_token)
        }
    });
}

fn toggle_block_comments(cx: &mut Context) {
    toggle_comments_impl(cx, |line_token, block_tokens, doc, selection| {
        if line_token.is_some() && block_tokens.is_none() {
            comment::toggle_line_comments(doc, selection, line_token)
        } else {
            let default_block_tokens = &[BlockCommentToken::default()];
            let block_comment_tokens = block_tokens.unwrap_or(default_block_tokens);
            comment::toggle_block_comments(doc, selection, block_comment_tokens)
        }
    });
}

fn rotate_selections(cx: &mut Context, direction: Direction) {
    let count = cx.count();
    let (view, doc) = current!(cx.editor);
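The commenting-behavior rules above decide between line and block commenting. For a language that only defines `block-comment-tokens` (CSS further down in `languages.toml`, for instance), rule 4 amounts to wrapping each selected line in its own block comment; a rough sketch of that effect using the `helix-core` helpers (illustrative, not code from the diff):

```rust
use helix_core::{comment, syntax::BlockCommentToken, Rope, Selection};

fn main() {
    let mut doc = Rope::from("a { color: red; }\nb { color: blue; }");
    let selection = Selection::single(0, doc.len_chars());
    // Split the selection into one range per line, then block-comment each range;
    // this is what the fallback path does when no line-comment token exists.
    let per_line = comment::split_lines_of_selection(doc.slice(..), &selection);
    let tokens = [BlockCommentToken::default()]; // start: "/*", end: "*/"
    let transaction = comment::toggle_block_comments(&doc, &per_line, &tokens);
    transaction.apply(&mut doc);
    assert_eq!(doc, "/* a { color: red; } */\n/* b { color: blue; } */");
}
```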
@@ -276,6 +276,9 @@ pub fn default() -> HashMap<Mode, KeyTrie> {
            "k" => hover,
            "r" => rename_symbol,
            "h" => select_references_to_symbol_under_cursor,
            "c" => toggle_comments,
            "C" => toggle_block_comments,
            "A-c" => toggle_line_comments,
            "?" => command_palette,
        },
        "z" => { "View"
@@ -191,7 +191,12 @@ injection-regex = "rust"
file-types = ["rs"]
roots = ["Cargo.toml", "Cargo.lock"]
auto-format = true
comment-token = "//"
comment-tokens = ["//", "///", "//!"]
block-comment-tokens = [
  { start = "/*", end = "*/" },
  { start = "/**", end = "*/" },
  { start = "/*!", end = "*/" },
]
language-servers = [ "rust-analyzer" ]
indent = { tab-width = 4, unit = "    " }
persistent-diagnostic-sources = ["rustc", "clippy"]
@@ -283,6 +288,7 @@ injection-regex = "protobuf"
file-types = ["proto"]
language-servers = [ "bufls", "pbkit" ]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 2, unit = "  " }

[[grammar]]

@@ -326,6 +332,7 @@ injection-regex = "mint"
file-types = ["mint"]
shebangs = []
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "mint" ]
indent = { tab-width = 2, unit = "  " }

@@ -408,6 +415,7 @@ scope = "source.c"
injection-regex = "c"
file-types = ["c"] # TODO: ["h"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "clangd" ]
indent = { tab-width = 2, unit = "  " }

@@ -444,6 +452,7 @@ scope = "source.cpp"
injection-regex = "cpp"
file-types = ["cc", "hh", "c++", "cpp", "hpp", "h", "ipp", "tpp", "cxx", "hxx", "ixx", "txx", "ino", "C", "H", "cu", "cuh", "cppm", "h++", "ii", "inl", { glob = ".hpp.in" }, { glob = ".h.in" }]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "clangd" ]
indent = { tab-width = 2, unit = "  " }

@@ -491,6 +500,7 @@ injection-regex = "c-?sharp"
file-types = ["cs", "csx", "cake"]
roots = ["sln", "csproj"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "\t" }
language-servers = [ "omnisharp" ]

@@ -549,6 +559,7 @@ file-types = ["go"]
roots = ["go.work", "go.mod"]
auto-format = true
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "gopls", "golangci-lint-lsp" ]
# TODO: gopls needs utf-8 offsets?
indent = { tab-width = 4, unit = "\t" }

@@ -614,6 +625,7 @@ scope = "source.gotmpl"
injection-regex = "gotmpl"
file-types = ["gotmpl"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "gopls" ]
indent = { tab-width = 2, unit = "  " }

@@ -643,6 +655,7 @@ language-id = "javascript"
file-types = ["js", "mjs", "cjs", "rules", "es6", "pac", { glob = "jakefile" }]
shebangs = ["node"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "typescript-language-server" ]
indent = { tab-width = 2, unit = "  " }

@@ -669,6 +682,7 @@ injection-regex = "jsx"
language-id = "javascriptreact"
file-types = ["jsx"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "typescript-language-server" ]
indent = { tab-width = 2, unit = "  " }
grammar = "javascript"

@@ -680,6 +694,8 @@ injection-regex = "(ts|typescript)"
file-types = ["ts", "mts", "cts"]
language-id = "typescript"
shebangs = ["deno", "ts-node"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "typescript-language-server" ]
indent = { tab-width = 2, unit = "  " }

@@ -693,6 +709,8 @@ scope = "source.tsx"
injection-regex = "(tsx)" # |typescript
language-id = "typescriptreact"
file-types = ["tsx"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "typescript-language-server" ]
indent = { tab-width = 2, unit = "  " }

@@ -705,6 +723,7 @@ name = "css"
scope = "source.css"
injection-regex = "css"
file-types = ["css", "scss"]
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "vscode-css-language-server" ]
auto-format = true
indent = { tab-width = 2, unit = "  " }

@@ -718,6 +737,7 @@ name = "scss"
scope = "source.scss"
injection-regex = "scss"
file-types = ["scss"]
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "vscode-css-language-server" ]
auto-format = true
indent = { tab-width = 2, unit = "  " }

@@ -731,6 +751,7 @@ name = "html"
scope = "text.html.basic"
injection-regex = "html"
file-types = ["html", "htm", "shtml", "xhtml", "xht", "jsp", "asp", "aspx", "jshtm", "volt", "rhtml"]
block-comment-tokens = { start = "<!--", end = "-->" }
language-servers = [ "vscode-html-language-server" ]
auto-format = true
indent = { tab-width = 2, unit = "  " }
@@ -901,6 +922,7 @@ injection-regex = "php"
file-types = ["php", "inc", "php4", "php5", "phtml", "ctp"]
shebangs = ["php"]
roots = ["composer.json", "index.php"]
comment-token = "//"
language-servers = [ "intelephense" ]
indent = { tab-width = 4, unit = "    " }

@@ -913,6 +935,7 @@ name = "twig"
scope = "source.twig"
injection-regex = "twig"
file-types = ["twig"]
block-comment-tokens = { start = "{#", end = "#}" }
indent = { tab-width = 2, unit = "  " }

[[grammar]]

@@ -966,6 +989,7 @@ injection-regex = "lean"
file-types = ["lean"]
roots = [ "lakefile.lean" ]
comment-token = "--"
block-comment-tokens = { start = "/-", end = "-/" }
language-servers = [ "lean" ]
indent = { tab-width = 2, unit = "  " }

@@ -992,6 +1016,7 @@ file-types = ["jl"]
shebangs = ["julia"]
roots = ["Manifest.toml", "Project.toml"]
comment-token = "#"
block-comment-tokens = { start = "#=", end = "=#" }
language-servers = [ "julia" ]
indent = { tab-width = 4, unit = "    " }

@@ -1055,6 +1080,7 @@ scope = "source.ocaml"
injection-regex = "ocaml"
file-types = ["ml"]
shebangs = ["ocaml", "ocamlrun", "ocamlscript"]
block-comment-tokens = { start = "(*", end = "*)" }
comment-token = "(**)"
language-servers = [ "ocamllsp" ]
indent = { tab-width = 2, unit = "  " }

@@ -1074,6 +1100,7 @@ name = "ocaml-interface"
scope = "source.ocaml.interface"
file-types = ["mli"]
shebangs = []
block-comment-tokens = { start = "(*", end = "*)" }
comment-token = "(**)"
language-servers = [ "ocamllsp" ]
indent = { tab-width = 2, unit = "  " }

@@ -1096,6 +1123,7 @@ file-types = ["lua"]
shebangs = ["lua", "luajit"]
roots = [".luarc.json", ".luacheckrc", ".stylua.toml", "selene.toml", ".git"]
comment-token = "--"
block-comment-tokens = { start = "--[[", end = "--]]" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "lua-language-server" ]

@@ -1121,6 +1149,7 @@ scope = "source.vue"
injection-regex = "vue"
file-types = ["vue"]
roots = ["package.json"]
block-comment-tokens = { start = "<!--", end = "-->" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "vuels" ]

@@ -1148,6 +1177,7 @@ injection-regex = "haskell"
file-types = ["hs", "hs-boot"]
roots = ["Setup.hs", "stack.yaml", "cabal.project"]
comment-token = "--"
block-comment-tokens = { start = "{-", end = "-}" }
language-servers = [ "haskell-language-server" ]
indent = { tab-width = 2, unit = "  " }

@@ -1173,6 +1203,7 @@ injection-regex = "purescript"
file-types = ["purs"]
roots = ["spago.yaml", "spago.dhall", "bower.json"]
comment-token = "--"
block-comment-tokens = { start = "{-", end = "-}" }
language-servers = [ "purescript-language-server" ]
indent = { tab-width = 2, unit = "  " }
auto-format = true

@@ -1227,6 +1258,7 @@ scope = "source.prolog"
file-types = ["pl", "prolog"]
shebangs = ["swipl"]
comment-token = "%"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "swipl" ]

[[language]]

@@ -1246,6 +1278,7 @@ name = "cmake"
scope = "source.cmake"
file-types = ["cmake", { glob = "CMakeLists.txt" }]
comment-token = "#"
block-comment-tokens = { start = "#[[", end = "]]" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "cmake-language-server" ]
injection-regex = "cmake"

@@ -1272,6 +1305,7 @@ name = "glsl"
scope = "source.glsl"
file-types = ["glsl", "vert", "tesc", "tese", "geom", "frag", "comp" ]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "    " }
injection-regex = "glsl"

@@ -1309,6 +1343,7 @@ file-types = ["rkt", "rktd", "rktl", "scrbl"]
shebangs = ["racket"]
comment-token = ";"
indent = { tab-width = 2, unit = "  " }
block-comment-tokens = { start = "#|", end = "|#" }
language-servers = [ "racket" ]
grammar = "scheme"
@@ -1343,6 +1378,7 @@ name = "wgsl"
scope = "source.wgsl"
file-types = ["wgsl"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "wgsl_analyzer" ]
indent = { tab-width = 4, unit = "    " }

@@ -1389,6 +1425,7 @@ name = "tablegen"
scope = "source.tablegen"
file-types = ["td"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 2, unit = "  " }
injection-regex = "tablegen"

@@ -1404,6 +1441,7 @@ file-types = ["md", "markdown", "mkd", "mdwn", "mdown", "markdn", "mdtxt", "mdte
roots = [".marksman.toml"]
language-servers = [ "marksman" ]
indent = { tab-width = 2, unit = "  " }
block-comment-tokens = { start = "<!--", end = "-->" }

[[grammar]]
name = "markdown"

@@ -1427,6 +1465,7 @@ file-types = ["dart"]
roots = ["pubspec.yaml"]
auto-format = true
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "dart" ]
indent = { tab-width = 2, unit = "  " }

@@ -1440,6 +1479,7 @@ scope = "source.scala"
roots = ["build.sbt", "build.sc", "build.gradle", "build.gradle.kts", "pom.xml", ".scala-build"]
file-types = ["scala", "sbt", "sc"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "metals" ]

@@ -1560,6 +1600,8 @@ scope = "source.graphql"
injection-regex = "graphql"
file-types = ["gql", "graphql", "graphqls"]
language-servers = [ "graphql-language-service" ]
comment-token = "#"
block-comment-tokens = { start = "\"\"\"", end = "\"\"\"" }
indent = { tab-width = 2, unit = "  " }

[[grammar]]

@@ -1574,6 +1616,7 @@ file-types = ["elm"]
roots = ["elm.json"]
auto-format = true
comment-token = "--"
block-comment-tokens = { start = "{-", end = "-}" }
language-servers = [ "elm-language-server" ]
indent = { tab-width = 4, unit = "    " }

@@ -1586,6 +1629,7 @@ name = "iex"
scope = "source.iex"
injection-regex = "iex"
file-types = ["iex"]
comment-token = "#"

[[grammar]]
name = "iex"

@@ -1599,6 +1643,7 @@ file-types = ["res"]
roots = ["bsconfig.json"]
auto-format = true
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "rescript-language-server" ]
indent = { tab-width = 2, unit = "  " }

@@ -1635,6 +1680,7 @@ scope = "source.kotlin"
file-types = ["kt", "kts"]
roots = ["settings.gradle", "settings.gradle.kts"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "    " }
language-servers = [ "kotlin-language-server" ]

@@ -1649,6 +1695,7 @@ injection-regex = "(hcl|tf|nomad)"
language-id = "terraform"
file-types = ["hcl", "tf", "nomad"]
comment-token = "#"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "terraform-ls" ]
auto-format = true

@@ -1663,6 +1710,7 @@ scope = "source.tfvars"
language-id = "terraform-vars"
file-types = ["tfvars"]
comment-token = "#"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "terraform-ls" ]
auto-format = true

@@ -1685,6 +1733,7 @@ scope = "source.sol"
injection-regex = "(sol|solidity)"
file-types = ["sol"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "    " }
language-servers = [ "solc" ]
@@ -1713,6 +1762,7 @@ scope = "source.ron"
injection-regex = "ron"
file-types = ["ron"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "    " }

[[grammar]]

@@ -1754,6 +1804,7 @@ injection-regex = "(r|R)md"
file-types = ["rmd", "Rmd"]
indent = { tab-width = 2, unit = "  " }
grammar = "markdown"
block-comment-tokens = { start = "<!--", end = "-->" }
language-servers = [ "r" ]

[[language]]

@@ -1763,6 +1814,7 @@ injection-regex = "swift"
file-types = ["swift"]
roots = [ "Package.swift" ]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
auto-format = true
language-servers = [ "sourcekit-lsp" ]

@@ -1775,6 +1827,7 @@ name = "erb"
scope = "text.html.erb"
injection-regex = "erb"
file-types = ["erb"]
block-comment-tokens = { start = "<!--", end = "-->" }
indent = { tab-width = 2, unit = "  " }
grammar = "embedded-template"

@@ -1783,6 +1836,7 @@ name = "ejs"
scope = "text.html.ejs"
injection-regex = "ejs"
file-types = ["ejs"]
block-comment-tokens = { start = "<!--", end = "-->" }
indent = { tab-width = 2, unit = "  " }
grammar = "embedded-template"

@@ -1796,6 +1850,7 @@ scope = "source.eex"
injection-regex = "eex"
file-types = ["eex"]
roots = ["mix.exs", "mix.lock"]
block-comment-tokens = { start = "<!--", end = "-->" }
indent = { tab-width = 2, unit = "  " }

[[grammar]]

@@ -1808,6 +1863,7 @@ scope = "source.heex"
injection-regex = "heex"
file-types = ["heex"]
roots = ["mix.exs", "mix.lock"]
block-comment-tokens = { start = "<!--", end = "-->" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "elixir-ls" ]

@@ -1820,6 +1876,7 @@ name = "sql"
scope = "source.sql"
file-types = ["sql", "dsql"]
comment-token = "--"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "    " }
injection-regex = "sql"

@@ -1878,6 +1935,7 @@ scope = "source.vala"
injection-regex = "vala"
file-types = ["vala", "vapi"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "vala-language-server" ]

@@ -1903,6 +1961,7 @@ scope = "source.devicetree"
injection-regex = "(dtsi?|devicetree|fdt)"
file-types = ["dts", "dtsi"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "\t" }

[[grammar]]

@@ -1941,6 +2000,7 @@ file-types = ["odin"]
roots = ["ols.json"]
language-servers = [ "ols" ]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "\t" }
formatter = { command = "odinfmt", args = [ "-stdin", "true" ] }

@@ -1998,6 +2058,7 @@ roots = ["v.mod"]
language-servers = [ "vlang-language-server" ]
auto-format = true
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "\t" }

[[grammar]]

@@ -2009,6 +2070,7 @@ name = "verilog"
scope = "source.verilog"
file-types = ["v", "vh", "sv", "svh"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "svlangserver" ]
indent = { tab-width = 2, unit = "  " }
injection-regex = "verilog"

@@ -2045,6 +2107,7 @@ scope = "source.openscad"
injection-regex = "openscad"
file-types = ["scad"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "openscad-lsp" ]
indent = { tab-width = 2, unit = "\t" }
@@ -2109,6 +2172,7 @@ injection-regex = "idr"
file-types = ["idr"]
shebangs = []
comment-token = "--"
block-comment-tokens = { start = "{-", end = "-}" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "idris2-lsp" ]

@@ -2144,6 +2208,7 @@ scope = "source.dot"
injection-regex = "dot"
file-types = ["dot"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "    " }
language-servers = [ "dot-language-server" ]

@@ -2173,6 +2238,7 @@ scope = "source.slint"
injection-regex = "slint"
file-types = ["slint"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "    " }
language-servers = [ "slint-lsp" ]

@@ -2222,6 +2288,7 @@ scope = "source.pascal"
injection-regex = "pascal"
file-types = ["pas", "pp", "inc", "lpr", "lfm"]
comment-token = "//"
block-comment-tokens = { start = "{", end = "}" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "pasls" ]

@@ -2234,7 +2301,7 @@ name = "sml"
scope = "source.sml"
injection-regex = "sml"
file-types = ["sml"]
comment-token = "(*"
block-comment-tokens = { start = "(*", end = "*)" }

[[grammar]]
name = "sml"

@@ -2246,6 +2313,7 @@ scope = "source.jsonnet"
file-types = ["libsonnet", "jsonnet"]
roots = ["jsonnetfile.json"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "jsonnet-language-server" ]

@@ -2258,6 +2326,7 @@ name = "astro"
scope = "source.astro"
injection-regex = "astro"
file-types = ["astro"]
block-comment-tokens = { start = "<!--", end = "-->" }
indent = { tab-width = 2, unit = "  " }

[[grammar]]

@@ -2281,6 +2350,7 @@ source = { git = "https://github.com/vito/tree-sitter-bass", rev = "501133e260d7
name = "wat"
scope = "source.wat"
comment-token = ";;"
block-comment-tokens = { start = "(;", end = ";)" }
file-types = ["wat"]

[[grammar]]

@@ -2291,6 +2361,7 @@ source = { git = "https://github.com/wasm-lsp/tree-sitter-wasm", rev = "2ca28a9f
name = "wast"
scope = "source.wast"
comment-token = ";;"
block-comment-tokens = { start = "(;", end = ";)" }
file-types = ["wast"]

[[grammar]]

@@ -2302,6 +2373,7 @@ name = "d"
scope = "source.d"
file-types = [ "d", "dd" ]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
injection-regex = "d"
indent = { tab-width = 4, unit = "    "}
language-servers = [ "serve-d" ]

@@ -2328,6 +2400,7 @@ name = "kdl"
scope = "source.kdl"
file-types = ["kdl"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
injection-regex = "kdl"

[[grammar]]

@@ -2398,6 +2471,7 @@ file-types = [
  "musicxml",
  "glif"
]
block-comment-tokens = { start = "<!--", end = "-->" }
indent = { tab-width = 2, unit = "  " }

[language.auto-pairs]

@@ -2437,6 +2511,7 @@ scope = "source.wit"
injection-regex = "wit"
file-types = ["wit"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 2, unit = "  " }

[language.auto-pairs]

@@ -2501,6 +2576,7 @@ scope = "source.bicep"
file-types = ["bicep"]
auto-format = true
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 2, unit = "  "}
language-servers = [ "bicep-langserver" ]

@@ -2513,6 +2589,8 @@ name = "qml"
scope = "source.qml"
file-types = ["qml"]
language-servers = [ "qmlls" ]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
indent = { tab-width = 4, unit = "    " }
grammar = "qmljs"
@@ -2552,6 +2630,7 @@ injection-regex = "pony"
roots = ["corral.json", "lock.json"]
indent = { tab-width = 2, unit = "  " }
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }

[[grammar]]
name = "ponylang"

@@ -2563,6 +2642,7 @@ scope = "source.dhall"
injection-regex = "dhall"
file-types = ["dhall"]
comment-token = "--"
block-comment-tokens = { start = "{-", end = "-}" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "dhall-lsp-server" ]
formatter = { command = "dhall" , args = ["format"] }

@@ -2586,6 +2666,7 @@ scope = "source.msbuild"
injection-regex = "msbuild"
file-types = ["proj", "vbproj", "csproj", "fsproj", "targets", "props"]
indent = { tab-width = 2, unit = "  " }
block-comment-tokens = { start = "<!--", end = "-->" }
grammar = "xml"

[language.auto-pairs]

@@ -2632,7 +2713,7 @@ scope = "source.tal"
injection-regex = "tal"
file-types = ["tal"]
auto-format = false
comment-token = "("
block-comment-tokens = { start = "(", end = ")" }

[[grammar]]
name = "uxntal"

@@ -2766,6 +2847,7 @@ injection-regex = "nim"
file-types = ["nim", "nims", "nimble"]
shebangs = []
comment-token = "#"
block-comment-tokens = { start = "#[", end = "]#" }
indent = { tab-width = 2, unit = "  " }
language-servers = [ "nimlangserver" ]

@@ -2805,6 +2887,7 @@ source = { git = "https://github.com/pfeiferj/tree-sitter-hurl", rev = "264c4206
[[language]]
name = "markdoc"
scope = "text.markdoc"
block-comment-tokens = { start = "<!--", end = "-->" }
file-types = ["mdoc"]
language-servers = [ "markdoc-ls" ]

@@ -2858,6 +2941,7 @@ scope = "source.blueprint"
injection-regex = "blueprint"
file-types = ["blp"]
comment-token = "//"
block-comment-tokens = { start = "/*", end = "*/" }
language-servers = [ "blueprint-compiler" ]
indent = { tab-width = 4, unit = "    " }

@@ -2910,6 +2994,7 @@ name = "webc"
scope = "text.html.webc"
injection-regex = "webc"
file-types = ["webc"]
block-comment-tokens = { start = "<!--", end = "-->" }
indent = { tab-width = 2, unit = "  " }
grammar = "html"