Skip to content

Commit

Permalink
feat: added leading comments to the formatter
Browse files Browse the repository at this point in the history
  • Loading branch information
kpagacz committed Sep 14, 2024
1 parent 02ae8c4 commit 91faf53
Show file tree
Hide file tree
Showing 16 changed files with 137 additions and 28 deletions.
37 changes: 27 additions & 10 deletions formatter/src/code.rs
Original file line number Diff line number Diff line change
Expand Up @@ -122,16 +122,33 @@ impl<'a> Code for Token<'a> {

impl Code for CommentedToken<'_> {
fn to_docs(&self, config: &impl FormattingConfig) -> Rc<Doc> {
if let Some(inline_comment) = self.inline_comment {
// Group because the token should go with its inline comment
// no matter whether the line fits or not
self.token.to_docs(config).cons(text!(" ")).cons(text!(
inline_comment,
0,
InlineCommentPosition::End
))
} else {
self.token.to_docs(config)
match (&self.leading_comments, self.inline_comment) {
(None, None) => self.token.to_docs(config),
(None, Some(inline_comment)) => self
.token
.to_docs(config)
.cons(text!(" "))
.cons(text!(inline_comment, 0, InlineCommentPosition::End)),
(Some(leading_comments), None) => {
let leading_comments = leading_comments
.iter()
.fold(Rc::new(Doc::Nil), |first, second| {
first.cons(text!(second, 0)).cons(text!("\n"))
});

leading_comments.cons(self.token.to_docs(config))
}
(Some(leading_comments), Some(inline_comment)) => {
let leading_comments = leading_comments
.iter()
.fold(Rc::new(Doc::Nil), |first, second| {
first.cons(text!(second, 0)).cons(text!("\n"))
});
leading_comments
.cons(self.token.to_docs(config))
.cons(text!(" "))
.cons(text!(inline_comment, 0, InlineCommentPosition::End))
}
}
}
}
Expand Down
2 changes: 2 additions & 0 deletions parser/src/compound.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use log::trace;
use nom::{
combinator::{map, opt},
error::Error,
Expand Down Expand Up @@ -65,6 +66,7 @@ where
while let Some(xpr) = comma_delimited_args.next() {
args.push(Arg(xpr, comma_delimited_args.next()));
}
trace!("delimited_comma_sep_exprs: parsed args {args:?}");
Args::new(ldelim, args, rdelim)
}
None => Args::new(ldelim, vec![], rdelim),
Expand Down
9 changes: 9 additions & 0 deletions parser/src/expressions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ fn literal_expr<'a, 'b: 'a>(tokens: Input<'a, 'b>) -> IResult<Input<'a, 'b>, Exp
pub(crate) fn term_expr<'a, 'b: 'a>(
tokens: Input<'a, 'b>,
) -> IResult<Input<'a, 'b>, Expression<'a>> {
trace!("term_expr: {}", TokensBuffer(tokens));
alt((
for_loop_expression,
while_expression,
Expand Down Expand Up @@ -75,6 +76,7 @@ pub(crate) fn term_expr<'a, 'b: 'a>(
))(tokens)
}

#[derive(Debug)]
enum Tail<'a> {
Call(Args<'a>),
DoubleSubset(Args<'a>),
Expand All @@ -86,6 +88,8 @@ pub(crate) fn atomic_term<'a, 'b: 'a>(
) -> IResult<Input<'a, 'b>, Expression<'a>> {
let (mut tokens, lhs) = term_expr(tokens)?;
let mut acc = lhs;
trace!("atomic_term: parsed LHS: {acc}");
trace!("atomic_term: parsing rhs: {}", TokensBuffer(tokens));
while let Ok((new_tokens, tail)) = alt((
map(
delimited_comma_sep_exprs(map(lparen, Delimiter::Paren), map(rparen, Delimiter::Paren)),
Expand All @@ -107,6 +111,7 @@ pub(crate) fn atomic_term<'a, 'b: 'a>(
),
))(tokens)
{
trace!("atomic_term: parsed the rhs to this tail: {tail:?}");
match tail {
Tail::Call(args) => {
acc = Expression::FunctionCall(FunctionCall {
Expand All @@ -121,8 +126,10 @@ pub(crate) fn atomic_term<'a, 'b: 'a>(
})
}
}
trace!("atomic_term: final acc: {acc}");
tokens = new_tokens;
}

Ok((tokens, acc))
}

Expand Down Expand Up @@ -257,6 +264,8 @@ impl ExprParser {
}
lhs = Expression::Bop(op, Box::new(lhs), Box::new(rhs));
}
trace!("ExprParser::parse: LHS {lhs}");
trace!("ExprParser::parse: tokens left: {}", TokensBuffer(tokens));
Ok((tokens, lhs))
}
}
Expand Down
23 changes: 15 additions & 8 deletions parser/src/pre_parsing_hooks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,15 +12,22 @@ pub fn pre_parse<'a>(tokens: &'a mut [CommentedToken<'a>]) -> Vec<&'a CommentedT
let mut it = 0;
let mut tokens_without_comments = vec![];
while it < tokens.len() {
if let Token::Comment(_) = tokens[it].token {
let start = it;
while matches!(tokens[it].token, Token::Comment(_))
|| matches!(tokens[it].token, Token::Newline)
{
if let Token::Comment(comment) = tokens[it].token {
let mut comments = vec![comment];
it += 1;
loop {
match tokens[it].token {
Token::Newline => {
if matches!(tokens[it - 1].token, Token::Newline) {
comments.push("");
}
}
Token::Comment(comment) => comments.push(comment),
_ => break,
}
it += 1;
}

tokens[it].leading_comments = Some((start, it));
tokens[it].leading_comments = Some(comments);
tokens_without_comments.push(it);
} else if let Token::InlineComment(comment) = tokens[it].token {
tokens[it - 1].inline_comment = Some(comment);
Expand Down Expand Up @@ -56,7 +63,7 @@ mod tests {
// Comments
assert_eq!(
res_token.leading_comments,
Some((0, 2)),
Some(vec!["Comment"]),
"The length of the leading comments does not match"
);

Expand Down
12 changes: 2 additions & 10 deletions tokenizer/src/tokens.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ pub struct CommentedToken<'a> {
pub offset: usize,
/// Preceding comments. Contains the text of each comment line preceding the
/// token, in order of appearance (an empty string marks a blank line between comment runs).
pub leading_comments: Option<(usize, usize)>,
pub leading_comments: Option<Vec<&'a str>>,
/// Trailing inline comment. Contains the text of the comment that follows the token on the same line.
pub inline_comment: Option<&'a str>,
}
Expand All @@ -33,7 +33,7 @@ impl<'a> CommentedToken<'a> {
token: Token<'a>,
line: u32,
offset: usize,
leading_comments: Option<(usize, usize)>,
leading_comments: Option<Vec<&'a str>>,
inline_comment: Option<&'a str>,
) -> Self {
Self {
Expand All @@ -44,14 +44,6 @@ impl<'a> CommentedToken<'a> {
inline_comment,
}
}

pub fn get_preceding_comments(
&'a self,
tokens: &'a [CommentedToken<'a>],
) -> Option<&'a [CommentedToken<'a>]> {
self.leading_comments
.map(|(start, end)| &tokens[start..end])
}
}

/// When comparing two tokens, only the token itself is compared.
Expand Down
6 changes: 6 additions & 0 deletions unsully/tests/format_integration.rs
Original file line number Diff line number Diff line change
Expand Up @@ -155,3 +155,9 @@ comparison_test!(
);
comparison_test!(comments_are_not_formatted, "057");
comparison_test!(comments_in_an_array, "058");
comparison_test!(single_leading_comment, "059");
comparison_test!(two_line_leading_comment, "060");
comparison_test!(two_line_with_short_line_config, "060", short_line_config());
comparison_test!(two_leading_comments_one_after_another, "061");
comparison_test!(comments_with_no_code_work, "062");
comparison_test!(real_life_example_0, "real_life_0");
2 changes: 2 additions & 0 deletions unsully/tests/test_cases/059.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Comment
a
2 changes: 2 additions & 0 deletions unsully/tests/test_cases/059.expected
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Comment
a
3 changes: 3 additions & 0 deletions unsully/tests/test_cases/060.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Comment
# Second comment
a
3 changes: 3 additions & 0 deletions unsully/tests/test_cases/060.expected
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Comment
# Second comment
a
4 changes: 4 additions & 0 deletions unsully/tests/test_cases/061.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# comment

# Comment
a
4 changes: 4 additions & 0 deletions unsully/tests/test_cases/061.expected
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# comment

# Comment
a
3 changes: 3 additions & 0 deletions unsully/tests/test_cases/062.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# comment

# Comment
3 changes: 3 additions & 0 deletions unsully/tests/test_cases/062.expected
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# comment

# Comment
26 changes: 26 additions & 0 deletions unsully/tests/test_cases/real_life_0.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
rows <- lapply(
datanames,
function(dataname) {
parent <- teal.data::parent(joinkeys, dataname)

# todo: what should we display for a parent dataset?
# - Obs and Subjects
# - Obs only
# - Subjects only
# todo (for later): summary table should be displayed in a way that child datasets
# are indented under their parent dataset to form a tree structure
subject_keys <- if (length(parent) > 0) {
names(joinkeys[dataname, parent])
} else {
joinkeys[dataname, dataname]
}
get_object_filter_overview(
filtered_data = filtered_data_objs[[dataname]],
unfiltered_data = unfiltered_data_objs[[dataname]],
dataname = dataname,
subject_keys = subject_keys
)
}
)

unssuported_idx <- vapply(rows, function(x) all(is.na(x[-1])), logical(1)) # this is mainly for vectors
26 changes: 26 additions & 0 deletions unsully/tests/test_cases/real_life_0.expected
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
rows <- lapply(
datanames,
function(dataname) {
parent <- teal.data::parent(joinkeys, dataname)

# todo: what should we display for a parent dataset?
# - Obs and Subjects
# - Obs only
# - Subjects only
# todo (for later): summary table should be displayed in a way that child datasets
# are indented under their parent dataset to form a tree structure
subject_keys <- if (length(parent) > 0) {
names(joinkeys[dataname, parent])
} else {
joinkeys[dataname, dataname]
}
get_object_filter_overview(
filtered_data = filtered_data_objs[[dataname]],
unfiltered_data = unfiltered_data_objs[[dataname]],
dataname = dataname,
subject_keys = subject_keys
)
}
)

unssuported_idx <- vapply(rows, function(x) all(is.na(x[-1])), logical(1)) # this is mainly for vectors

0 comments on commit 91faf53

Please sign in to comment.