Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
138 changes: 124 additions & 14 deletions .github/workflows/lint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,14 +36,109 @@ jobs:
echo "Changed files: $FILES"

if [ -n "$FILES" ]; then
echo "$FILES" | xargs ./tools/linter/zig-out/bin/linter > lint_results.json
echo "$FILES" | xargs ./tools/linter/zig-out/bin/linter > lint_results_raw.json
else
echo "[]" > lint_results.json
echo "[]" > lint_results_raw.json
fi

# Debug output
echo "Lint results:"
cat lint_results.json
echo "Raw lint results:"
cat lint_results_raw.json

# Runs after the raw linter step: keeps only issues whose line was actually
# touched by this PR, so inline review comments never land on unchanged lines.
- name: Filter lint results to changed lines
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const { execSync } = require('child_process');

// lint_results_raw.json is produced by the previous step (linter output,
// or "[]" when no files changed). Missing file means that step was skipped.
if (!fs.existsSync('lint_results_raw.json')) {
fs.writeFileSync('lint_results.json', '[]');
console.log('No raw lint results found');
return;
}

const rawIssues = JSON.parse(fs.readFileSync('lint_results_raw.json', 'utf8'));

if (rawIssues.length === 0) {
fs.writeFileSync('lint_results.json', '[]');
console.log('No lint issues found');
return;
}

console.log(`Found ${rawIssues.length} total lint issue(s)`);

// Get diff with line numbers
// These ${{ ... }} expressions are substituted by Actions before the JS runs.
const baseSha = '${{ github.event.pull_request.base.sha }}';
const headSha = '${{ github.event.pull_request.head.sha }}';

// Parse diff to get changed line numbers per file
// Maps file path -> Set of head-side line numbers added/modified by the PR.
const changedLines = {};

for (const issue of rawIssues) {
const file = issue.file;

// Compute each file's changed-line set at most once, on first encounter.
if (!changedLines[file]) {
try {
// Get the diff for this specific file
// --unified=0 removes context lines, so every hunk header describes
// exactly the changed lines and nothing else.
const diff = execSync(
`git diff --unified=0 ${baseSha} ${headSha} -- "${file}"`,
{ encoding: 'utf8' }
);

// Parse the diff to extract changed line numbers
const lines = new Set();
const diffLines = diff.split('\n');

for (const line of diffLines) {
// Match the @@ -start,count +start,count @@ format
// Capture the "+" (new-file) side; a missing count means 1 line.
const match = line.match(/^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@/);
if (match) {
const startLine = parseInt(match[1]);
const count = match[2] ? parseInt(match[2]) : 1;

// Add all changed lines in this hunk
for (let i = 0; i < count; i++) {
lines.add(startLine + i);
}
}
}

changedLines[file] = lines;
console.log(`File ${file}: ${lines.size} changed line(s)`);
} catch (error) {
// NOTE(review): on git failure this file gets an empty set, which
// silently filters out ALL of its issues. A shallow checkout that
// lacks baseSha would trigger this — confirm the checkout step uses
// fetch-depth: 0 (or fetches both SHAs).
console.log(`Could not get diff for ${file}: ${error.message}`);
changedLines[file] = new Set();
}
}
}

// Filter issues to only those on changed lines
const filteredIssues = rawIssues.filter(issue => {
const fileLines = changedLines[issue.file] || new Set();
const isOnChangedLine = fileLines.has(issue.line);
if (!isOnChangedLine) {
console.log(`Filtered out: ${issue.file}:${issue.line} (not on changed line)`);
}
return isOnChangedLine;
});

// Save filtered issues
fs.writeFileSync('lint_results.json', JSON.stringify(filteredIssues, null, 2));

// Create summary of filtered issues
// filtered_issues.txt feeds the collapsed "<details>" section of the review
// body in the next step; it is only written when something was dropped.
const filtered = rawIssues.length - filteredIssues.length;
if (filtered > 0) {
console.log(`Filtered out ${filtered} issue(s) on unchanged lines`);

const filteredOut = rawIssues.filter(issue => !filteredIssues.includes(issue));
const summary = filteredOut.map(i => `${i.file}:${i.line}: ${i.message}`).join('\n');
fs.writeFileSync('filtered_issues.txt',
`The following ${filtered} issue(s) exist but are not on lines changed in this PR:\n\n${summary}`
);
}

console.log(`Kept ${filteredIssues.length} issue(s) on changed lines for inline comments`);

- name: Post review with grouped comments
uses: actions/github-script@v7
Expand Down Expand Up @@ -150,7 +245,11 @@ jobs:
review.body && review.body.includes('<!-- lint-review -->')
);

if (!content || content === '[]') {
// Check if there are filtered issues
const hasFilteredIssues = fs.existsSync('filtered_issues.txt');
const filteredContent = hasFilteredIssues ? fs.readFileSync('filtered_issues.txt', 'utf8') : '';

if ((!content || content === '[]') && !hasFilteredIssues) {
console.log('No lint issues found');

// Dismiss all existing bot reviews and resolve their conversations
Expand All @@ -171,11 +270,11 @@ jobs:
return;
}

const issues = JSON.parse(content);
const issues = content && content !== '[]' ? JSON.parse(content) : [];

// Create hash of current issues to check if review needs updating
const issuesHash = crypto.createHash('md5')
.update(JSON.stringify(issues.map(i => `${i.file}:${i.line}:${i.message}`)))
.update(JSON.stringify(issues.map(i => `${i.file}:${i.line}:${i.message}`)) + filteredContent)
.digest('hex')
.substring(0, 8);

Expand Down Expand Up @@ -204,7 +303,7 @@ jobs:
}
}

// Prepare review comments
// Prepare review comments (only for issues on changed lines)
const reviewComments = [];
const issuesByFile = {};

Expand All @@ -226,26 +325,37 @@ jobs:
const fileCount = Object.keys(issuesByFile).length;

let reviewBody = `## 🔍 Lint Results\n\n`;
reviewBody += `Found **${totalIssues}** issue${totalIssues !== 1 ? 's' : ''} in **${fileCount}** file${fileCount !== 1 ? 's' : ''}:\n\n`;

for (const [file, fileIssues] of Object.entries(issuesByFile)) {
reviewBody += `- **${file}**: ${fileIssues.length} issue${fileIssues.length !== 1 ? 's' : ''}\n`;
if (totalIssues > 0) {
reviewBody += `Found **${totalIssues}** issue${totalIssues !== 1 ? 's' : ''} on changed lines in **${fileCount}** file${fileCount !== 1 ? 's' : ''}:\n\n`;

for (const [file, fileIssues] of Object.entries(issuesByFile)) {
reviewBody += `- **${file}**: ${fileIssues.length} issue${fileIssues.length !== 1 ? 's' : ''}\n`;
}
}

// Add filtered issues summary if exists
if (hasFilteredIssues) {
reviewBody += `\n<details>\n<summary>ℹ️ Additional issues on unchanged lines</summary>\n\n\`\`\`\n${filteredContent}\n\`\`\`\n\n</details>\n`;
}

reviewBody += `\n<!-- lint-review -->\n<!-- lint-hash:${issuesHash} -->`;

// Determine review event type
const reviewEvent = totalIssues > 0 ? 'REQUEST_CHANGES' : 'COMMENT';

try {
const review = await github.rest.pulls.createReview({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.issue.number,
commit_id: context.payload.pull_request.head.sha,
body: reviewBody,
event: 'REQUEST_CHANGES',
event: reviewEvent,
comments: reviewComments
});

console.log(`Created review with ${reviewComments.length} comments`);
console.log(`Created review with ${reviewComments.length} inline comment(s)`);
} catch (error) {
console.error(`Failed to create review:`, error.message);

Expand All @@ -257,7 +367,7 @@ jobs:
pull_number: context.issue.number,
commit_id: context.payload.pull_request.head.sha,
body: reviewBody + '\n\n⚠️ Could not attach inline comments due to an error.',
event: 'REQUEST_CHANGES'
event: reviewEvent
});
console.log('Created review without inline comments as fallback');
} catch (fallbackError) {
Expand Down
72 changes: 58 additions & 14 deletions tools/linter/src/main.zig
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@ const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;

const Token = std.zig.Token;
const TokenIndex = std.zig.Ast.TokenIndex;

const Issue = struct {
file: []const u8,
line: u32,
Expand All @@ -20,8 +23,8 @@ pub fn main() !void {
const args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args);

var issues: std.array_list.Managed(Issue) = .init(allocator);
defer issues.deinit();
var issues: std.ArrayListUnmanaged(Issue) = .{};
defer issues.deinit(allocator);

for (args[1..]) |path| {
const source = std.fs.cwd().readFileAllocOptions(allocator, path, 100 * 1024 * 1024, null, .@"1", 0) catch |err| {
Expand All @@ -32,6 +35,8 @@ pub fn main() !void {

var ast = try std.zig.Ast.parse(allocator, source, .zig);
defer ast.deinit(allocator);

try check_todos(allocator, path, source, &issues);
for (ast.nodes.items(.tag), ast.nodes.items(.main_token)) |node_tag, main_tok_idx| {
switch (node_tag) {
.fn_proto_simple,
Expand All @@ -48,7 +53,7 @@ pub fn main() !void {
snake_case,
});

try issues.append(.{
try issues.append(allocator, .{
.line = @intCast(location.line + 1),
.message = message,
.file = path,
Expand Down Expand Up @@ -78,9 +83,6 @@ pub fn main() !void {
try writer.flush();
}

const Token = std.zig.Token;
const TokenIndex = std.zig.Ast.TokenIndex;

fn find_first_token_tag(ast: std.zig.Ast, tag: Token.Tag, start_idx: TokenIndex) TokenIndex {
return for (ast.tokens.items(.tag)[start_idx..], start_idx..) |token_tag, token_idx| {
if (token_tag == tag)
Expand Down Expand Up @@ -118,24 +120,66 @@ fn camel_to_snake(arena: Allocator, str: []const u8) ![]const u8 {
if (str.len == 0)
return str;

var ret = std.array_list.Managed(u8).init(arena);
errdefer ret.deinit();
var ret = std.ArrayListUnmanaged(u8){};
errdefer ret.deinit(arena);

if (std.ascii.isUpper(str[0])) {
try ret.append(std.ascii.toLower(str[0]));
try ret.append(arena, std.ascii.toLower(str[0]));
} else {
try ret.append(str[0]);
try ret.append(arena, str[0]);
}

for (str[1..]) |c| {
if (std.ascii.isUpper(c)) {
// Add underscore before uppercase letters
try ret.append('_');
try ret.append(std.ascii.toLower(c));
try ret.append(arena, '_');
try ret.append(arena, std.ascii.toLower(c));
} else {
try ret.append(c);
try ret.append(arena, c);
}
}

return ret.toOwnedSlice();
return ret.toOwnedSlice(arena);
}

// Keywords that mark a comment as a TODO-style annotation needing a linked
// issue (see check_todos). Matching is case-sensitive and substring-based:
// only these exact uppercase forms trigger the lint.
const uppercase_todo_keywords: []const []const u8 = &.{
"TODO",
"HACK",
"FIXME",
"XXX",
"BUG",
"NOTE",
};

/// Scans `source` line by line and appends an Issue for every comment that
/// contains a TODO-style keyword (see `uppercase_todo_keywords`) but has no
/// microzig issue link on the same line. Line numbers are 1-based.
/// NOTE(review): "//" inside a string literal is treated as a comment start,
/// so a TODO keyword inside such a literal can be flagged — confirm this is
/// acceptable, or tokenize via the Ast instead.
pub fn check_todos(
    allocator: Allocator,
    path: []const u8,
    source: []const u8,
    issues: *std.ArrayListUnmanaged(Issue),
) !void {
    var lines = std.mem.splitScalar(u8, source, '\n');
    var current_line: u32 = 1;
    while (lines.next()) |text| {
        // defer keeps the counter correct across every `continue` below.
        defer current_line += 1;

        const slash_idx = std.mem.indexOf(u8, text, "//") orelse continue;
        const comment_text = text[slash_idx..];

        if (!contains_todo_keyword(comment_text)) continue;
        if (contains_link(comment_text)) continue;

        try issues.append(allocator, .{
            .file = path,
            .line = current_line,
            .message = "TODO style comments need to have a linked microzig issue on the same line.",
        });
    }
}

/// Returns true if `text` references a microzig GitHub issue.
/// Only the canonical issues URL counts as a link here; arbitrary URLs
/// deliberately do not satisfy the TODO-comment lint.
fn contains_link(text: []const u8) bool {
    const issue_url = "https://github.com/ZigEmbeddedGroup/microzig/issues";
    return std.mem.containsAtLeast(u8, text, 1, issue_url);
}

/// Returns true if any uppercase TODO-style keyword from
/// `uppercase_todo_keywords` occurs anywhere in `text` (substring match,
/// case-sensitive).
fn contains_todo_keyword(text: []const u8) bool {
    for (uppercase_todo_keywords) |keyword| {
        if (std.mem.indexOf(u8, text, keyword) != null) return true;
    }
    return false;
}
Loading