
To find duplicate files by their content, we can use a HashMap (or Dictionary). For each directory-info string, parse the directory path and the files with their contents. Use the content as the key in the map and store the full paths of the files as its values. After parsing all inputs, the map entries whose value lists contain more than one path represent duplicate files.
Time Complexity: O(n), where n is the total number of characters in all file paths. We iterate over each character once.
Space Complexity: O(n) to store the lists of file paths in the dictionary.
1#include <stdio.h>
2#include <stdlib.h>
3#include <string.h>
4
5#define MAX_LENGTH 3000
6
7typedef struct FileNode {
8 char* path;
9 struct FileNode* next;
10} FileNode;
11
12typedef struct {
13 char* content;
14 FileNode* files;
15} ContentGroup;
16
17char*** findDuplicate(char** paths, int pathsSize, int* returnSize, int** returnColumnSizes){
18 // For simplicity, assume the maximum number of unique contents is small
19 ContentGroup contentGroups[1000];
20 int contentGroupCount = 0;
21
22 for (int i = 0; i < pathsSize; ++i) {
23 char* path = paths[i];
24 char* dirName = strtok(path, " ");
25 char* filePart;
26 while ((filePart = strtok(NULL, " ")) != NULL) {
27 // Get file name and content
28 char* openBracket = strchr(filePart, '(');
29 *openBracket = '\0';
30 char* fileName = filePart;
31 char* content = openBracket + 1;
32 char* closeBracket = strchr(content, ')');
33 *closeBracket = '\0';
34
35 // Form full path
36 char* fullPath = (char*)malloc(strlen(dirName) + strlen(fileName) + 2);
37 sprintf(fullPath, "%s/%s", dirName, fileName);
38
39 // Search for this content
40 int j;
41 for (j = 0; j < contentGroupCount; ++j) {
42 if (strcmp(contentGroups[j].content, content) == 0) {
43 break;
44 }
45 }
46
47 // If found, append to files
48 if (j < contentGroupCount) {
49 FileNode* newNode = (FileNode*)malloc(sizeof(FileNode));
50 newNode->path = fullPath;
51 newNode->next = contentGroups[j].files;
52 contentGroups[j].files = newNode;
53 } else { // Create new content group
54 contentGroups[contentGroupCount].content = strdup(content);
55 FileNode* newNode = (FileNode*)malloc(sizeof(FileNode));
56 newNode->path = fullPath;
57 newNode->next = NULL;
58 contentGroups[contentGroupCount].files = newNode;
59 ++contentGroupCount;
60 }
61 }
62 }
63
64 // Allocate return arrays
65 char*** result = (char***)malloc(contentGroupCount * sizeof(char**));
66 *returnColumnSizes = (int*)malloc(contentGroupCount * sizeof(int));
67 *returnSize = 0;
68 for (int i = 0; i < contentGroupCount; ++i) {
69 int count = 0;
70 FileNode* node;
71 for (node = contentGroups[i].files; node; node = node->next, ++count);
72 if (count > 1) {
73 (*returnColumnSizes)[*returnSize] = count;
74 result[*returnSize] = (char**)malloc(count * sizeof(char*));
75 node = contentGroups[i].files;
76 for (int j = 0; j < count; ++j) {
77 result[*returnSize][j] = node->path;
78 node = node->next;
79 }
80 ++(*returnSize);
81 }
82 }
83
84 // Free the content groups
85 for (int i = 0; i < contentGroupCount; ++i) {
86 free(contentGroups[i].content);
87 FileNode* node = contentGroups[i].files;
88 while (node) {
89 FileNode* next = node->next;
90 // Path pointers are stored in result
91 free(node);
92 node = next;
93 }
94 }
95 return result;
This C solution simulates hashmap functionality using a structure that groups file paths sharing the same content. For each input string, it extracts the directory and file data to create full-path strings, storing them in linked lists indexed by content. The result is collected from the lists that have more than one entry and formatted according to the exercise's requirements.
This approach directly processes the string data and arranges the results in a 2D array. Strings are manipulated to extract the directory, file names, and contents into standalone variables, and each full path is then appended to a growing structure. Compared to built-in hash maps, this method uses plain arrays to aggregate identical files.
Time Complexity: O(n), where n is the total input character count due to one-pass evaluation.
Space Complexity: O(n), maintaining paths and intermediate arrays.
using System;
using System.Collections.Generic;

/// <summary>
/// Groups files by identical content and reports the groups that contain
/// more than one file ("Find Duplicate File in System").
/// </summary>
public class DuplicateWithArrays
{
    /// <summary>
    /// Finds all groups of files that share the same content.
    /// </summary>
    /// <param name="paths">
    /// Each entry has the form "dir f1.txt(content1) f2.txt(content2) ...".
    /// </param>
    /// <returns>
    /// One inner list per content that occurs in at least two files; each
    /// inner list holds the full paths ("dir/file") of those files.
    /// </returns>
    public IList<IList<string>> FindDuplicate(string[] paths)
    {
        Dictionary<string, List<string>> contentMap = new Dictionary<string, List<string>>();
        foreach (string path in paths)
        {
            string[] parts = path.Split(' ');
            string directory = parts[0];
            for (int i = 1; i < parts.Length; i++)
            {
                int openParenIndex = parts[i].IndexOf('(');
                int closeParenIndex = parts[i].IndexOf(')', openParenIndex);
                string name = parts[i].Substring(0, openParenIndex);
                string content = parts[i].Substring(openParenIndex + 1, closeParenIndex - openParenIndex - 1);

                // Single TryGetValue lookup instead of ContainsKey + indexer
                // + indexer (three hash lookups per file).
                if (!contentMap.TryGetValue(content, out List<string> group))
                {
                    group = new List<string>();
                    contentMap[content] = group;
                }
                group.Add(directory + "/" + name);
            }
        }

        var result = new List<IList<string>>();
        foreach (var pair in contentMap)
        {
            if (pair.Value.Count > 1)
            {
                result.Add(pair.Value);
            }
        }
        return result;
    }
}
Utilizing C#'s native string capabilities, this implementation deliberately processes the content without heavy structuring, using simple substring and index functions to group matching file paths by their contents.