##// END OF EJS Templates
rust: add Vfs trait...
Raphaël Gomès -
r52761:db7dbe6f default
parent child Browse files
Show More
@@ -1,1774 +1,1820
1 1 # This file is automatically @generated by Cargo.
2 2 # It is not intended for manual editing.
3 3 version = 3
4 4
5 5 [[package]]
6 6 name = "adler"
7 7 version = "1.0.2"
8 8 source = "registry+https://github.com/rust-lang/crates.io-index"
9 9 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
10 10
11 11 [[package]]
12 12 name = "ahash"
13 13 version = "0.8.2"
14 14 source = "registry+https://github.com/rust-lang/crates.io-index"
15 15 checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107"
16 16 dependencies = [
17 17 "cfg-if",
18 18 "once_cell",
19 19 "version_check",
20 20 ]
21 21
22 22 [[package]]
23 23 name = "aho-corasick"
24 24 version = "0.7.19"
25 25 source = "registry+https://github.com/rust-lang/crates.io-index"
26 26 checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
27 27 dependencies = [
28 28 "memchr",
29 29 ]
30 30
31 31 [[package]]
32 32 name = "android-tzdata"
33 33 version = "0.1.1"
34 34 source = "registry+https://github.com/rust-lang/crates.io-index"
35 35 checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
36 36
37 37 [[package]]
38 38 name = "android_system_properties"
39 39 version = "0.1.5"
40 40 source = "registry+https://github.com/rust-lang/crates.io-index"
41 41 checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
42 42 dependencies = [
43 43 "libc",
44 44 ]
45 45
46 46 [[package]]
47 47 name = "atty"
48 48 version = "0.2.14"
49 49 source = "registry+https://github.com/rust-lang/crates.io-index"
50 50 checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
51 51 dependencies = [
52 52 "hermit-abi",
53 53 "libc",
54 54 "winapi",
55 55 ]
56 56
57 57 [[package]]
58 58 name = "autocfg"
59 59 version = "1.1.0"
60 60 source = "registry+https://github.com/rust-lang/crates.io-index"
61 61 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
62 62
63 63 [[package]]
64 64 name = "bitflags"
65 65 version = "1.3.2"
66 66 source = "registry+https://github.com/rust-lang/crates.io-index"
67 67 checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
68 68
69 69 [[package]]
70 70 name = "bitflags"
71 71 version = "2.6.0"
72 72 source = "registry+https://github.com/rust-lang/crates.io-index"
73 73 checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
74 74
75 75 [[package]]
76 76 name = "bitmaps"
77 77 version = "2.1.0"
78 78 source = "registry+https://github.com/rust-lang/crates.io-index"
79 79 checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2"
80 80 dependencies = [
81 81 "typenum",
82 82 ]
83 83
84 84 [[package]]
85 85 name = "bitvec"
86 86 version = "1.0.1"
87 87 source = "registry+https://github.com/rust-lang/crates.io-index"
88 88 checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
89 89 dependencies = [
90 90 "funty",
91 91 "radium",
92 92 "tap",
93 93 "wyz",
94 94 ]
95 95
96 96 [[package]]
97 97 name = "block-buffer"
98 98 version = "0.9.0"
99 99 source = "registry+https://github.com/rust-lang/crates.io-index"
100 100 checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
101 101 dependencies = [
102 102 "generic-array",
103 103 ]
104 104
105 105 [[package]]
106 106 name = "block-buffer"
107 107 version = "0.10.3"
108 108 source = "registry+https://github.com/rust-lang/crates.io-index"
109 109 checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
110 110 dependencies = [
111 111 "generic-array",
112 112 ]
113 113
114 114 [[package]]
115 115 name = "bstr"
116 116 version = "1.6.0"
117 117 source = "registry+https://github.com/rust-lang/crates.io-index"
118 118 checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05"
119 119 dependencies = [
120 120 "memchr",
121 121 "regex-automata",
122 122 "serde",
123 123 ]
124 124
125 125 [[package]]
126 126 name = "bumpalo"
127 127 version = "3.11.1"
128 128 source = "registry+https://github.com/rust-lang/crates.io-index"
129 129 checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
130 130
131 131 [[package]]
132 132 name = "byteorder"
133 133 version = "1.4.3"
134 134 source = "registry+https://github.com/rust-lang/crates.io-index"
135 135 checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
136 136
137 137 [[package]]
138 138 name = "bytes-cast"
139 139 version = "0.3.0"
140 140 source = "registry+https://github.com/rust-lang/crates.io-index"
141 141 checksum = "a20de93b91d7703ca0e39e12930e310acec5ff4d715f4166e0ab026babb352e8"
142 142 dependencies = [
143 143 "bytes-cast-derive",
144 144 ]
145 145
146 146 [[package]]
147 147 name = "bytes-cast-derive"
148 148 version = "0.2.0"
149 149 source = "registry+https://github.com/rust-lang/crates.io-index"
150 150 checksum = "7470a6fcce58cde3d62cce758bf71007978b75247e6becd9255c9b884bcb4f71"
151 151 dependencies = [
152 152 "proc-macro2",
153 153 "quote",
154 154 "syn",
155 155 ]
156 156
157 157 [[package]]
158 158 name = "cc"
159 159 version = "1.0.76"
160 160 source = "registry+https://github.com/rust-lang/crates.io-index"
161 161 checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f"
162 162 dependencies = [
163 163 "jobserver",
164 164 ]
165 165
166 166 [[package]]
167 167 name = "cfg-if"
168 168 version = "1.0.0"
169 169 source = "registry+https://github.com/rust-lang/crates.io-index"
170 170 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
171 171
172 172 [[package]]
173 173 name = "chrono"
174 174 version = "0.4.34"
175 175 source = "registry+https://github.com/rust-lang/crates.io-index"
176 176 checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b"
177 177 dependencies = [
178 178 "android-tzdata",
179 179 "iana-time-zone",
180 180 "js-sys",
181 181 "num-traits",
182 182 "wasm-bindgen",
183 "windows-targets 0.52.0",
183 "windows-targets 0.52.6",
184 184 ]
185 185
186 186 [[package]]
187 187 name = "clap"
188 188 version = "4.0.24"
189 189 source = "registry+https://github.com/rust-lang/crates.io-index"
190 190 checksum = "60494cedb60cb47462c0ff7be53de32c0e42a6fc2c772184554fa12bd9489c03"
191 191 dependencies = [
192 192 "atty",
193 193 "bitflags 1.3.2",
194 194 "clap_derive",
195 195 "clap_lex",
196 196 "once_cell",
197 197 "strsim",
198 198 "termcolor",
199 199 ]
200 200
201 201 [[package]]
202 202 name = "clap_derive"
203 203 version = "4.0.21"
204 204 source = "registry+https://github.com/rust-lang/crates.io-index"
205 205 checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014"
206 206 dependencies = [
207 207 "heck",
208 208 "proc-macro-error",
209 209 "proc-macro2",
210 210 "quote",
211 211 "syn",
212 212 ]
213 213
214 214 [[package]]
215 215 name = "clap_lex"
216 216 version = "0.3.0"
217 217 source = "registry+https://github.com/rust-lang/crates.io-index"
218 218 checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8"
219 219 dependencies = [
220 220 "os_str_bytes",
221 221 ]
222 222
223 223 [[package]]
224 224 name = "codespan-reporting"
225 225 version = "0.11.1"
226 226 source = "registry+https://github.com/rust-lang/crates.io-index"
227 227 checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
228 228 dependencies = [
229 229 "termcolor",
230 230 "unicode-width",
231 231 ]
232 232
233 233 [[package]]
234 234 name = "convert_case"
235 235 version = "0.4.0"
236 236 source = "registry+https://github.com/rust-lang/crates.io-index"
237 237 checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
238 238
239 239 [[package]]
240 240 name = "core-foundation-sys"
241 241 version = "0.8.3"
242 242 source = "registry+https://github.com/rust-lang/crates.io-index"
243 243 checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
244 244
245 245 [[package]]
246 246 name = "cpufeatures"
247 247 version = "0.2.5"
248 248 source = "registry+https://github.com/rust-lang/crates.io-index"
249 249 checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320"
250 250 dependencies = [
251 251 "libc",
252 252 ]
253 253
254 254 [[package]]
255 255 name = "cpython"
256 256 version = "0.7.2"
257 257 source = "registry+https://github.com/rust-lang/crates.io-index"
258 258 checksum = "43b398a2c65baaf5892f10bb69b52508bf7a993380cc4ecd3785aaebb5c79389"
259 259 dependencies = [
260 260 "libc",
261 261 "num-traits",
262 262 "paste",
263 263 "python3-sys",
264 264 ]
265 265
266 266 [[package]]
267 267 name = "crc32fast"
268 268 version = "1.3.2"
269 269 source = "registry+https://github.com/rust-lang/crates.io-index"
270 270 checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
271 271 dependencies = [
272 272 "cfg-if",
273 273 ]
274 274
275 275 [[package]]
276 276 name = "crossbeam-channel"
277 277 version = "0.5.6"
278 278 source = "registry+https://github.com/rust-lang/crates.io-index"
279 279 checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
280 280 dependencies = [
281 281 "cfg-if",
282 282 "crossbeam-utils",
283 283 ]
284 284
285 285 [[package]]
286 286 name = "crossbeam-deque"
287 287 version = "0.8.2"
288 288 source = "registry+https://github.com/rust-lang/crates.io-index"
289 289 checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
290 290 dependencies = [
291 291 "cfg-if",
292 292 "crossbeam-epoch",
293 293 "crossbeam-utils",
294 294 ]
295 295
296 296 [[package]]
297 297 name = "crossbeam-epoch"
298 298 version = "0.9.11"
299 299 source = "registry+https://github.com/rust-lang/crates.io-index"
300 300 checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348"
301 301 dependencies = [
302 302 "autocfg",
303 303 "cfg-if",
304 304 "crossbeam-utils",
305 305 "memoffset",
306 306 "scopeguard",
307 307 ]
308 308
309 309 [[package]]
310 310 name = "crossbeam-utils"
311 311 version = "0.8.12"
312 312 source = "registry+https://github.com/rust-lang/crates.io-index"
313 313 checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac"
314 314 dependencies = [
315 315 "cfg-if",
316 316 ]
317 317
318 318 [[package]]
319 319 name = "crypto-common"
320 320 version = "0.1.6"
321 321 source = "registry+https://github.com/rust-lang/crates.io-index"
322 322 checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
323 323 dependencies = [
324 324 "generic-array",
325 325 "typenum",
326 326 ]
327 327
328 328 [[package]]
329 329 name = "ctor"
330 330 version = "0.1.26"
331 331 source = "registry+https://github.com/rust-lang/crates.io-index"
332 332 checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096"
333 333 dependencies = [
334 334 "quote",
335 335 "syn",
336 336 ]
337 337
338 338 [[package]]
339 339 name = "cxx"
340 340 version = "1.0.81"
341 341 source = "registry+https://github.com/rust-lang/crates.io-index"
342 342 checksum = "97abf9f0eca9e52b7f81b945524e76710e6cb2366aead23b7d4fbf72e281f888"
343 343 dependencies = [
344 344 "cc",
345 345 "cxxbridge-flags",
346 346 "cxxbridge-macro",
347 347 "link-cplusplus",
348 348 ]
349 349
350 350 [[package]]
351 351 name = "cxx-build"
352 352 version = "1.0.81"
353 353 source = "registry+https://github.com/rust-lang/crates.io-index"
354 354 checksum = "7cc32cc5fea1d894b77d269ddb9f192110069a8a9c1f1d441195fba90553dea3"
355 355 dependencies = [
356 356 "cc",
357 357 "codespan-reporting",
358 358 "once_cell",
359 359 "proc-macro2",
360 360 "quote",
361 361 "scratch",
362 362 "syn",
363 363 ]
364 364
365 365 [[package]]
366 366 name = "cxxbridge-flags"
367 367 version = "1.0.81"
368 368 source = "registry+https://github.com/rust-lang/crates.io-index"
369 369 checksum = "8ca220e4794c934dc6b1207c3b42856ad4c302f2df1712e9f8d2eec5afaacf1f"
370 370
371 371 [[package]]
372 372 name = "cxxbridge-macro"
373 373 version = "1.0.81"
374 374 source = "registry+https://github.com/rust-lang/crates.io-index"
375 375 checksum = "b846f081361125bfc8dc9d3940c84e1fd83ba54bbca7b17cd29483c828be0704"
376 376 dependencies = [
377 377 "proc-macro2",
378 378 "quote",
379 379 "syn",
380 380 ]
381 381
382 382 [[package]]
383 383 name = "derive_more"
384 384 version = "0.99.17"
385 385 source = "registry+https://github.com/rust-lang/crates.io-index"
386 386 checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
387 387 dependencies = [
388 388 "convert_case",
389 389 "proc-macro2",
390 390 "quote",
391 391 "rustc_version",
392 392 "syn",
393 393 ]
394 394
395 395 [[package]]
396 396 name = "diff"
397 397 version = "0.1.13"
398 398 source = "registry+https://github.com/rust-lang/crates.io-index"
399 399 checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
400 400
401 401 [[package]]
402 402 name = "digest"
403 403 version = "0.9.0"
404 404 source = "registry+https://github.com/rust-lang/crates.io-index"
405 405 checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
406 406 dependencies = [
407 407 "generic-array",
408 408 ]
409 409
410 410 [[package]]
411 411 name = "digest"
412 412 version = "0.10.5"
413 413 source = "registry+https://github.com/rust-lang/crates.io-index"
414 414 checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
415 415 dependencies = [
416 416 "block-buffer 0.10.3",
417 417 "crypto-common",
418 418 ]
419 419
420 420 [[package]]
421 421 name = "dirs"
422 422 version = "5.0.1"
423 423 source = "registry+https://github.com/rust-lang/crates.io-index"
424 424 checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225"
425 425 dependencies = [
426 426 "dirs-sys",
427 427 ]
428 428
429 429 [[package]]
430 430 name = "dirs-sys"
431 431 version = "0.4.1"
432 432 source = "registry+https://github.com/rust-lang/crates.io-index"
433 433 checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c"
434 434 dependencies = [
435 435 "libc",
436 436 "option-ext",
437 437 "redox_users",
438 "windows-sys",
438 "windows-sys 0.48.0",
439 439 ]
440 440
441 441 [[package]]
442 name = "dyn-clone"
443 version = "1.0.17"
444 source = "registry+https://github.com/rust-lang/crates.io-index"
445 checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
446
447 [[package]]
442 448 name = "either"
443 449 version = "1.8.0"
444 450 source = "registry+https://github.com/rust-lang/crates.io-index"
445 451 checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
446 452
447 453 [[package]]
448 454 name = "env_logger"
449 455 version = "0.9.3"
450 456 source = "registry+https://github.com/rust-lang/crates.io-index"
451 457 checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
452 458 dependencies = [
453 459 "atty",
454 460 "humantime",
455 461 "log",
456 462 "regex",
457 463 "termcolor",
458 464 ]
459 465
460 466 [[package]]
461 467 name = "fastrand"
462 468 version = "1.8.0"
463 469 source = "registry+https://github.com/rust-lang/crates.io-index"
464 470 checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
465 471 dependencies = [
466 472 "instant",
467 473 ]
468 474
469 475 [[package]]
476 name = "filetime"
477 version = "0.2.25"
478 source = "registry+https://github.com/rust-lang/crates.io-index"
479 checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
480 dependencies = [
481 "cfg-if",
482 "libc",
483 "libredox",
484 "windows-sys 0.59.0",
485 ]
486
487 [[package]]
470 488 name = "flate2"
471 489 version = "1.0.24"
472 490 source = "registry+https://github.com/rust-lang/crates.io-index"
473 491 checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6"
474 492 dependencies = [
475 493 "crc32fast",
476 494 "libz-sys",
477 495 "miniz_oxide",
478 496 ]
479 497
480 498 [[package]]
481 499 name = "format-bytes"
482 500 version = "0.3.0"
483 501 source = "registry+https://github.com/rust-lang/crates.io-index"
484 502 checksum = "48942366ef93975da38e175ac9e10068c6fc08ca9e85930d4f098f4d5b14c2fd"
485 503 dependencies = [
486 504 "format-bytes-macros",
487 505 ]
488 506
489 507 [[package]]
490 508 name = "format-bytes-macros"
491 509 version = "0.4.0"
492 510 source = "registry+https://github.com/rust-lang/crates.io-index"
493 511 checksum = "203aadebefcc73d12038296c228eabf830f99cba991b0032adf20e9fa6ce7e4f"
494 512 dependencies = [
495 513 "proc-macro2",
496 514 "quote",
497 515 "syn",
498 516 ]
499 517
500 518 [[package]]
501 519 name = "funty"
502 520 version = "2.0.0"
503 521 source = "registry+https://github.com/rust-lang/crates.io-index"
504 522 checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
505 523
506 524 [[package]]
507 525 name = "generic-array"
508 526 version = "0.14.6"
509 527 source = "registry+https://github.com/rust-lang/crates.io-index"
510 528 checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
511 529 dependencies = [
512 530 "typenum",
513 531 "version_check",
514 532 ]
515 533
516 534 [[package]]
517 535 name = "getrandom"
518 536 version = "0.1.16"
519 537 source = "registry+https://github.com/rust-lang/crates.io-index"
520 538 checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
521 539 dependencies = [
522 540 "cfg-if",
523 541 "libc",
524 542 "wasi 0.9.0+wasi-snapshot-preview1",
525 543 ]
526 544
527 545 [[package]]
528 546 name = "getrandom"
529 547 version = "0.2.8"
530 548 source = "registry+https://github.com/rust-lang/crates.io-index"
531 549 checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
532 550 dependencies = [
533 551 "cfg-if",
534 552 "libc",
535 553 "wasi 0.11.0+wasi-snapshot-preview1",
536 554 ]
537 555
538 556 [[package]]
539 557 name = "hashbrown"
540 558 version = "0.12.3"
541 559 source = "registry+https://github.com/rust-lang/crates.io-index"
542 560 checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
543 561
544 562 [[package]]
545 563 name = "hashbrown"
546 564 version = "0.13.1"
547 565 source = "registry+https://github.com/rust-lang/crates.io-index"
548 566 checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038"
549 567 dependencies = [
550 568 "ahash",
551 569 "rayon",
552 570 ]
553 571
554 572 [[package]]
555 573 name = "heck"
556 574 version = "0.4.0"
557 575 source = "registry+https://github.com/rust-lang/crates.io-index"
558 576 checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
559 577
560 578 [[package]]
561 579 name = "hermit-abi"
562 580 version = "0.1.19"
563 581 source = "registry+https://github.com/rust-lang/crates.io-index"
564 582 checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
565 583 dependencies = [
566 584 "libc",
567 585 ]
568 586
569 587 [[package]]
570 588 name = "hex"
571 589 version = "0.4.3"
572 590 source = "registry+https://github.com/rust-lang/crates.io-index"
573 591 checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
574 592
575 593 [[package]]
576 594 name = "hg-core"
577 595 version = "0.1.0"
578 596 dependencies = [
579 597 "bitflags 1.3.2",
580 598 "bitvec",
581 599 "byteorder",
582 600 "bytes-cast",
583 601 "chrono",
584 602 "clap",
585 603 "crossbeam-channel",
586 604 "derive_more",
605 "dyn-clone",
606 "filetime",
587 607 "flate2",
588 608 "format-bytes",
589 609 "hashbrown 0.13.1",
590 610 "home",
591 611 "im-rc",
592 612 "itertools",
593 613 "lazy_static",
594 614 "libc",
595 615 "log",
596 616 "logging_timer",
597 617 "memmap2",
598 618 "once_cell",
599 619 "pretty_assertions",
600 620 "rand 0.8.5",
601 621 "rand_distr",
602 622 "rand_pcg",
603 623 "rayon",
604 624 "regex",
605 625 "same-file",
606 626 "self_cell",
607 627 "serde",
608 628 "sha-1 0.10.0",
609 629 "tempfile",
610 630 "thread_local",
611 631 "toml",
612 632 "twox-hash",
613 633 "zstd",
614 634 ]
615 635
616 636 [[package]]
617 637 name = "hg-cpython"
618 638 version = "0.1.0"
619 639 dependencies = [
620 640 "cpython",
621 641 "crossbeam-channel",
622 642 "env_logger",
623 643 "hg-core",
624 644 "libc",
625 645 "log",
626 646 "stable_deref_trait",
627 647 "vcsgraph",
628 648 ]
629 649
630 650 [[package]]
631 651 name = "home"
632 652 version = "0.5.4"
633 653 source = "registry+https://github.com/rust-lang/crates.io-index"
634 654 checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408"
635 655 dependencies = [
636 656 "winapi",
637 657 ]
638 658
639 659 [[package]]
640 660 name = "humantime"
641 661 version = "2.1.0"
642 662 source = "registry+https://github.com/rust-lang/crates.io-index"
643 663 checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
644 664
645 665 [[package]]
646 666 name = "iana-time-zone"
647 667 version = "0.1.53"
648 668 source = "registry+https://github.com/rust-lang/crates.io-index"
649 669 checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
650 670 dependencies = [
651 671 "android_system_properties",
652 672 "core-foundation-sys",
653 673 "iana-time-zone-haiku",
654 674 "js-sys",
655 675 "wasm-bindgen",
656 676 "winapi",
657 677 ]
658 678
659 679 [[package]]
660 680 name = "iana-time-zone-haiku"
661 681 version = "0.1.1"
662 682 source = "registry+https://github.com/rust-lang/crates.io-index"
663 683 checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
664 684 dependencies = [
665 685 "cxx",
666 686 "cxx-build",
667 687 ]
668 688
669 689 [[package]]
670 690 name = "im-rc"
671 691 version = "15.1.0"
672 692 source = "registry+https://github.com/rust-lang/crates.io-index"
673 693 checksum = "af1955a75fa080c677d3972822ec4bad316169ab1cfc6c257a942c2265dbe5fe"
674 694 dependencies = [
675 695 "bitmaps",
676 696 "rand_core 0.6.4",
677 697 "rand_xoshiro",
678 698 "sized-chunks",
679 699 "typenum",
680 700 "version_check",
681 701 ]
682 702
683 703 [[package]]
684 704 name = "indexmap"
685 705 version = "1.9.2"
686 706 source = "registry+https://github.com/rust-lang/crates.io-index"
687 707 checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
688 708 dependencies = [
689 709 "autocfg",
690 710 "hashbrown 0.12.3",
691 711 ]
692 712
693 713 [[package]]
694 714 name = "instant"
695 715 version = "0.1.12"
696 716 source = "registry+https://github.com/rust-lang/crates.io-index"
697 717 checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
698 718 dependencies = [
699 719 "cfg-if",
700 720 ]
701 721
702 722 [[package]]
703 723 name = "itertools"
704 724 version = "0.10.5"
705 725 source = "registry+https://github.com/rust-lang/crates.io-index"
706 726 checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
707 727 dependencies = [
708 728 "either",
709 729 ]
710 730
711 731 [[package]]
712 732 name = "jobserver"
713 733 version = "0.1.25"
714 734 source = "registry+https://github.com/rust-lang/crates.io-index"
715 735 checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b"
716 736 dependencies = [
717 737 "libc",
718 738 ]
719 739
720 740 [[package]]
721 741 name = "js-sys"
722 742 version = "0.3.60"
723 743 source = "registry+https://github.com/rust-lang/crates.io-index"
724 744 checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
725 745 dependencies = [
726 746 "wasm-bindgen",
727 747 ]
728 748
729 749 [[package]]
730 750 name = "lazy_static"
731 751 version = "1.4.0"
732 752 source = "registry+https://github.com/rust-lang/crates.io-index"
733 753 checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
734 754
735 755 [[package]]
736 756 name = "libc"
737 757 version = "0.2.155"
738 758 source = "registry+https://github.com/rust-lang/crates.io-index"
739 759 checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
740 760
741 761 [[package]]
742 762 name = "libm"
743 763 version = "0.2.6"
744 764 source = "registry+https://github.com/rust-lang/crates.io-index"
745 765 checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
746 766
747 767 [[package]]
748 768 name = "libredox"
749 769 version = "0.1.3"
750 770 source = "registry+https://github.com/rust-lang/crates.io-index"
751 771 checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
752 772 dependencies = [
753 773 "bitflags 2.6.0",
754 774 "libc",
775 "redox_syscall 0.5.3",
755 776 ]
756 777
757 778 [[package]]
758 779 name = "libz-sys"
759 780 version = "1.1.8"
760 781 source = "registry+https://github.com/rust-lang/crates.io-index"
761 782 checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf"
762 783 dependencies = [
763 784 "cc",
764 785 "pkg-config",
765 786 "vcpkg",
766 787 ]
767 788
768 789 [[package]]
769 790 name = "link-cplusplus"
770 791 version = "1.0.7"
771 792 source = "registry+https://github.com/rust-lang/crates.io-index"
772 793 checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369"
773 794 dependencies = [
774 795 "cc",
775 796 ]
776 797
777 798 [[package]]
778 799 name = "log"
779 800 version = "0.4.17"
780 801 source = "registry+https://github.com/rust-lang/crates.io-index"
781 802 checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
782 803 dependencies = [
783 804 "cfg-if",
784 805 ]
785 806
786 807 [[package]]
787 808 name = "logging_timer"
788 809 version = "1.1.0"
789 810 source = "registry+https://github.com/rust-lang/crates.io-index"
790 811 checksum = "64e96f261d684b7089aa576bb74e823241dccd994b27d30fabf1dcb3af284fe9"
791 812 dependencies = [
792 813 "log",
793 814 "logging_timer_proc_macros",
794 815 ]
795 816
796 817 [[package]]
797 818 name = "logging_timer_proc_macros"
798 819 version = "1.1.0"
799 820 source = "registry+https://github.com/rust-lang/crates.io-index"
800 821 checksum = "10a9062912d7952c5588cc474795e0b9ee008e7e6781127945b85413d4b99d81"
801 822 dependencies = [
802 823 "log",
803 824 "proc-macro2",
804 825 "quote",
805 826 "syn",
806 827 ]
807 828
808 829 [[package]]
809 830 name = "memchr"
810 831 version = "2.5.0"
811 832 source = "registry+https://github.com/rust-lang/crates.io-index"
812 833 checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
813 834
814 835 [[package]]
815 836 name = "memmap2"
816 837 version = "0.5.8"
817 838 source = "registry+https://github.com/rust-lang/crates.io-index"
818 839 checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc"
819 840 dependencies = [
820 841 "libc",
821 842 "stable_deref_trait",
822 843 ]
823 844
824 845 [[package]]
825 846 name = "memoffset"
826 847 version = "0.6.5"
827 848 source = "registry+https://github.com/rust-lang/crates.io-index"
828 849 checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
829 850 dependencies = [
830 851 "autocfg",
831 852 ]
832 853
833 854 [[package]]
834 855 name = "miniz_oxide"
835 856 version = "0.5.4"
836 857 source = "registry+https://github.com/rust-lang/crates.io-index"
837 858 checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34"
838 859 dependencies = [
839 860 "adler",
840 861 ]
841 862
842 863 [[package]]
843 864 name = "nom8"
844 865 version = "0.2.0"
845 866 source = "registry+https://github.com/rust-lang/crates.io-index"
846 867 checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8"
847 868 dependencies = [
848 869 "memchr",
849 870 ]
850 871
851 872 [[package]]
852 873 name = "num-traits"
853 874 version = "0.2.15"
854 875 source = "registry+https://github.com/rust-lang/crates.io-index"
855 876 checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
856 877 dependencies = [
857 878 "autocfg",
858 879 "libm",
859 880 ]
860 881
861 882 [[package]]
862 883 name = "num_cpus"
863 884 version = "1.14.0"
864 885 source = "registry+https://github.com/rust-lang/crates.io-index"
865 886 checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
866 887 dependencies = [
867 888 "hermit-abi",
868 889 "libc",
869 890 ]
870 891
871 892 [[package]]
872 893 name = "once_cell"
873 894 version = "1.16.0"
874 895 source = "registry+https://github.com/rust-lang/crates.io-index"
875 896 checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
876 897
877 898 [[package]]
878 899 name = "opaque-debug"
879 900 version = "0.3.0"
880 901 source = "registry+https://github.com/rust-lang/crates.io-index"
881 902 checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
882 903
883 904 [[package]]
884 905 name = "option-ext"
885 906 version = "0.2.0"
886 907 source = "registry+https://github.com/rust-lang/crates.io-index"
887 908 checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
888 909
889 910 [[package]]
890 911 name = "os_str_bytes"
891 912 version = "6.4.0"
892 913 source = "registry+https://github.com/rust-lang/crates.io-index"
893 914 checksum = "7b5bf27447411e9ee3ff51186bf7a08e16c341efdde93f4d823e8844429bed7e"
894 915 dependencies = [
895 916 "memchr",
896 917 ]
897 918
898 919 [[package]]
899 920 name = "output_vt100"
900 921 version = "0.1.3"
901 922 source = "registry+https://github.com/rust-lang/crates.io-index"
902 923 checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66"
903 924 dependencies = [
904 925 "winapi",
905 926 ]
906 927
907 928 [[package]]
908 929 name = "paste"
909 930 version = "1.0.9"
910 931 source = "registry+https://github.com/rust-lang/crates.io-index"
911 932 checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1"
912 933
913 934 [[package]]
914 935 name = "pkg-config"
915 936 version = "0.3.26"
916 937 source = "registry+https://github.com/rust-lang/crates.io-index"
917 938 checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
918 939
919 940 [[package]]
920 941 name = "ppv-lite86"
921 942 version = "0.2.17"
922 943 source = "registry+https://github.com/rust-lang/crates.io-index"
923 944 checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
924 945
925 946 [[package]]
926 947 name = "pretty_assertions"
927 948 version = "1.3.0"
928 949 source = "registry+https://github.com/rust-lang/crates.io-index"
929 950 checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755"
930 951 dependencies = [
931 952 "ctor",
932 953 "diff",
933 954 "output_vt100",
934 955 "yansi",
935 956 ]
936 957
937 958 [[package]]
938 959 name = "proc-macro-error"
939 960 version = "1.0.4"
940 961 source = "registry+https://github.com/rust-lang/crates.io-index"
941 962 checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
942 963 dependencies = [
943 964 "proc-macro-error-attr",
944 965 "proc-macro2",
945 966 "quote",
946 967 "syn",
947 968 "version_check",
948 969 ]
949 970
950 971 [[package]]
951 972 name = "proc-macro-error-attr"
952 973 version = "1.0.4"
953 974 source = "registry+https://github.com/rust-lang/crates.io-index"
954 975 checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
955 976 dependencies = [
956 977 "proc-macro2",
957 978 "quote",
958 979 "version_check",
959 980 ]
960 981
961 982 [[package]]
962 983 name = "proc-macro2"
963 984 version = "1.0.47"
964 985 source = "registry+https://github.com/rust-lang/crates.io-index"
965 986 checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
966 987 dependencies = [
967 988 "unicode-ident",
968 989 ]
969 990
970 991 [[package]]
971 992 name = "python3-sys"
972 993 version = "0.7.2"
973 994 source = "registry+https://github.com/rust-lang/crates.io-index"
974 995 checksum = "0f53ef6740367a09718d2cd21ba15b0d7972342a38e554736bcee7773e45c9f5"
975 996 dependencies = [
976 997 "libc",
977 998 "regex",
978 999 ]
979 1000
980 1001 [[package]]
981 1002 name = "quote"
982 1003 version = "1.0.21"
983 1004 source = "registry+https://github.com/rust-lang/crates.io-index"
984 1005 checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
985 1006 dependencies = [
986 1007 "proc-macro2",
987 1008 ]
988 1009
989 1010 [[package]]
990 1011 name = "radium"
991 1012 version = "0.7.0"
992 1013 source = "registry+https://github.com/rust-lang/crates.io-index"
993 1014 checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
994 1015
995 1016 [[package]]
996 1017 name = "rand"
997 1018 version = "0.7.3"
998 1019 source = "registry+https://github.com/rust-lang/crates.io-index"
999 1020 checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
1000 1021 dependencies = [
1001 1022 "getrandom 0.1.16",
1002 1023 "libc",
1003 1024 "rand_chacha 0.2.2",
1004 1025 "rand_core 0.5.1",
1005 1026 "rand_hc",
1006 1027 ]
1007 1028
1008 1029 [[package]]
1009 1030 name = "rand"
1010 1031 version = "0.8.5"
1011 1032 source = "registry+https://github.com/rust-lang/crates.io-index"
1012 1033 checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
1013 1034 dependencies = [
1014 1035 "libc",
1015 1036 "rand_chacha 0.3.1",
1016 1037 "rand_core 0.6.4",
1017 1038 ]
1018 1039
1019 1040 [[package]]
1020 1041 name = "rand_chacha"
1021 1042 version = "0.2.2"
1022 1043 source = "registry+https://github.com/rust-lang/crates.io-index"
1023 1044 checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
1024 1045 dependencies = [
1025 1046 "ppv-lite86",
1026 1047 "rand_core 0.5.1",
1027 1048 ]
1028 1049
1029 1050 [[package]]
1030 1051 name = "rand_chacha"
1031 1052 version = "0.3.1"
1032 1053 source = "registry+https://github.com/rust-lang/crates.io-index"
1033 1054 checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
1034 1055 dependencies = [
1035 1056 "ppv-lite86",
1036 1057 "rand_core 0.6.4",
1037 1058 ]
1038 1059
1039 1060 [[package]]
1040 1061 name = "rand_core"
1041 1062 version = "0.5.1"
1042 1063 source = "registry+https://github.com/rust-lang/crates.io-index"
1043 1064 checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
1044 1065 dependencies = [
1045 1066 "getrandom 0.1.16",
1046 1067 ]
1047 1068
1048 1069 [[package]]
1049 1070 name = "rand_core"
1050 1071 version = "0.6.4"
1051 1072 source = "registry+https://github.com/rust-lang/crates.io-index"
1052 1073 checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
1053 1074 dependencies = [
1054 1075 "getrandom 0.2.8",
1055 1076 ]
1056 1077
1057 1078 [[package]]
1058 1079 name = "rand_distr"
1059 1080 version = "0.4.3"
1060 1081 source = "registry+https://github.com/rust-lang/crates.io-index"
1061 1082 checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31"
1062 1083 dependencies = [
1063 1084 "num-traits",
1064 1085 "rand 0.8.5",
1065 1086 ]
1066 1087
1067 1088 [[package]]
1068 1089 name = "rand_hc"
1069 1090 version = "0.2.0"
1070 1091 source = "registry+https://github.com/rust-lang/crates.io-index"
1071 1092 checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
1072 1093 dependencies = [
1073 1094 "rand_core 0.5.1",
1074 1095 ]
1075 1096
1076 1097 [[package]]
1077 1098 name = "rand_pcg"
1078 1099 version = "0.3.1"
1079 1100 source = "registry+https://github.com/rust-lang/crates.io-index"
1080 1101 checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e"
1081 1102 dependencies = [
1082 1103 "rand_core 0.6.4",
1083 1104 ]
1084 1105
1085 1106 [[package]]
1086 1107 name = "rand_xoshiro"
1087 1108 version = "0.6.0"
1088 1109 source = "registry+https://github.com/rust-lang/crates.io-index"
1089 1110 checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa"
1090 1111 dependencies = [
1091 1112 "rand_core 0.6.4",
1092 1113 ]
1093 1114
1094 1115 [[package]]
1095 1116 name = "rayon"
1096 1117 version = "1.7.0"
1097 1118 source = "registry+https://github.com/rust-lang/crates.io-index"
1098 1119 checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
1099 1120 dependencies = [
1100 1121 "either",
1101 1122 "rayon-core",
1102 1123 ]
1103 1124
1104 1125 [[package]]
1105 1126 name = "rayon-core"
1106 1127 version = "1.11.0"
1107 1128 source = "registry+https://github.com/rust-lang/crates.io-index"
1108 1129 checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
1109 1130 dependencies = [
1110 1131 "crossbeam-channel",
1111 1132 "crossbeam-deque",
1112 1133 "crossbeam-utils",
1113 1134 "num_cpus",
1114 1135 ]
1115 1136
1116 1137 [[package]]
1117 1138 name = "redox_syscall"
1118 1139 version = "0.2.16"
1119 1140 source = "registry+https://github.com/rust-lang/crates.io-index"
1120 1141 checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
1121 1142 dependencies = [
1122 1143 "bitflags 1.3.2",
1123 1144 ]
1124 1145
1125 1146 [[package]]
1147 name = "redox_syscall"
1148 version = "0.5.3"
1149 source = "registry+https://github.com/rust-lang/crates.io-index"
1150 checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4"
1151 dependencies = [
1152 "bitflags 2.6.0",
1153 ]
1154
1155 [[package]]
1126 1156 name = "redox_users"
1127 1157 version = "0.4.5"
1128 1158 source = "registry+https://github.com/rust-lang/crates.io-index"
1129 1159 checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891"
1130 1160 dependencies = [
1131 1161 "getrandom 0.2.8",
1132 1162 "libredox",
1133 1163 "thiserror",
1134 1164 ]
1135 1165
1136 1166 [[package]]
1137 1167 name = "regex"
1138 1168 version = "1.7.0"
1139 1169 source = "registry+https://github.com/rust-lang/crates.io-index"
1140 1170 checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
1141 1171 dependencies = [
1142 1172 "aho-corasick",
1143 1173 "memchr",
1144 1174 "regex-syntax",
1145 1175 ]
1146 1176
1147 1177 [[package]]
1148 1178 name = "regex-automata"
1149 1179 version = "0.3.9"
1150 1180 source = "registry+https://github.com/rust-lang/crates.io-index"
1151 1181 checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9"
1152 1182
1153 1183 [[package]]
1154 1184 name = "regex-syntax"
1155 1185 version = "0.6.28"
1156 1186 source = "registry+https://github.com/rust-lang/crates.io-index"
1157 1187 checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
1158 1188
1159 1189 [[package]]
1160 1190 name = "remove_dir_all"
1161 1191 version = "0.5.3"
1162 1192 source = "registry+https://github.com/rust-lang/crates.io-index"
1163 1193 checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
1164 1194 dependencies = [
1165 1195 "winapi",
1166 1196 ]
1167 1197
1168 1198 [[package]]
1169 1199 name = "rhg"
1170 1200 version = "0.1.0"
1171 1201 dependencies = [
1172 1202 "atty",
1173 1203 "chrono",
1174 1204 "clap",
1175 1205 "derive_more",
1176 1206 "env_logger",
1177 1207 "format-bytes",
1178 1208 "hg-core",
1179 1209 "home",
1180 1210 "lazy_static",
1181 1211 "libc",
1182 1212 "log",
1183 1213 "logging_timer",
1184 1214 "rayon",
1185 1215 "regex",
1186 1216 "shellexpand",
1187 1217 "which",
1188 1218 "whoami",
1189 1219 ]
1190 1220
1191 1221 [[package]]
1192 1222 name = "rustc_version"
1193 1223 version = "0.4.0"
1194 1224 source = "registry+https://github.com/rust-lang/crates.io-index"
1195 1225 checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
1196 1226 dependencies = [
1197 1227 "semver",
1198 1228 ]
1199 1229
1200 1230 [[package]]
1201 1231 name = "same-file"
1202 1232 version = "1.0.6"
1203 1233 source = "registry+https://github.com/rust-lang/crates.io-index"
1204 1234 checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
1205 1235 dependencies = [
1206 1236 "winapi-util",
1207 1237 ]
1208 1238
1209 1239 [[package]]
1210 1240 name = "scopeguard"
1211 1241 version = "1.1.0"
1212 1242 source = "registry+https://github.com/rust-lang/crates.io-index"
1213 1243 checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
1214 1244
1215 1245 [[package]]
1216 1246 name = "scratch"
1217 1247 version = "1.0.2"
1218 1248 source = "registry+https://github.com/rust-lang/crates.io-index"
1219 1249 checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898"
1220 1250
1221 1251 [[package]]
1222 1252 name = "self_cell"
1223 1253 version = "1.0.0"
1224 1254 source = "registry+https://github.com/rust-lang/crates.io-index"
1225 1255 checksum = "4a3926e239738d36060909ffe6f511502f92149a45a1fade7fe031cb2d33e88b"
1226 1256
1227 1257 [[package]]
1228 1258 name = "semver"
1229 1259 version = "1.0.14"
1230 1260 source = "registry+https://github.com/rust-lang/crates.io-index"
1231 1261 checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4"
1232 1262
1233 1263 [[package]]
1234 1264 name = "serde"
1235 1265 version = "1.0.152"
1236 1266 source = "registry+https://github.com/rust-lang/crates.io-index"
1237 1267 checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
1238 1268 dependencies = [
1239 1269 "serde_derive",
1240 1270 ]
1241 1271
1242 1272 [[package]]
1243 1273 name = "serde_derive"
1244 1274 version = "1.0.152"
1245 1275 source = "registry+https://github.com/rust-lang/crates.io-index"
1246 1276 checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
1247 1277 dependencies = [
1248 1278 "proc-macro2",
1249 1279 "quote",
1250 1280 "syn",
1251 1281 ]
1252 1282
1253 1283 [[package]]
1254 1284 name = "serde_spanned"
1255 1285 version = "0.6.1"
1256 1286 source = "registry+https://github.com/rust-lang/crates.io-index"
1257 1287 checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4"
1258 1288 dependencies = [
1259 1289 "serde",
1260 1290 ]
1261 1291
1262 1292 [[package]]
1263 1293 name = "sha-1"
1264 1294 version = "0.9.8"
1265 1295 source = "registry+https://github.com/rust-lang/crates.io-index"
1266 1296 checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6"
1267 1297 dependencies = [
1268 1298 "block-buffer 0.9.0",
1269 1299 "cfg-if",
1270 1300 "cpufeatures",
1271 1301 "digest 0.9.0",
1272 1302 "opaque-debug",
1273 1303 ]
1274 1304
1275 1305 [[package]]
1276 1306 name = "sha-1"
1277 1307 version = "0.10.0"
1278 1308 source = "registry+https://github.com/rust-lang/crates.io-index"
1279 1309 checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f"
1280 1310 dependencies = [
1281 1311 "cfg-if",
1282 1312 "cpufeatures",
1283 1313 "digest 0.10.5",
1284 1314 ]
1285 1315
1286 1316 [[package]]
1287 1317 name = "shellexpand"
1288 1318 version = "3.1.0"
1289 1319 source = "registry+https://github.com/rust-lang/crates.io-index"
1290 1320 checksum = "da03fa3b94cc19e3ebfc88c4229c49d8f08cdbd1228870a45f0ffdf84988e14b"
1291 1321 dependencies = [
1292 1322 "bstr",
1293 1323 "dirs",
1294 1324 "os_str_bytes",
1295 1325 ]
1296 1326
1297 1327 [[package]]
1298 1328 name = "sized-chunks"
1299 1329 version = "0.6.5"
1300 1330 source = "registry+https://github.com/rust-lang/crates.io-index"
1301 1331 checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e"
1302 1332 dependencies = [
1303 1333 "bitmaps",
1304 1334 "typenum",
1305 1335 ]
1306 1336
1307 1337 [[package]]
1308 1338 name = "stable_deref_trait"
1309 1339 version = "1.2.0"
1310 1340 source = "registry+https://github.com/rust-lang/crates.io-index"
1311 1341 checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
1312 1342
1313 1343 [[package]]
1314 1344 name = "static_assertions"
1315 1345 version = "1.1.0"
1316 1346 source = "registry+https://github.com/rust-lang/crates.io-index"
1317 1347 checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
1318 1348
1319 1349 [[package]]
1320 1350 name = "strsim"
1321 1351 version = "0.10.0"
1322 1352 source = "registry+https://github.com/rust-lang/crates.io-index"
1323 1353 checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
1324 1354
1325 1355 [[package]]
1326 1356 name = "syn"
1327 1357 version = "1.0.109"
1328 1358 source = "registry+https://github.com/rust-lang/crates.io-index"
1329 1359 checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
1330 1360 dependencies = [
1331 1361 "proc-macro2",
1332 1362 "quote",
1333 1363 "unicode-ident",
1334 1364 ]
1335 1365
1336 1366 [[package]]
1337 1367 name = "tap"
1338 1368 version = "1.0.1"
1339 1369 source = "registry+https://github.com/rust-lang/crates.io-index"
1340 1370 checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
1341 1371
1342 1372 [[package]]
1343 1373 name = "tempfile"
1344 1374 version = "3.3.0"
1345 1375 source = "registry+https://github.com/rust-lang/crates.io-index"
1346 1376 checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
1347 1377 dependencies = [
1348 1378 "cfg-if",
1349 1379 "fastrand",
1350 1380 "libc",
1351 "redox_syscall",
1381 "redox_syscall 0.2.16",
1352 1382 "remove_dir_all",
1353 1383 "winapi",
1354 1384 ]
1355 1385
1356 1386 [[package]]
1357 1387 name = "termcolor"
1358 1388 version = "1.1.3"
1359 1389 source = "registry+https://github.com/rust-lang/crates.io-index"
1360 1390 checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
1361 1391 dependencies = [
1362 1392 "winapi-util",
1363 1393 ]
1364 1394
1365 1395 [[package]]
1366 1396 name = "thiserror"
1367 1397 version = "1.0.39"
1368 1398 source = "registry+https://github.com/rust-lang/crates.io-index"
1369 1399 checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c"
1370 1400 dependencies = [
1371 1401 "thiserror-impl",
1372 1402 ]
1373 1403
1374 1404 [[package]]
1375 1405 name = "thiserror-impl"
1376 1406 version = "1.0.39"
1377 1407 source = "registry+https://github.com/rust-lang/crates.io-index"
1378 1408 checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e"
1379 1409 dependencies = [
1380 1410 "proc-macro2",
1381 1411 "quote",
1382 1412 "syn",
1383 1413 ]
1384 1414
1385 1415 [[package]]
1386 1416 name = "thread_local"
1387 1417 version = "1.1.4"
1388 1418 source = "registry+https://github.com/rust-lang/crates.io-index"
1389 1419 checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180"
1390 1420 dependencies = [
1391 1421 "once_cell",
1392 1422 ]
1393 1423
1394 1424 [[package]]
1395 1425 name = "toml"
1396 1426 version = "0.6.0"
1397 1427 source = "registry+https://github.com/rust-lang/crates.io-index"
1398 1428 checksum = "4fb9d890e4dc9298b70f740f615f2e05b9db37dce531f6b24fb77ac993f9f217"
1399 1429 dependencies = [
1400 1430 "serde",
1401 1431 "serde_spanned",
1402 1432 "toml_datetime",
1403 1433 "toml_edit",
1404 1434 ]
1405 1435
1406 1436 [[package]]
1407 1437 name = "toml_datetime"
1408 1438 version = "0.5.1"
1409 1439 source = "registry+https://github.com/rust-lang/crates.io-index"
1410 1440 checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5"
1411 1441 dependencies = [
1412 1442 "serde",
1413 1443 ]
1414 1444
1415 1445 [[package]]
1416 1446 name = "toml_edit"
1417 1447 version = "0.18.1"
1418 1448 source = "registry+https://github.com/rust-lang/crates.io-index"
1419 1449 checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b"
1420 1450 dependencies = [
1421 1451 "indexmap",
1422 1452 "nom8",
1423 1453 "serde",
1424 1454 "serde_spanned",
1425 1455 "toml_datetime",
1426 1456 ]
1427 1457
1428 1458 [[package]]
1429 1459 name = "twox-hash"
1430 1460 version = "1.6.3"
1431 1461 source = "registry+https://github.com/rust-lang/crates.io-index"
1432 1462 checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
1433 1463 dependencies = [
1434 1464 "cfg-if",
1435 1465 "rand 0.8.5",
1436 1466 "static_assertions",
1437 1467 ]
1438 1468
1439 1469 [[package]]
1440 1470 name = "typenum"
1441 1471 version = "1.15.0"
1442 1472 source = "registry+https://github.com/rust-lang/crates.io-index"
1443 1473 checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
1444 1474
1445 1475 [[package]]
1446 1476 name = "unicode-ident"
1447 1477 version = "1.0.5"
1448 1478 source = "registry+https://github.com/rust-lang/crates.io-index"
1449 1479 checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
1450 1480
1451 1481 [[package]]
1452 1482 name = "unicode-width"
1453 1483 version = "0.1.10"
1454 1484 source = "registry+https://github.com/rust-lang/crates.io-index"
1455 1485 checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
1456 1486
1457 1487 [[package]]
1458 1488 name = "vcpkg"
1459 1489 version = "0.2.15"
1460 1490 source = "registry+https://github.com/rust-lang/crates.io-index"
1461 1491 checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
1462 1492
1463 1493 [[package]]
1464 1494 name = "vcsgraph"
1465 1495 version = "0.2.0"
1466 1496 source = "registry+https://github.com/rust-lang/crates.io-index"
1467 1497 checksum = "4cb68c231e2575f7503a7c19213875f9d4ec2e84e963a56ce3de4b6bee351ef7"
1468 1498 dependencies = [
1469 1499 "hex",
1470 1500 "rand 0.7.3",
1471 1501 "sha-1 0.9.8",
1472 1502 ]
1473 1503
1474 1504 [[package]]
1475 1505 name = "version_check"
1476 1506 version = "0.9.4"
1477 1507 source = "registry+https://github.com/rust-lang/crates.io-index"
1478 1508 checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
1479 1509
1480 1510 [[package]]
1481 1511 name = "wasi"
1482 1512 version = "0.9.0+wasi-snapshot-preview1"
1483 1513 source = "registry+https://github.com/rust-lang/crates.io-index"
1484 1514 checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
1485 1515
1486 1516 [[package]]
1487 1517 name = "wasi"
1488 1518 version = "0.11.0+wasi-snapshot-preview1"
1489 1519 source = "registry+https://github.com/rust-lang/crates.io-index"
1490 1520 checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
1491 1521
1492 1522 [[package]]
1493 1523 name = "wasm-bindgen"
1494 1524 version = "0.2.83"
1495 1525 source = "registry+https://github.com/rust-lang/crates.io-index"
1496 1526 checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
1497 1527 dependencies = [
1498 1528 "cfg-if",
1499 1529 "wasm-bindgen-macro",
1500 1530 ]
1501 1531
1502 1532 [[package]]
1503 1533 name = "wasm-bindgen-backend"
1504 1534 version = "0.2.83"
1505 1535 source = "registry+https://github.com/rust-lang/crates.io-index"
1506 1536 checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
1507 1537 dependencies = [
1508 1538 "bumpalo",
1509 1539 "log",
1510 1540 "once_cell",
1511 1541 "proc-macro2",
1512 1542 "quote",
1513 1543 "syn",
1514 1544 "wasm-bindgen-shared",
1515 1545 ]
1516 1546
1517 1547 [[package]]
1518 1548 name = "wasm-bindgen-macro"
1519 1549 version = "0.2.83"
1520 1550 source = "registry+https://github.com/rust-lang/crates.io-index"
1521 1551 checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
1522 1552 dependencies = [
1523 1553 "quote",
1524 1554 "wasm-bindgen-macro-support",
1525 1555 ]
1526 1556
1527 1557 [[package]]
1528 1558 name = "wasm-bindgen-macro-support"
1529 1559 version = "0.2.83"
1530 1560 source = "registry+https://github.com/rust-lang/crates.io-index"
1531 1561 checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
1532 1562 dependencies = [
1533 1563 "proc-macro2",
1534 1564 "quote",
1535 1565 "syn",
1536 1566 "wasm-bindgen-backend",
1537 1567 "wasm-bindgen-shared",
1538 1568 ]
1539 1569
1540 1570 [[package]]
1541 1571 name = "wasm-bindgen-shared"
1542 1572 version = "0.2.83"
1543 1573 source = "registry+https://github.com/rust-lang/crates.io-index"
1544 1574 checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
1545 1575
1546 1576 [[package]]
1547 1577 name = "web-sys"
1548 1578 version = "0.3.60"
1549 1579 source = "registry+https://github.com/rust-lang/crates.io-index"
1550 1580 checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f"
1551 1581 dependencies = [
1552 1582 "js-sys",
1553 1583 "wasm-bindgen",
1554 1584 ]
1555 1585
1556 1586 [[package]]
1557 1587 name = "which"
1558 1588 version = "4.3.0"
1559 1589 source = "registry+https://github.com/rust-lang/crates.io-index"
1560 1590 checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b"
1561 1591 dependencies = [
1562 1592 "either",
1563 1593 "libc",
1564 1594 "once_cell",
1565 1595 ]
1566 1596
1567 1597 [[package]]
1568 1598 name = "whoami"
1569 1599 version = "1.4.0"
1570 1600 source = "registry+https://github.com/rust-lang/crates.io-index"
1571 1601 checksum = "2c70234412ca409cc04e864e89523cb0fc37f5e1344ebed5a3ebf4192b6b9f68"
1572 1602 dependencies = [
1573 1603 "wasm-bindgen",
1574 1604 "web-sys",
1575 1605 ]
1576 1606
1577 1607 [[package]]
1578 1608 name = "winapi"
1579 1609 version = "0.3.9"
1580 1610 source = "registry+https://github.com/rust-lang/crates.io-index"
1581 1611 checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
1582 1612 dependencies = [
1583 1613 "winapi-i686-pc-windows-gnu",
1584 1614 "winapi-x86_64-pc-windows-gnu",
1585 1615 ]
1586 1616
1587 1617 [[package]]
1588 1618 name = "winapi-i686-pc-windows-gnu"
1589 1619 version = "0.4.0"
1590 1620 source = "registry+https://github.com/rust-lang/crates.io-index"
1591 1621 checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
1592 1622
1593 1623 [[package]]
1594 1624 name = "winapi-util"
1595 1625 version = "0.1.5"
1596 1626 source = "registry+https://github.com/rust-lang/crates.io-index"
1597 1627 checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
1598 1628 dependencies = [
1599 1629 "winapi",
1600 1630 ]
1601 1631
1602 1632 [[package]]
1603 1633 name = "winapi-x86_64-pc-windows-gnu"
1604 1634 version = "0.4.0"
1605 1635 source = "registry+https://github.com/rust-lang/crates.io-index"
1606 1636 checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
1607 1637
1608 1638 [[package]]
1609 1639 name = "windows-sys"
1610 1640 version = "0.48.0"
1611 1641 source = "registry+https://github.com/rust-lang/crates.io-index"
1612 1642 checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
1613 1643 dependencies = [
1614 1644 "windows-targets 0.48.5",
1615 1645 ]
1616 1646
1617 1647 [[package]]
1648 name = "windows-sys"
1649 version = "0.59.0"
1650 source = "registry+https://github.com/rust-lang/crates.io-index"
1651 checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
1652 dependencies = [
1653 "windows-targets 0.52.6",
1654 ]
1655
1656 [[package]]
1618 1657 name = "windows-targets"
1619 1658 version = "0.48.5"
1620 1659 source = "registry+https://github.com/rust-lang/crates.io-index"
1621 1660 checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
1622 1661 dependencies = [
1623 1662 "windows_aarch64_gnullvm 0.48.5",
1624 1663 "windows_aarch64_msvc 0.48.5",
1625 1664 "windows_i686_gnu 0.48.5",
1626 1665 "windows_i686_msvc 0.48.5",
1627 1666 "windows_x86_64_gnu 0.48.5",
1628 1667 "windows_x86_64_gnullvm 0.48.5",
1629 1668 "windows_x86_64_msvc 0.48.5",
1630 1669 ]
1631 1670
1632 1671 [[package]]
1633 1672 name = "windows-targets"
1634 version = "0.52.0"
1673 version = "0.52.6"
1635 1674 source = "registry+https://github.com/rust-lang/crates.io-index"
1636 checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
1675 checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
1637 1676 dependencies = [
1638 "windows_aarch64_gnullvm 0.52.0",
1639 "windows_aarch64_msvc 0.52.0",
1640 "windows_i686_gnu 0.52.0",
1641 "windows_i686_msvc 0.52.0",
1642 "windows_x86_64_gnu 0.52.0",
1643 "windows_x86_64_gnullvm 0.52.0",
1644 "windows_x86_64_msvc 0.52.0",
1677 "windows_aarch64_gnullvm 0.52.6",
1678 "windows_aarch64_msvc 0.52.6",
1679 "windows_i686_gnu 0.52.6",
1680 "windows_i686_gnullvm",
1681 "windows_i686_msvc 0.52.6",
1682 "windows_x86_64_gnu 0.52.6",
1683 "windows_x86_64_gnullvm 0.52.6",
1684 "windows_x86_64_msvc 0.52.6",
1645 1685 ]
1646 1686
1647 1687 [[package]]
1648 1688 name = "windows_aarch64_gnullvm"
1649 1689 version = "0.48.5"
1650 1690 source = "registry+https://github.com/rust-lang/crates.io-index"
1651 1691 checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
1652 1692
1653 1693 [[package]]
1654 1694 name = "windows_aarch64_gnullvm"
1655 version = "0.52.0"
1695 version = "0.52.6"
1656 1696 source = "registry+https://github.com/rust-lang/crates.io-index"
1657 checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
1697 checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
1658 1698
1659 1699 [[package]]
1660 1700 name = "windows_aarch64_msvc"
1661 1701 version = "0.48.5"
1662 1702 source = "registry+https://github.com/rust-lang/crates.io-index"
1663 1703 checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
1664 1704
1665 1705 [[package]]
1666 1706 name = "windows_aarch64_msvc"
1667 version = "0.52.0"
1707 version = "0.52.6"
1668 1708 source = "registry+https://github.com/rust-lang/crates.io-index"
1669 checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
1709 checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
1670 1710
1671 1711 [[package]]
1672 1712 name = "windows_i686_gnu"
1673 1713 version = "0.48.5"
1674 1714 source = "registry+https://github.com/rust-lang/crates.io-index"
1675 1715 checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
1676 1716
1677 1717 [[package]]
1678 1718 name = "windows_i686_gnu"
1679 version = "0.52.0"
1719 version = "0.52.6"
1680 1720 source = "registry+https://github.com/rust-lang/crates.io-index"
1681 checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
1721 checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
1722
1723 [[package]]
1724 name = "windows_i686_gnullvm"
1725 version = "0.52.6"
1726 source = "registry+https://github.com/rust-lang/crates.io-index"
1727 checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
1682 1728
1683 1729 [[package]]
1684 1730 name = "windows_i686_msvc"
1685 1731 version = "0.48.5"
1686 1732 source = "registry+https://github.com/rust-lang/crates.io-index"
1687 1733 checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
1688 1734
1689 1735 [[package]]
1690 1736 name = "windows_i686_msvc"
1691 version = "0.52.0"
1737 version = "0.52.6"
1692 1738 source = "registry+https://github.com/rust-lang/crates.io-index"
1693 checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
1739 checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
1694 1740
1695 1741 [[package]]
1696 1742 name = "windows_x86_64_gnu"
1697 1743 version = "0.48.5"
1698 1744 source = "registry+https://github.com/rust-lang/crates.io-index"
1699 1745 checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
1700 1746
1701 1747 [[package]]
1702 1748 name = "windows_x86_64_gnu"
1703 version = "0.52.0"
1749 version = "0.52.6"
1704 1750 source = "registry+https://github.com/rust-lang/crates.io-index"
1705 checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
1751 checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
1706 1752
1707 1753 [[package]]
1708 1754 name = "windows_x86_64_gnullvm"
1709 1755 version = "0.48.5"
1710 1756 source = "registry+https://github.com/rust-lang/crates.io-index"
1711 1757 checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
1712 1758
1713 1759 [[package]]
1714 1760 name = "windows_x86_64_gnullvm"
1715 version = "0.52.0"
1761 version = "0.52.6"
1716 1762 source = "registry+https://github.com/rust-lang/crates.io-index"
1717 checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
1763 checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
1718 1764
1719 1765 [[package]]
1720 1766 name = "windows_x86_64_msvc"
1721 1767 version = "0.48.5"
1722 1768 source = "registry+https://github.com/rust-lang/crates.io-index"
1723 1769 checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
1724 1770
1725 1771 [[package]]
1726 1772 name = "windows_x86_64_msvc"
1727 version = "0.52.0"
1773 version = "0.52.6"
1728 1774 source = "registry+https://github.com/rust-lang/crates.io-index"
1729 checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
1775 checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
1730 1776
1731 1777 [[package]]
1732 1778 name = "wyz"
1733 1779 version = "0.5.1"
1734 1780 source = "registry+https://github.com/rust-lang/crates.io-index"
1735 1781 checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed"
1736 1782 dependencies = [
1737 1783 "tap",
1738 1784 ]
1739 1785
1740 1786 [[package]]
1741 1787 name = "yansi"
1742 1788 version = "0.5.1"
1743 1789 source = "registry+https://github.com/rust-lang/crates.io-index"
1744 1790 checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
1745 1791
1746 1792 [[package]]
1747 1793 name = "zstd"
1748 1794 version = "0.12.3+zstd.1.5.2"
1749 1795 source = "registry+https://github.com/rust-lang/crates.io-index"
1750 1796 checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806"
1751 1797 dependencies = [
1752 1798 "zstd-safe",
1753 1799 ]
1754 1800
1755 1801 [[package]]
1756 1802 name = "zstd-safe"
1757 1803 version = "6.0.4+zstd.1.5.4"
1758 1804 source = "registry+https://github.com/rust-lang/crates.io-index"
1759 1805 checksum = "7afb4b54b8910cf5447638cb54bf4e8a65cbedd783af98b98c62ffe91f185543"
1760 1806 dependencies = [
1761 1807 "libc",
1762 1808 "zstd-sys",
1763 1809 ]
1764 1810
1765 1811 [[package]]
1766 1812 name = "zstd-sys"
1767 1813 version = "2.0.7+zstd.1.5.4"
1768 1814 source = "registry+https://github.com/rust-lang/crates.io-index"
1769 1815 checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5"
1770 1816 dependencies = [
1771 1817 "cc",
1772 1818 "libc",
1773 1819 "pkg-config",
1774 1820 ]
@@ -1,54 +1,56
1 1 [package]
2 2 name = "hg-core"
3 3 version = "0.1.0"
4 4 authors = ["Georges Racinet <gracinet@anybox.fr>"]
5 5 description = "Mercurial pure Rust core library, with no assumption on Python bindings (FFI)"
6 6 edition = "2021"
7 7
8 8 [lib]
9 9 name = "hg"
10 10
11 11 [dependencies]
12 12 bitflags = "1.3.2"
13 13 bytes-cast = "0.3.0"
14 14 byteorder = "1.4.3"
15 15 derive_more = "0.99.17"
16 16 hashbrown = { version = "0.13.1", features = ["rayon"] }
17 17 home = "0.5.4"
18 18 im-rc = "15.1.0"
19 19 itertools = "0.10.5"
20 20 lazy_static = "1.4.0"
21 21 libc = "0.2.137"
22 22 logging_timer = "1.1.0"
23 23 rand = "0.8.5"
24 24 rand_pcg = "0.3.1"
25 25 rand_distr = "0.4.3"
26 26 rayon = "1.7.0"
27 27 regex = "1.7.0"
28 28 self_cell = "1.0"
29 29 serde = { version = "1.0", features = ["derive"] }
30 30 sha-1 = "0.10.0"
31 31 twox-hash = "1.6.3"
32 32 same-file = "1.0.6"
33 33 tempfile = "3.3.0"
34 34 toml = "0.6"
35 35 thread_local = "1.1.4"
36 36 crossbeam-channel = "0.5.6"
37 37 log = "0.4.17"
38 38 memmap2 = { version = "0.5.8", features = ["stable_deref_trait"] }
39 39 zstd = "0.12"
40 40 format-bytes = "0.3.0"
41 41 once_cell = "1.16.0"
42 42 bitvec = "1.0.1"
43 43 chrono = "0.4.34"
44 dyn-clone = "1.0.16"
45 filetime = "0.2.23"
44 46
45 47 # We don't use the `miniz-oxide` backend to not change rhg benchmarks and until
46 48 # we have a clearer view of which backend is the fastest.
47 49 [dependencies.flate2]
48 50 version = "1.0.24"
49 51 features = ["zlib"]
50 52 default-features = false
51 53
52 54 [dev-dependencies]
53 55 clap = { version = "~4.0", features = ["derive"] }
54 56 pretty_assertions = "1.1.0"
@@ -1,188 +1,188
1 1 //! Filesystem-based locks for local repositories
2 2
3 3 use crate::errors::HgError;
4 4 use crate::errors::HgResultExt;
5 use crate::vfs::Vfs;
5 use crate::vfs::VfsImpl;
6 6 use std::io;
7 7 use std::io::ErrorKind;
8 8
9 9 #[derive(derive_more::From)]
10 10 pub enum LockError {
11 11 AlreadyHeld,
12 12 #[from]
13 13 Other(HgError),
14 14 }
15 15
16 16 /// Try to call `f` with the lock acquired, without waiting.
17 17 ///
18 18 /// If the lock is aready held, `f` is not called and `LockError::AlreadyHeld`
19 19 /// is returned. `LockError::Io` is returned for any unexpected I/O error
20 20 /// accessing the lock file, including for removing it after `f` was called.
21 21 /// The return value of `f` is dropped in that case. If all is successful, the
22 22 /// return value of `f` is forwarded.
23 23 pub fn try_with_lock_no_wait<R>(
24 hg_vfs: Vfs,
24 hg_vfs: &VfsImpl,
25 25 lock_filename: &str,
26 26 f: impl FnOnce() -> R,
27 27 ) -> Result<R, LockError> {
28 28 let our_lock_data = &*OUR_LOCK_DATA;
29 29 for _retry in 0..5 {
30 30 match make_lock(hg_vfs, lock_filename, our_lock_data) {
31 31 Ok(()) => {
32 32 let result = f();
33 33 unlock(hg_vfs, lock_filename)?;
34 34 return Ok(result);
35 35 }
36 36 Err(HgError::IoError { error, .. })
37 37 if error.kind() == ErrorKind::AlreadyExists =>
38 38 {
39 39 let lock_data = read_lock(hg_vfs, lock_filename)?;
40 40 if lock_data.is_none() {
41 41 // Lock was apparently just released, retry acquiring it
42 42 continue;
43 43 }
44 44 if !lock_should_be_broken(&lock_data) {
45 45 return Err(LockError::AlreadyHeld);
46 46 }
47 47 // The lock file is left over from a process not running
48 48 // anymore. Break it, but with another lock to
49 49 // avoid a race.
50 50 break_lock(hg_vfs, lock_filename)?;
51 51
52 52 // Retry acquiring
53 53 }
54 54 Err(error) => Err(error)?,
55 55 }
56 56 }
57 57 Err(LockError::AlreadyHeld)
58 58 }
59 59
60 fn break_lock(hg_vfs: Vfs, lock_filename: &str) -> Result<(), LockError> {
60 fn break_lock(hg_vfs: &VfsImpl, lock_filename: &str) -> Result<(), LockError> {
61 61 try_with_lock_no_wait(hg_vfs, &format!("{}.break", lock_filename), || {
62 62 // Check again in case some other process broke and
63 63 // acquired the lock in the meantime
64 64 let lock_data = read_lock(hg_vfs, lock_filename)?;
65 65 if !lock_should_be_broken(&lock_data) {
66 66 return Err(LockError::AlreadyHeld);
67 67 }
68 68 Ok(hg_vfs.remove_file(lock_filename)?)
69 69 })?
70 70 }
71 71
72 72 #[cfg(unix)]
73 73 fn make_lock(
74 hg_vfs: Vfs,
74 hg_vfs: &VfsImpl,
75 75 lock_filename: &str,
76 76 data: &str,
77 77 ) -> Result<(), HgError> {
78 78 // Use a symbolic link because creating it is atomic.
79 79 // The link’s "target" contains data not representing any path.
80 80 let fake_symlink_target = data;
81 81 hg_vfs.create_symlink(lock_filename, fake_symlink_target)
82 82 }
83 83
84 84 fn read_lock(
85 hg_vfs: Vfs,
85 hg_vfs: &VfsImpl,
86 86 lock_filename: &str,
87 87 ) -> Result<Option<String>, HgError> {
88 88 let link_target =
89 89 hg_vfs.read_link(lock_filename).io_not_found_as_none()?;
90 90 if let Some(target) = link_target {
91 91 let data = target
92 92 .into_os_string()
93 93 .into_string()
94 94 .map_err(|_| HgError::corrupted("non-UTF-8 lock data"))?;
95 95 Ok(Some(data))
96 96 } else {
97 97 Ok(None)
98 98 }
99 99 }
100 100
101 fn unlock(hg_vfs: Vfs, lock_filename: &str) -> Result<(), HgError> {
101 fn unlock(hg_vfs: &VfsImpl, lock_filename: &str) -> Result<(), HgError> {
102 102 hg_vfs.remove_file(lock_filename)
103 103 }
104 104
105 105 /// Return whether the process that is/was holding the lock is known not to be
106 106 /// running anymore.
107 107 fn lock_should_be_broken(data: &Option<String>) -> bool {
108 108 (|| -> Option<bool> {
109 109 let (prefix, pid) = data.as_ref()?.split_once(':')?;
110 110 if prefix != *LOCK_PREFIX {
111 111 return Some(false);
112 112 }
113 113 let process_is_running;
114 114
115 115 #[cfg(unix)]
116 116 {
117 117 let pid: libc::pid_t = pid.parse().ok()?;
118 118 unsafe {
119 119 let signal = 0; // Test if we could send a signal, without sending
120 120 let result = libc::kill(pid, signal);
121 121 if result == 0 {
122 122 process_is_running = true
123 123 } else {
124 124 let errno =
125 125 io::Error::last_os_error().raw_os_error().unwrap();
126 126 process_is_running = errno != libc::ESRCH
127 127 }
128 128 }
129 129 }
130 130
131 131 Some(!process_is_running)
132 132 })()
133 133 .unwrap_or(false)
134 134 }
135 135
136 136 lazy_static::lazy_static! {
137 137 /// A string which is used to differentiate pid namespaces
138 138 ///
139 139 /// It's useful to detect "dead" processes and remove stale locks with
140 140 /// confidence. Typically it's just hostname. On modern linux, we include an
141 141 /// extra Linux-specific pid namespace identifier.
142 142 static ref LOCK_PREFIX: String = {
143 143 // Note: this must match the behavior of `_getlockprefix` in `mercurial/lock.py`
144 144
145 145 /// Same as https://github.com/python/cpython/blob/v3.10.0/Modules/socketmodule.c#L5414
146 146 const BUFFER_SIZE: usize = 1024;
147 147 // This cast is *needed* for platforms with signed chars
148 148 #[allow(clippy::unnecessary_cast)]
149 149 let mut buffer = [0 as libc::c_char; BUFFER_SIZE];
150 150 let hostname_bytes = unsafe {
151 151 let result = libc::gethostname(buffer.as_mut_ptr(), BUFFER_SIZE);
152 152 if result != 0 {
153 153 panic!("gethostname: {}", io::Error::last_os_error())
154 154 }
155 155 std::ffi::CStr::from_ptr(buffer.as_mut_ptr()).to_bytes()
156 156 };
157 157 let hostname =
158 158 std::str::from_utf8(hostname_bytes).expect("non-UTF-8 hostname");
159 159
160 160 #[cfg(target_os = "linux")]
161 161 {
162 162 use std::os::linux::fs::MetadataExt;
163 163 match std::fs::metadata("/proc/self/ns/pid") {
164 164 Ok(meta) => {
165 165 return format!("{}/{:x}", hostname, meta.st_ino())
166 166 }
167 167 Err(error) => {
168 168 // TODO: match on `error.kind()` when `NotADirectory`
169 169 // is available on all supported Rust versions:
170 170 // https://github.com/rust-lang/rust/issues/86442
171 171 use libc::{
172 172 ENOENT, // ErrorKind::NotFound
173 173 ENOTDIR, // ErrorKind::NotADirectory
174 174 EACCES, // ErrorKind::PermissionDenied
175 175 };
176 176 match error.raw_os_error() {
177 177 Some(ENOENT) | Some(ENOTDIR) | Some(EACCES) => {}
178 178 _ => panic!("stat /proc/self/ns/pid: {}", error),
179 179 }
180 180 }
181 181 }
182 182 }
183 183
184 184 hostname.to_owned()
185 185 };
186 186
187 187 static ref OUR_LOCK_DATA: String = format!("{}:{}", &*LOCK_PREFIX, std::process::id());
188 188 }
@@ -1,101 +1,105
1 1 use crate::errors::{HgError, HgResultExt, IoErrorContext, IoResultExt};
2 use crate::vfs::Vfs;
2 use crate::vfs::VfsImpl;
3 3 use std::io::Write;
4 4
5 5 /// An utility to append to a log file with the given name, and optionally
6 6 /// rotate it after it reaches a certain maximum size.
7 7 ///
8 8 /// Rotation works by renaming "example.log" to "example.log.1", after renaming
9 9 /// "example.log.1" to "example.log.2" etc up to the given maximum number of
10 10 /// files.
11 11 pub struct LogFile<'a> {
12 vfs: Vfs<'a>,
12 vfs: VfsImpl,
13 13 name: &'a str,
14 14 max_size: Option<u64>,
15 15 max_files: u32,
16 16 }
17 17
18 18 impl<'a> LogFile<'a> {
19 pub fn new(vfs: Vfs<'a>, name: &'a str) -> Self {
19 pub fn new(vfs: VfsImpl, name: &'a str) -> Self {
20 20 Self {
21 21 vfs,
22 22 name,
23 23 max_size: None,
24 24 max_files: 0,
25 25 }
26 26 }
27 27
28 28 /// Rotate before writing to a log file that was already larger than the
29 29 /// given size, in bytes. `None` disables rotation.
30 30 pub fn max_size(mut self, value: Option<u64>) -> Self {
31 31 self.max_size = value;
32 32 self
33 33 }
34 34
35 35 /// Keep this many rotated files `{name}.1` up to `{name}.{max}`, in
36 36 /// addition to the original `{name}` file.
37 37 pub fn max_files(mut self, value: u32) -> Self {
38 38 self.max_files = value;
39 39 self
40 40 }
41 41
42 42 /// Append the given `bytes` as-is to the log file, after rotating if
43 43 /// needed.
44 44 ///
45 45 /// No trailing newline is added. Make sure to include one in `bytes` if
46 46 /// desired.
47 47 pub fn write(&self, bytes: &[u8]) -> Result<(), HgError> {
48 48 let path = self.vfs.join(self.name);
49 49 let context = || IoErrorContext::WritingFile(path.clone());
50 50 let open = || {
51 51 std::fs::OpenOptions::new()
52 52 .create(true)
53 53 .append(true)
54 54 .open(&path)
55 55 .with_context(context)
56 56 };
57 57 let mut file = open()?;
58 58 if let Some(max_size) = self.max_size {
59 59 if file.metadata().with_context(context)?.len() >= max_size {
60 60 // For example with `max_files == 5`, the first iteration of
61 61 // this loop has `i == 4` and renames `{name}.4` to `{name}.5`.
62 62 // The last iteration renames `{name}.1` to
63 63 // `{name}.2`
64 64 for i in (1..self.max_files).rev() {
65 65 self.vfs
66 66 .rename(
67 67 format!("{}.{}", self.name, i),
68 68 format!("{}.{}", self.name, i + 1),
69 69 )
70 70 .io_not_found_as_none()?;
71 71 }
72 72 // Then rename `{name}` to `{name}.1`. This is the
73 73 // previously-opened `file`.
74 74 self.vfs
75 75 .rename(self.name, format!("{}.1", self.name))
76 76 .io_not_found_as_none()?;
77 77 // Finally, create a new `{name}` file and replace our `file`
78 78 // handle.
79 79 file = open()?;
80 80 }
81 81 }
82 82 file.write_all(bytes).with_context(context)?;
83 83 file.sync_all().with_context(context)
84 84 }
85 85 }
86 86
87 87 #[test]
88 88 fn test_rotation() {
89 89 let temp = tempfile::tempdir().unwrap();
90 let vfs = Vfs { base: temp.path() };
91 let logger = LogFile::new(vfs, "log").max_size(Some(3)).max_files(2);
90 let vfs = VfsImpl {
91 base: temp.path().to_owned(),
92 };
93 let logger = LogFile::new(vfs.clone(), "log")
94 .max_size(Some(3))
95 .max_files(2);
92 96 logger.write(b"one\n").unwrap();
93 97 logger.write(b"two\n").unwrap();
94 98 logger.write(b"3\n").unwrap();
95 99 logger.write(b"four\n").unwrap();
96 100 logger.write(b"five\n").unwrap();
97 101 assert_eq!(vfs.read("log").unwrap(), b"five\n");
98 102 assert_eq!(vfs.read("log.1").unwrap(), b"3\nfour\n");
99 103 assert_eq!(vfs.read("log.2").unwrap(), b"two\n");
100 104 assert!(vfs.read("log.3").io_not_found_as_none().unwrap().is_none());
101 105 }
@@ -1,842 +1,851
1 1 use crate::changelog::Changelog;
2 2 use crate::config::{Config, ConfigError, ConfigParseError};
3 3 use crate::dirstate::DirstateParents;
4 4 use crate::dirstate_tree::dirstate_map::DirstateMapWriteMode;
5 5 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
6 6 use crate::dirstate_tree::owning::OwningDirstateMap;
7 7 use crate::errors::HgResultExt;
8 8 use crate::errors::{HgError, IoResultExt};
9 9 use crate::lock::{try_with_lock_no_wait, LockError};
10 10 use crate::manifest::{Manifest, Manifestlog};
11 11 use crate::requirements::{
12 12 CHANGELOGV2_REQUIREMENT, GENERALDELTA_REQUIREMENT, NODEMAP_REQUIREMENT,
13 13 REVLOGV1_REQUIREMENT, REVLOGV2_REQUIREMENT,
14 14 };
15 15 use crate::revlog::filelog::Filelog;
16 16 use crate::revlog::RevlogError;
17 17 use crate::utils::debug::debug_wait_for_file_or_print;
18 18 use crate::utils::files::get_path_from_bytes;
19 19 use crate::utils::hg_path::HgPath;
20 20 use crate::utils::SliceExt;
21 use crate::vfs::{is_dir, is_file, Vfs};
21 use crate::vfs::{is_dir, is_file, VfsImpl};
22 22 use crate::{
23 23 requirements, NodePrefix, RevlogDataConfig, RevlogDeltaConfig,
24 24 RevlogFeatureConfig, RevlogType, RevlogVersionOptions, UncheckedRevision,
25 25 };
26 26 use crate::{DirstateError, RevlogOpenOptions};
27 27 use std::cell::{Ref, RefCell, RefMut};
28 28 use std::collections::HashSet;
29 29 use std::io::Seek;
30 30 use std::io::SeekFrom;
31 31 use std::io::Write as IoWrite;
32 32 use std::path::{Path, PathBuf};
33 33
34 34 const V2_MAX_READ_ATTEMPTS: usize = 5;
35 35
36 36 type DirstateMapIdentity = (Option<u64>, Option<Vec<u8>>, usize);
37 37
38 38 /// A repository on disk
39 39 pub struct Repo {
40 40 working_directory: PathBuf,
41 41 dot_hg: PathBuf,
42 42 store: PathBuf,
43 43 requirements: HashSet<String>,
44 44 config: Config,
45 45 dirstate_parents: LazyCell<DirstateParents>,
46 46 dirstate_map: LazyCell<OwningDirstateMap>,
47 47 changelog: LazyCell<Changelog>,
48 48 manifestlog: LazyCell<Manifestlog>,
49 49 }
50 50
51 51 #[derive(Debug, derive_more::From)]
52 52 pub enum RepoError {
53 53 NotFound {
54 54 at: PathBuf,
55 55 },
56 56 #[from]
57 57 ConfigParseError(ConfigParseError),
58 58 #[from]
59 59 Other(HgError),
60 60 }
61 61
62 62 impl From<ConfigError> for RepoError {
63 63 fn from(error: ConfigError) -> Self {
64 64 match error {
65 65 ConfigError::Parse(error) => error.into(),
66 66 ConfigError::Other(error) => error.into(),
67 67 }
68 68 }
69 69 }
70 70
71 71 impl Repo {
72 72 /// tries to find nearest repository root in current working directory or
73 73 /// its ancestors
74 74 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
75 75 let current_directory = crate::utils::current_dir()?;
76 76 // ancestors() is inclusive: it first yields `current_directory`
77 77 // as-is.
78 78 for ancestor in current_directory.ancestors() {
79 79 if is_dir(ancestor.join(".hg"))? {
80 80 return Ok(ancestor.to_path_buf());
81 81 }
82 82 }
83 83 Err(RepoError::NotFound {
84 84 at: current_directory,
85 85 })
86 86 }
87 87
88 88 /// Find a repository, either at the given path (which must contain a `.hg`
89 89 /// sub-directory) or by searching the current directory and its
90 90 /// ancestors.
91 91 ///
92 92 /// A method with two very different "modes" like this usually a code smell
93 93 /// to make two methods instead, but in this case an `Option` is what rhg
94 94 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
95 95 /// Having two methods would just move that `if` to almost all callers.
96 96 pub fn find(
97 97 config: &Config,
98 98 explicit_path: Option<PathBuf>,
99 99 ) -> Result<Self, RepoError> {
100 100 if let Some(root) = explicit_path {
101 101 if is_dir(root.join(".hg"))? {
102 102 Self::new_at_path(root, config)
103 103 } else if is_file(&root)? {
104 104 Err(HgError::unsupported("bundle repository").into())
105 105 } else {
106 106 Err(RepoError::NotFound { at: root })
107 107 }
108 108 } else {
109 109 let root = Self::find_repo_root()?;
110 110 Self::new_at_path(root, config)
111 111 }
112 112 }
113 113
114 114 /// To be called after checking that `.hg` is a sub-directory
115 115 fn new_at_path(
116 116 working_directory: PathBuf,
117 117 config: &Config,
118 118 ) -> Result<Self, RepoError> {
119 119 let dot_hg = working_directory.join(".hg");
120 120
121 121 let mut repo_config_files =
122 122 vec![dot_hg.join("hgrc"), dot_hg.join("hgrc-not-shared")];
123 123
124 let hg_vfs = Vfs { base: &dot_hg };
125 let mut reqs = requirements::load_if_exists(hg_vfs)?;
124 let hg_vfs = VfsImpl {
125 base: dot_hg.to_owned(),
126 };
127 let mut reqs = requirements::load_if_exists(&hg_vfs)?;
126 128 let relative =
127 129 reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
128 130 let shared =
129 131 reqs.contains(requirements::SHARED_REQUIREMENT) || relative;
130 132
131 133 // From `mercurial/localrepo.py`:
132 134 //
133 135 // if .hg/requires contains the sharesafe requirement, it means
134 136 // there exists a `.hg/store/requires` too and we should read it
135 137 // NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
136 138 // is present. We never write SHARESAFE_REQUIREMENT for a repo if store
137 139 // is not present, refer checkrequirementscompat() for that
138 140 //
139 141 // However, if SHARESAFE_REQUIREMENT is not present, it means that the
140 142 // repository was shared the old way. We check the share source
141 143 // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
142 144 // current repository needs to be reshared
143 145 let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);
144 146
145 147 let store_path;
146 148 if !shared {
147 149 store_path = dot_hg.join("store");
148 150 } else {
149 151 let bytes = hg_vfs.read("sharedpath")?;
150 152 let mut shared_path =
151 153 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
152 154 .to_owned();
153 155 if relative {
154 156 shared_path = dot_hg.join(shared_path)
155 157 }
156 158 if !is_dir(&shared_path)? {
157 159 return Err(HgError::corrupted(format!(
158 160 ".hg/sharedpath points to nonexistent directory {}",
159 161 shared_path.display()
160 162 ))
161 163 .into());
162 164 }
163 165
164 166 store_path = shared_path.join("store");
165 167
166 let source_is_share_safe =
167 requirements::load(Vfs { base: &shared_path })?
168 .contains(requirements::SHARESAFE_REQUIREMENT);
168 let source_is_share_safe = requirements::load(VfsImpl {
169 base: shared_path.to_owned(),
170 })?
171 .contains(requirements::SHARESAFE_REQUIREMENT);
169 172
170 173 if share_safe != source_is_share_safe {
171 174 return Err(HgError::unsupported("share-safe mismatch").into());
172 175 }
173 176
174 177 if share_safe {
175 178 repo_config_files.insert(0, shared_path.join("hgrc"))
176 179 }
177 180 }
178 181 if share_safe {
179 reqs.extend(requirements::load(Vfs { base: &store_path })?);
182 reqs.extend(requirements::load(VfsImpl {
183 base: store_path.to_owned(),
184 })?);
180 185 }
181 186
182 187 let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
183 188 config.combine_with_repo(&repo_config_files)?
184 189 } else {
185 190 config.clone()
186 191 };
187 192
188 193 let repo = Self {
189 194 requirements: reqs,
190 195 working_directory,
191 196 store: store_path,
192 197 dot_hg,
193 198 config: repo_config,
194 199 dirstate_parents: LazyCell::new(),
195 200 dirstate_map: LazyCell::new(),
196 201 changelog: LazyCell::new(),
197 202 manifestlog: LazyCell::new(),
198 203 };
199 204
200 205 requirements::check(&repo)?;
201 206
202 207 Ok(repo)
203 208 }
204 209
205 210 pub fn working_directory_path(&self) -> &Path {
206 211 &self.working_directory
207 212 }
208 213
209 214 pub fn requirements(&self) -> &HashSet<String> {
210 215 &self.requirements
211 216 }
212 217
213 218 pub fn config(&self) -> &Config {
214 219 &self.config
215 220 }
216 221
217 222 /// For accessing repository files (in `.hg`), except for the store
218 223 /// (`.hg/store`).
219 pub fn hg_vfs(&self) -> Vfs<'_> {
220 Vfs { base: &self.dot_hg }
224 pub fn hg_vfs(&self) -> VfsImpl {
225 VfsImpl {
226 base: self.dot_hg.to_owned(),
227 }
221 228 }
222 229
223 230 /// For accessing repository store files (in `.hg/store`)
224 pub fn store_vfs(&self) -> Vfs<'_> {
225 Vfs { base: &self.store }
231 pub fn store_vfs(&self) -> VfsImpl {
232 VfsImpl {
233 base: self.store.to_owned(),
234 }
226 235 }
227 236
228 237 /// For accessing the working copy
229 pub fn working_directory_vfs(&self) -> Vfs<'_> {
230 Vfs {
231 base: &self.working_directory,
238 pub fn working_directory_vfs(&self) -> VfsImpl {
239 VfsImpl {
240 base: self.working_directory.to_owned(),
232 241 }
233 242 }
234 243
235 244 pub fn try_with_wlock_no_wait<R>(
236 245 &self,
237 246 f: impl FnOnce() -> R,
238 247 ) -> Result<R, LockError> {
239 try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
248 try_with_lock_no_wait(&self.hg_vfs(), "wlock", f)
240 249 }
241 250
242 251 /// Whether this repo should use dirstate-v2.
243 252 /// The presence of `dirstate-v2` in the requirements does not mean that
244 253 /// the on-disk dirstate is necessarily in version 2. In most cases,
245 254 /// a dirstate-v2 file will indeed be found, but in rare cases (like the
246 255 /// upgrade mechanism being cut short), the on-disk version will be a
247 256 /// v1 file.
248 257 /// Semantically, having a requirement only means that a client cannot
249 258 /// properly understand or properly update the repo if it lacks the support
250 259 /// for the required feature, but not that that feature is actually used
251 260 /// in all occasions.
252 261 pub fn use_dirstate_v2(&self) -> bool {
253 262 self.requirements
254 263 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
255 264 }
256 265
257 266 pub fn has_sparse(&self) -> bool {
258 267 self.requirements.contains(requirements::SPARSE_REQUIREMENT)
259 268 }
260 269
261 270 pub fn has_narrow(&self) -> bool {
262 271 self.requirements.contains(requirements::NARROW_REQUIREMENT)
263 272 }
264 273
265 274 pub fn has_nodemap(&self) -> bool {
266 275 self.requirements
267 276 .contains(requirements::NODEMAP_REQUIREMENT)
268 277 }
269 278
270 279 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
271 280 Ok(self
272 281 .hg_vfs()
273 282 .read("dirstate")
274 283 .io_not_found_as_none()?
275 284 .unwrap_or_default())
276 285 }
277 286
278 287 fn dirstate_identity(&self) -> Result<Option<u64>, HgError> {
279 288 use std::os::unix::fs::MetadataExt;
280 289 Ok(self
281 290 .hg_vfs()
282 291 .symlink_metadata("dirstate")
283 292 .io_not_found_as_none()?
284 293 .map(|meta| meta.ino()))
285 294 }
286 295
287 296 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
288 297 Ok(*self
289 298 .dirstate_parents
290 299 .get_or_init(|| self.read_dirstate_parents())?)
291 300 }
292 301
293 302 fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
294 303 let dirstate = self.dirstate_file_contents()?;
295 304 let parents = if dirstate.is_empty() {
296 305 DirstateParents::NULL
297 306 } else if self.use_dirstate_v2() {
298 307 let docket_res =
299 308 crate::dirstate_tree::on_disk::read_docket(&dirstate);
300 309 match docket_res {
301 310 Ok(docket) => docket.parents(),
302 311 Err(_) => {
303 312 log::info!(
304 313 "Parsing dirstate docket failed, \
305 314 falling back to dirstate-v1"
306 315 );
307 316 *crate::dirstate::parsers::parse_dirstate_parents(
308 317 &dirstate,
309 318 )?
310 319 }
311 320 }
312 321 } else {
313 322 *crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
314 323 };
315 324 self.dirstate_parents.set(parents);
316 325 Ok(parents)
317 326 }
318 327
319 328 /// Returns the information read from the dirstate docket necessary to
320 329 /// check if the data file has been updated/deleted by another process
321 330 /// since we last read the dirstate.
322 331 /// Namely, the inode, data file uuid and the data size.
323 332 fn get_dirstate_data_file_integrity(
324 333 &self,
325 334 ) -> Result<DirstateMapIdentity, HgError> {
326 335 assert!(
327 336 self.use_dirstate_v2(),
328 337 "accessing dirstate data file ID without dirstate-v2"
329 338 );
330 339 // Get the identity before the contents since we could have a race
331 340 // between the two. Having an identity that is too old is fine, but
332 341 // one that is younger than the content change is bad.
333 342 let identity = self.dirstate_identity()?;
334 343 let dirstate = self.dirstate_file_contents()?;
335 344 if dirstate.is_empty() {
336 345 self.dirstate_parents.set(DirstateParents::NULL);
337 346 Ok((identity, None, 0))
338 347 } else {
339 348 let docket_res =
340 349 crate::dirstate_tree::on_disk::read_docket(&dirstate);
341 350 match docket_res {
342 351 Ok(docket) => {
343 352 self.dirstate_parents.set(docket.parents());
344 353 Ok((
345 354 identity,
346 355 Some(docket.uuid.to_owned()),
347 356 docket.data_size(),
348 357 ))
349 358 }
350 359 Err(_) => {
351 360 log::info!(
352 361 "Parsing dirstate docket failed, \
353 362 falling back to dirstate-v1"
354 363 );
355 364 let parents =
356 365 *crate::dirstate::parsers::parse_dirstate_parents(
357 366 &dirstate,
358 367 )?;
359 368 self.dirstate_parents.set(parents);
360 369 Ok((identity, None, 0))
361 370 }
362 371 }
363 372 }
364 373 }
365 374
366 375 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
367 376 if self.use_dirstate_v2() {
368 377 // The v2 dirstate is split into a docket and a data file.
369 378 // Since we don't always take the `wlock` to read it
370 379 // (like in `hg status`), it is susceptible to races.
371 380 // A simple retry method should be enough since full rewrites
372 381 // only happen when too much garbage data is present and
373 382 // this race is unlikely.
374 383 let mut tries = 0;
375 384
376 385 while tries < V2_MAX_READ_ATTEMPTS {
377 386 tries += 1;
378 387 match self.read_docket_and_data_file() {
379 388 Ok(m) => {
380 389 return Ok(m);
381 390 }
382 391 Err(e) => match e {
383 392 DirstateError::Common(HgError::RaceDetected(
384 393 context,
385 394 )) => {
386 395 log::info!(
387 396 "dirstate read race detected {} (retry {}/{})",
388 397 context,
389 398 tries,
390 399 V2_MAX_READ_ATTEMPTS,
391 400 );
392 401 continue;
393 402 }
394 403 _ => {
395 404 log::info!(
396 405 "Reading dirstate v2 failed, \
397 406 falling back to v1"
398 407 );
399 408 return self.new_dirstate_map_v1();
400 409 }
401 410 },
402 411 }
403 412 }
404 413 let error = HgError::abort(
405 414 format!("dirstate read race happened {tries} times in a row"),
406 415 255,
407 416 None,
408 417 );
409 418 Err(DirstateError::Common(error))
410 419 } else {
411 420 self.new_dirstate_map_v1()
412 421 }
413 422 }
414 423
415 424 fn new_dirstate_map_v1(&self) -> Result<OwningDirstateMap, DirstateError> {
416 425 debug_wait_for_file_or_print(self.config(), "dirstate.pre-read-file");
417 426 let identity = self.dirstate_identity()?;
418 427 let dirstate_file_contents = self.dirstate_file_contents()?;
419 428 if dirstate_file_contents.is_empty() {
420 429 self.dirstate_parents.set(DirstateParents::NULL);
421 430 Ok(OwningDirstateMap::new_empty(Vec::new()))
422 431 } else {
423 432 let (map, parents) =
424 433 OwningDirstateMap::new_v1(dirstate_file_contents, identity)?;
425 434 self.dirstate_parents.set(parents);
426 435 Ok(map)
427 436 }
428 437 }
429 438
430 439 fn read_docket_and_data_file(
431 440 &self,
432 441 ) -> Result<OwningDirstateMap, DirstateError> {
433 442 debug_wait_for_file_or_print(self.config(), "dirstate.pre-read-file");
434 443 let dirstate_file_contents = self.dirstate_file_contents()?;
435 444 let identity = self.dirstate_identity()?;
436 445 if dirstate_file_contents.is_empty() {
437 446 self.dirstate_parents.set(DirstateParents::NULL);
438 447 return Ok(OwningDirstateMap::new_empty(Vec::new()));
439 448 }
440 449 let docket = crate::dirstate_tree::on_disk::read_docket(
441 450 &dirstate_file_contents,
442 451 )?;
443 452 debug_wait_for_file_or_print(
444 453 self.config(),
445 454 "dirstate.post-docket-read-file",
446 455 );
447 456 self.dirstate_parents.set(docket.parents());
448 457 let uuid = docket.uuid.to_owned();
449 458 let data_size = docket.data_size();
450 459
451 460 let context = "between reading dirstate docket and data file";
452 461 let race_error = HgError::RaceDetected(context.into());
453 462 let metadata = docket.tree_metadata();
454 463
455 464 let mut map = if crate::vfs::is_on_nfs_mount(docket.data_filename()) {
456 465 // Don't mmap on NFS to prevent `SIGBUS` error on deletion
457 466 let contents = self.hg_vfs().read(docket.data_filename());
458 467 let contents = match contents {
459 468 Ok(c) => c,
460 469 Err(HgError::IoError { error, context }) => {
461 470 match error.raw_os_error().expect("real os error") {
462 471 // 2 = ENOENT, No such file or directory
463 472 // 116 = ESTALE, Stale NFS file handle
464 473 //
465 474 // TODO match on `error.kind()` when
466 475 // `ErrorKind::StaleNetworkFileHandle` is stable.
467 476 2 | 116 => {
468 477 // Race where the data file was deleted right after
469 478 // we read the docket, try again
470 479 return Err(race_error.into());
471 480 }
472 481 _ => {
473 482 return Err(
474 483 HgError::IoError { error, context }.into()
475 484 )
476 485 }
477 486 }
478 487 }
479 488 Err(e) => return Err(e.into()),
480 489 };
481 490 OwningDirstateMap::new_v2(
482 491 contents, data_size, metadata, uuid, identity,
483 492 )
484 493 } else {
485 494 match self
486 495 .hg_vfs()
487 496 .mmap_open(docket.data_filename())
488 497 .io_not_found_as_none()
489 498 {
490 499 Ok(Some(data_mmap)) => OwningDirstateMap::new_v2(
491 500 data_mmap, data_size, metadata, uuid, identity,
492 501 ),
493 502 Ok(None) => {
494 503 // Race where the data file was deleted right after we
495 504 // read the docket, try again
496 505 return Err(race_error.into());
497 506 }
498 507 Err(e) => return Err(e.into()),
499 508 }
500 509 }?;
501 510
502 511 let write_mode_config = self
503 512 .config()
504 513 .get_str(b"devel", b"dirstate.v2.data_update_mode")
505 514 .unwrap_or(Some("auto"))
506 515 .unwrap_or("auto"); // don't bother for devel options
507 516 let write_mode = match write_mode_config {
508 517 "auto" => DirstateMapWriteMode::Auto,
509 518 "force-new" => DirstateMapWriteMode::ForceNewDataFile,
510 519 "force-append" => DirstateMapWriteMode::ForceAppend,
511 520 _ => DirstateMapWriteMode::Auto,
512 521 };
513 522
514 523 map.with_dmap_mut(|m| m.set_write_mode(write_mode));
515 524
516 525 Ok(map)
517 526 }
518 527
519 528 pub fn dirstate_map(
520 529 &self,
521 530 ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
522 531 self.dirstate_map.get_or_init(|| self.new_dirstate_map())
523 532 }
524 533
525 534 pub fn dirstate_map_mut(
526 535 &self,
527 536 ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
528 537 self.dirstate_map
529 538 .get_mut_or_init(|| self.new_dirstate_map())
530 539 }
531 540
532 541 fn new_changelog(&self) -> Result<Changelog, HgError> {
533 542 Changelog::open(
534 543 &self.store_vfs(),
535 544 self.default_revlog_options(RevlogType::Changelog)?,
536 545 )
537 546 }
538 547
539 548 pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
540 549 self.changelog.get_or_init(|| self.new_changelog())
541 550 }
542 551
543 552 pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
544 553 self.changelog.get_mut_or_init(|| self.new_changelog())
545 554 }
546 555
547 556 fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
548 557 Manifestlog::open(
549 558 &self.store_vfs(),
550 559 self.default_revlog_options(RevlogType::Manifestlog)?,
551 560 )
552 561 }
553 562
554 563 pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
555 564 self.manifestlog.get_or_init(|| self.new_manifestlog())
556 565 }
557 566
558 567 pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
559 568 self.manifestlog.get_mut_or_init(|| self.new_manifestlog())
560 569 }
561 570
562 571 /// Returns the manifest of the *changeset* with the given node ID
563 572 pub fn manifest_for_node(
564 573 &self,
565 574 node: impl Into<NodePrefix>,
566 575 ) -> Result<Manifest, RevlogError> {
567 576 self.manifestlog()?.data_for_node(
568 577 self.changelog()?
569 578 .data_for_node(node.into())?
570 579 .manifest_node()?
571 580 .into(),
572 581 )
573 582 }
574 583
575 584 /// Returns the manifest of the *changeset* with the given revision number
576 585 pub fn manifest_for_rev(
577 586 &self,
578 587 revision: UncheckedRevision,
579 588 ) -> Result<Manifest, RevlogError> {
580 589 self.manifestlog()?.data_for_node(
581 590 self.changelog()?
582 591 .data_for_rev(revision)?
583 592 .manifest_node()?
584 593 .into(),
585 594 )
586 595 }
587 596
588 597 pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
589 598 if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
590 599 Ok(entry.tracked())
591 600 } else {
592 601 Ok(false)
593 602 }
594 603 }
595 604
596 605 pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
597 606 Filelog::open(
598 607 self,
599 608 path,
600 609 self.default_revlog_options(RevlogType::Filelog)?,
601 610 )
602 611 }
    /// Write to disk any updates that were made through `dirstate_map_mut`.
    ///
    /// The "wlock" must be held while calling this.
    /// See for example `try_with_wlock_no_wait`.
    ///
    /// TODO: have a `WritableRepo` type only accessible while holding the
    /// lock?
    pub fn write_dirstate(&self) -> Result<(), DirstateError> {
        let map = self.dirstate_map()?;
        // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
        // it’s unset
        let parents = self.dirstate_parents()?;
        let (packed_dirstate, old_uuid_to_remove) = if self.use_dirstate_v2() {
            // Dirstate-v2: a small "dirstate" docket file points at a
            // separate data file identified by a UUID.
            let (identity, uuid, data_size) =
                self.get_dirstate_data_file_integrity()?;
            let identity_changed = identity != map.old_identity();
            let uuid_changed = uuid.as_deref() != map.old_uuid();
            let data_length_changed = data_size != map.old_data_size();

            if identity_changed || uuid_changed || data_length_changed {
                // If any of identity, uuid or length have changed since
                // last disk read, don't write.
                // This is fine because either we're in a command that doesn't
                // write anything too important (like `hg status`), or we're in
                // `hg add` and we're supposed to have taken the lock before
                // reading anyway.
                //
                // TODO complain loudly if we've changed anything important
                // without taking the lock.
                // (see `hg help config.format.use-dirstate-tracked-hint`)
                log::debug!(
                    "dirstate has changed since last read, not updating."
                );
                return Ok(());
            }

            let uuid_opt = map.old_uuid();
            let write_mode = if uuid_opt.is_some() {
                DirstateMapWriteMode::Auto
            } else {
                DirstateMapWriteMode::ForceNewDataFile
            };
            let (data, tree_metadata, append, old_data_size) =
                map.pack_v2(write_mode)?;

            // Reuse the uuid, or generate a new one, keeping the old for
            // deletion.
            let (uuid, old_uuid) = match uuid_opt {
                Some(uuid) => {
                    let as_str = std::str::from_utf8(uuid)
                        .map_err(|_| {
                            HgError::corrupted(
                                "non-UTF-8 dirstate data file ID",
                            )
                        })?
                        .to_owned();
                    if append {
                        (as_str, None)
                    } else {
                        (DirstateDocket::new_uid(), Some(as_str))
                    }
                }
                None => (DirstateDocket::new_uid(), None),
            };

            let data_filename = format!("dirstate.{}", uuid);
            let data_filename = self.hg_vfs().join(data_filename);
            let mut options = std::fs::OpenOptions::new();
            options.write(true);

            // Why are we not using the O_APPEND flag when appending?
            //
            // - O_APPEND makes it trickier to deal with garbage at the end of
            //   the file, left by a previous uncommitted transaction. By
            //   starting the write at [old_data_size] we make sure we erase
            //   all such garbage.
            //
            // - O_APPEND requires to special-case 0-byte writes, whereas we
            //   don't need that.
            //
            // - Some OSes have bugs in the implementation of O_APPEND:
            //   revlog.py talks about a Solaris bug, but we also saw some ZFS
            //   bug: https://github.com/openzfs/zfs/pull/3124,
            //   https://github.com/openzfs/zfs/issues/13370
            //
            if !append {
                log::trace!("creating a new dirstate data file");
                options.create_new(true);
            } else {
                log::trace!("appending to the dirstate data file");
            }

            let data_size = (|| {
                // TODO: loop and try another random ID if !append and this
                // returns `ErrorKind::AlreadyExists`? Collision chance of two
                // random IDs is one in 2**32
                let mut file = options.open(&data_filename)?;
                if append {
                    file.seek(SeekFrom::Start(old_data_size as u64))?;
                }
                file.write_all(&data)?;
                file.flush()?;
                file.stream_position()
            })()
            .when_writing_file(&data_filename)?;

            let packed_dirstate = DirstateDocket::serialize(
                parents,
                tree_metadata,
                data_size,
                uuid.as_bytes(),
            )
            .map_err(|_: std::num::TryFromIntError| {
                HgError::corrupted("overflow in dirstate docket serialization")
            })?;

            (packed_dirstate, old_uuid)
        } else {
            let identity = self.dirstate_identity()?;
            if identity != map.old_identity() {
                // If identity changed since last disk read, don't write.
                // This is fine because either we're in a command that doesn't
                // write anything too important (like `hg status`), or we're in
                // `hg add` and we're supposed to have taken the lock before
                // reading anyway.
                //
                // TODO complain loudly if we've changed anything important
                // without taking the lock.
                // (see `hg help config.format.use-dirstate-tracked-hint`)
                log::debug!(
                    "dirstate has changed since last read, not updating."
                );
                return Ok(());
            }
            (map.pack_v1(parents)?, None)
        };

        let vfs = self.hg_vfs();
        vfs.atomic_write("dirstate", &packed_dirstate)?;
        if let Some(uuid) = old_uuid_to_remove {
            // Remove the old data file after the new docket pointing to the
            // new data file was written.
            vfs.remove_file(format!("dirstate.{}", uuid))?;
        }
        Ok(())
    }
749 758
    /// Build the `RevlogOpenOptions` matching this repository's requirements
    /// and configuration for the given revlog kind.
    pub fn default_revlog_options(
        &self,
        revlog_type: RevlogType,
    ) -> Result<RevlogOpenOptions, HgError> {
        let requirements = self.requirements();
        let is_changelog = revlog_type == RevlogType::Changelog;
        // Pick the most specific format the requirements allow:
        // changelog-v2 (changelog only), then revlog v2, v1, and v0 fallback.
        let version = if is_changelog
            && requirements.contains(CHANGELOGV2_REQUIREMENT)
        {
            let compute_rank = self
                .config()
                .get_bool(b"experimental", b"changelog-v2.compute-rank")?;
            RevlogVersionOptions::ChangelogV2 { compute_rank }
        } else if requirements.contains(REVLOGV2_REQUIREMENT) {
            RevlogVersionOptions::V2
        } else if requirements.contains(REVLOGV1_REQUIREMENT) {
            RevlogVersionOptions::V1 {
                general_delta: requirements.contains(GENERALDELTA_REQUIREMENT),
                // inline storage is disabled for the changelog
                inline: !is_changelog,
            }
        } else {
            RevlogVersionOptions::V0
        };
        Ok(RevlogOpenOptions {
            version,
            // We don't need to dance around the slow path like in the Python
            // implementation since we know we have access to the fast code.
            use_nodemap: requirements.contains(NODEMAP_REQUIREMENT),
            delta_config: RevlogDeltaConfig::new(
                self.config(),
                self.requirements(),
                revlog_type,
            )?,
            data_config: RevlogDataConfig::new(
                self.config(),
                self.requirements(),
            )?,
            feature_config: RevlogFeatureConfig::new(
                self.config(),
                requirements,
            )?,
        })
    }
793 802 }
794 803
/// Lazily-initialized component of `Repo` with interior mutability
///
/// Unlike `OnceCell`, the stored value can later be "deinitialized" by
/// resetting the inner `Option` to `None`. The initialization function is
/// supplied at access time rather than at construction time.
struct LazyCell<T> {
    value: RefCell<Option<T>>,
}

impl<T> LazyCell<T> {
    /// Create an empty, uninitialized cell.
    fn new() -> Self {
        Self {
            value: RefCell::new(None),
        }
    }

    /// Store `value`, replacing any previously-initialized content.
    fn set(&self, value: T) {
        self.value.replace(Some(value));
    }

    /// Borrow the value, running `init` first if the cell is still empty.
    fn get_or_init<E>(
        &self,
        init: impl Fn() -> Result<T, E>,
    ) -> Result<Ref<T>, E> {
        let mut guard = self.value.borrow();
        if guard.is_none() {
            // Release the shared borrow before taking the exclusive one:
            // `borrow_mut` is only reached when initialization is actually
            // needed, so outstanding read-only borrows never cause a panic
            // on the already-initialized path.
            drop(guard);
            *self.value.borrow_mut() = Some(init()?);
            guard = self.value.borrow();
        }
        Ok(Ref::map(guard, |opt| opt.as_ref().unwrap()))
    }

    /// Mutably borrow the value, running `init` first if the cell is empty.
    fn get_mut_or_init<E>(
        &self,
        init: impl Fn() -> Result<T, E>,
    ) -> Result<RefMut<T>, E> {
        let mut guard = self.value.borrow_mut();
        if guard.is_none() {
            *guard = Some(init()?);
        }
        Ok(RefMut::map(guard, |opt| opt.as_mut().unwrap()))
    }
}
@@ -1,183 +1,185
1 1 use crate::errors::{HgError, HgResultExt};
2 2 use crate::repo::Repo;
3 3 use crate::utils::join_display;
4 use crate::vfs::Vfs;
4 use crate::vfs::VfsImpl;
5 5 use std::collections::HashSet;
6 6
7 7 fn parse(bytes: &[u8]) -> Result<HashSet<String>, HgError> {
8 8 // The Python code reading this file uses `str.splitlines`
9 9 // which looks for a number of line separators (even including a couple of
10 10 // non-ASCII ones), but Python code writing it always uses `\n`.
11 11 let lines = bytes.split(|&byte| byte == b'\n');
12 12
13 13 lines
14 14 .filter(|line| !line.is_empty())
15 15 .map(|line| {
16 16 // Python uses Unicode `str.isalnum` but feature names are all
17 17 // ASCII
18 18 if line[0].is_ascii_alphanumeric() && line.is_ascii() {
19 19 Ok(String::from_utf8(line.into()).unwrap())
20 20 } else {
21 21 Err(HgError::corrupted("parse error in 'requires' file"))
22 22 }
23 23 })
24 24 .collect()
25 25 }
26 26
/// Read and parse the `requires` file from `hg_vfs`; the file must exist.
pub(crate) fn load(hg_vfs: VfsImpl) -> Result<HashSet<String>, HgError> {
    parse(&hg_vfs.read("requires")?)
}
30 30
/// Read and parse the `requires` file from `hg_vfs`, treating a missing
/// file as an empty set of requirements.
pub(crate) fn load_if_exists(
    hg_vfs: &VfsImpl,
) -> Result<HashSet<String>, HgError> {
    if let Some(bytes) = hg_vfs.read("requires").io_not_found_as_none()? {
        parse(&bytes)
    } else {
        // Treat a missing file the same as an empty file.
        // From `mercurial/localrepo.py`:
        // > requires file contains a newline-delimited list of
        // > features/capabilities the opener (us) must have in order to use
        // > the repository. This file was introduced in Mercurial 0.9.2,
        // > which means very old repositories may not have one. We assume
        // > a missing file translates to no requirements.
        Ok(HashSet::new())
    }
}
45 47
46 48 pub(crate) fn check(repo: &Repo) -> Result<(), HgError> {
47 49 let unknown: Vec<_> = repo
48 50 .requirements()
49 51 .iter()
50 52 .map(String::as_str)
51 53 // .filter(|feature| !ALL_SUPPORTED.contains(feature.as_str()))
52 54 .filter(|feature| {
53 55 !REQUIRED.contains(feature) && !SUPPORTED.contains(feature)
54 56 })
55 57 .collect();
56 58 if !unknown.is_empty() {
57 59 return Err(HgError::unsupported(format!(
58 60 "repository requires feature unknown to this Mercurial: {}",
59 61 join_display(&unknown, ", ")
60 62 )));
61 63 }
62 64 let missing: Vec<_> = REQUIRED
63 65 .iter()
64 66 .filter(|&&feature| !repo.requirements().contains(feature))
65 67 .collect();
66 68 if !missing.is_empty() {
67 69 return Err(HgError::unsupported(format!(
68 70 "repository is missing feature required by this Mercurial: {}",
69 71 join_display(&missing, ", ")
70 72 )));
71 73 }
72 74 Ok(())
73 75 }
74 76
75 77 /// rhg does not support repositories that are *missing* any of these features
76 78 const REQUIRED: &[&str] = &["revlogv1", "store", "fncache", "dotencode"];
77 79
78 80 /// rhg supports repository with or without these
79 81 const SUPPORTED: &[&str] = &[
80 82 GENERALDELTA_REQUIREMENT,
81 83 SHARED_REQUIREMENT,
82 84 SHARESAFE_REQUIREMENT,
83 85 SPARSEREVLOG_REQUIREMENT,
84 86 RELATIVE_SHARED_REQUIREMENT,
85 87 REVLOG_COMPRESSION_ZSTD,
86 88 DIRSTATE_V2_REQUIREMENT,
87 89 DIRSTATE_TRACKED_HINT_V1,
88 90 // As of this writing everything rhg does is read-only.
89 91 // When it starts writing to the repository, it’ll need to either keep the
90 92 // persistent nodemap up to date or remove this entry:
91 93 NODEMAP_REQUIREMENT,
92 94 // Not all commands support `sparse` and `narrow`. The commands that do
93 95 // not should opt out by checking `has_sparse` and `has_narrow`.
94 96 SPARSE_REQUIREMENT,
95 97 NARROW_REQUIREMENT,
96 98 // rhg doesn't care about bookmarks at all yet
97 99 BOOKMARKS_IN_STORE_REQUIREMENT,
98 100 ];
99 101
100 102 // Copied from mercurial/requirements.py:
101 103
102 104 pub const DIRSTATE_V2_REQUIREMENT: &str = "dirstate-v2";
103 105 pub const GENERALDELTA_REQUIREMENT: &str = "generaldelta";
104 106
105 107 /// A repository that uses the tracked hint dirstate file
106 108 #[allow(unused)]
107 109 pub const DIRSTATE_TRACKED_HINT_V1: &str = "dirstate-tracked-key-v1";
108 110
109 111 /// When narrowing is finalized and no longer subject to format changes,
110 112 /// we should move this to just "narrow" or similar.
111 113 #[allow(unused)]
112 114 pub const NARROW_REQUIREMENT: &str = "narrowhg-experimental";
113 115
114 116 /// Bookmarks must be stored in the `store` part of the repository and will be
/// shared across shares
116 118 #[allow(unused)]
117 119 pub const BOOKMARKS_IN_STORE_REQUIREMENT: &str = "bookmarksinstore";
118 120
119 121 /// Enables sparse working directory usage
120 122 #[allow(unused)]
121 123 pub const SPARSE_REQUIREMENT: &str = "exp-sparse";
122 124
123 125 /// Enables the internal phase which is used to hide changesets instead
124 126 /// of stripping them
125 127 #[allow(unused)]
126 128 pub const INTERNAL_PHASE_REQUIREMENT: &str = "internal-phase";
127 129
128 130 /// Stores manifest in Tree structure
129 131 #[allow(unused)]
130 132 pub const TREEMANIFEST_REQUIREMENT: &str = "treemanifest";
131 133
132 134 /// Whether to use the "RevlogNG" or V1 of the revlog format
133 135 #[allow(unused)]
134 136 pub const REVLOGV1_REQUIREMENT: &str = "revlogv1";
135 137
136 138 /// Increment the sub-version when the revlog v2 format changes to lock out old
137 139 /// clients.
138 140 #[allow(unused)]
139 141 pub const REVLOGV2_REQUIREMENT: &str = "exp-revlogv2.1";
140 142
141 143 /// Increment the sub-version when the revlog v2 format changes to lock out old
142 144 /// clients.
143 145 #[allow(unused)]
144 146 pub const CHANGELOGV2_REQUIREMENT: &str = "exp-changelog-v2";
145 147
146 148 /// A repository with the sparserevlog feature will have delta chains that
147 149 /// can spread over a larger span. Sparse reading cuts these large spans into
148 150 /// pieces, so that each piece isn't too big.
149 151 /// Without the sparserevlog capability, reading from the repository could use
150 152 /// huge amounts of memory, because the whole span would be read at once,
151 153 /// including all the intermediate revisions that aren't pertinent for the
152 154 /// chain. This is why once a repository has enabled sparse-read, it becomes
153 155 /// required.
154 156 #[allow(unused)]
155 157 pub const SPARSEREVLOG_REQUIREMENT: &str = "sparserevlog";
156 158
/// A repository with the copies-sidedata-changeset requirement will store
158 160 /// copies related information in changeset's sidedata.
159 161 #[allow(unused)]
160 162 pub const COPIESSDC_REQUIREMENT: &str = "exp-copies-sidedata-changeset";
161 163
162 164 /// The repository use persistent nodemap for the changelog and the manifest.
163 165 #[allow(unused)]
164 166 pub const NODEMAP_REQUIREMENT: &str = "persistent-nodemap";
165 167
166 168 /// Denotes that the current repository is a share
167 169 #[allow(unused)]
168 170 pub const SHARED_REQUIREMENT: &str = "shared";
169 171
170 172 /// Denotes that current repository is a share and the shared source path is
171 173 /// relative to the current repository root path
172 174 #[allow(unused)]
173 175 pub const RELATIVE_SHARED_REQUIREMENT: &str = "relshared";
174 176
175 177 /// A repository with share implemented safely. The repository has different
176 178 /// store and working copy requirements i.e. both `.hg/requires` and
177 179 /// `.hg/store/requires` are present.
178 180 #[allow(unused)]
179 181 pub const SHARESAFE_REQUIREMENT: &str = "share-safe";
180 182
181 183 /// A repository that use zstd compression inside its revlog
182 184 #[allow(unused)]
183 185 pub const REVLOG_COMPRESSION_ZSTD: &str = "revlog-compression-zstd";
@@ -1,762 +1,764
1 1 use std::ascii::escape_default;
2 2 use std::borrow::Cow;
3 3 use std::collections::BTreeMap;
4 4 use std::fmt::{Debug, Formatter};
5 5 use std::{iter, str};
6 6
7 7 use chrono::{DateTime, FixedOffset, NaiveDateTime};
8 8 use itertools::{Either, Itertools};
9 9
10 10 use crate::errors::HgError;
11 11 use crate::revlog::Index;
12 12 use crate::revlog::Revision;
13 13 use crate::revlog::{Node, NodePrefix};
14 14 use crate::revlog::{Revlog, RevlogEntry, RevlogError};
15 15 use crate::utils::hg_path::HgPath;
16 use crate::vfs::Vfs;
16 use crate::vfs::VfsImpl;
17 17 use crate::{Graph, GraphError, RevlogOpenOptions, UncheckedRevision};
18 18
/// A specialized `Revlog` to work with changelog data format.
///
/// Constructed via [`Changelog::open`] from the store vfs.
pub struct Changelog {
    /// The generic `revlog` format.
    pub(crate) revlog: Revlog,
}
24 24
impl Changelog {
    /// Open the `changelog` of a repository given by its root.
    pub fn open(
        store_vfs: &VfsImpl,
        options: RevlogOpenOptions,
    ) -> Result<Self, HgError> {
        let revlog = Revlog::open(store_vfs, "00changelog.i", None, options)?;
        Ok(Self { revlog })
    }

    /// Return the `ChangelogRevisionData` for the given node ID.
    pub fn data_for_node(
        &self,
        node: NodePrefix,
    ) -> Result<ChangelogRevisionData, RevlogError> {
        let rev = self.revlog.rev_from_node(node)?;
        self.entry_for_checked_rev(rev)?.data()
    }

    /// Return the [`ChangelogEntry`] for the given revision number.
    pub fn entry_for_rev(
        &self,
        rev: UncheckedRevision,
    ) -> Result<ChangelogEntry, RevlogError> {
        let revlog_entry = self.revlog.get_entry(rev)?;
        Ok(ChangelogEntry { revlog_entry })
    }

    /// Same as [`Self::entry_for_rev`] for checked revisions.
    fn entry_for_checked_rev(
        &self,
        rev: Revision,
    ) -> Result<ChangelogEntry, RevlogError> {
        let revlog_entry = self.revlog.get_entry_for_checked_rev(rev)?;
        Ok(ChangelogEntry { revlog_entry })
    }

    /// Return the [`ChangelogRevisionData`] for the given revision number.
    ///
    /// This is a useful shortcut in case the caller does not need the
    /// generic revlog information (parents, hashes etc). Otherwise
    /// consider taking a [`ChangelogEntry`] with
    /// [entry_for_rev](`Self::entry_for_rev`) and doing everything from there.
    pub fn data_for_rev(
        &self,
        rev: UncheckedRevision,
    ) -> Result<ChangelogRevisionData, RevlogError> {
        self.entry_for_rev(rev)?.data()
    }

    /// Node ID of the given revision, if present in the underlying revlog.
    pub fn node_from_rev(&self, rev: UncheckedRevision) -> Option<&Node> {
        self.revlog.node_from_rev(rev)
    }

    /// Resolve a (possibly abbreviated) node ID to a revision number.
    pub fn rev_from_node(
        &self,
        node: NodePrefix,
    ) -> Result<Revision, RevlogError> {
        self.revlog.rev_from_node(node)
    }

    /// Access the underlying revlog index.
    pub fn get_index(&self) -> &Index {
        &self.revlog.index
    }
}
90 90
impl Graph for Changelog {
    /// Delegate parent lookup to the underlying revlog.
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
        self.revlog.parents(rev)
    }
}
96 96
/// A specialized `RevlogEntry` for `changelog` data format
///
/// This is a `RevlogEntry` with the added semantics that the associated
/// data should meet the requirements for `changelog`, materialized by
/// the fact that `data()` constructs a `ChangelogRevisionData`.
/// In case that promise would be broken, the `data` method returns an error.
#[derive(Clone)]
pub struct ChangelogEntry<'changelog> {
    /// Same data, as a generic `RevlogEntry`.
    pub(crate) revlog_entry: RevlogEntry<'changelog>,
}
108 108
impl<'changelog> ChangelogEntry<'changelog> {
    /// Interpret this entry's bytes as [`ChangelogRevisionData`].
    ///
    /// An empty payload stands for the null revision; any other payload
    /// that fails to parse is reported as repository corruption.
    pub fn data<'a>(
        &'a self,
    ) -> Result<ChangelogRevisionData<'changelog>, RevlogError> {
        let bytes = self.revlog_entry.data()?;
        if bytes.is_empty() {
            Ok(ChangelogRevisionData::null())
        } else {
            Ok(ChangelogRevisionData::new(bytes).map_err(|err| {
                RevlogError::Other(HgError::CorruptedRepository(format!(
                    "Invalid changelog data for revision {}: {:?}",
                    self.revlog_entry.revision(),
                    err
                )))
            })?)
        }
    }

    /// Obtain a reference to the underlying `RevlogEntry`.
    ///
    /// This allows the caller to access the information that is common
    /// to all revlog entries: revision number, node id, parent revisions etc.
    pub fn as_revlog_entry(&self) -> &RevlogEntry {
        &self.revlog_entry
    }

    /// First parent of this entry, if any.
    pub fn p1_entry(&self) -> Result<Option<ChangelogEntry>, RevlogError> {
        Ok(self
            .revlog_entry
            .p1_entry()?
            .map(|revlog_entry| Self { revlog_entry }))
    }

    /// Second parent of this entry, if any.
    pub fn p2_entry(&self) -> Result<Option<ChangelogEntry>, RevlogError> {
        Ok(self
            .revlog_entry
            .p2_entry()?
            .map(|revlog_entry| Self { revlog_entry }))
    }
}
149 149
/// `Changelog` entry which knows how to interpret the `changelog` data bytes.
///
/// The `…_end` offsets below index into `bytes` and delimit the successive
/// newline-separated sections of the entry.
#[derive(PartialEq)]
pub struct ChangelogRevisionData<'changelog> {
    /// The data bytes of the `changelog` entry.
    bytes: Cow<'changelog, [u8]>,
    /// The end offset for the hex manifest (not including the newline)
    manifest_end: usize,
    /// The end offset for the user+email (not including the newline)
    user_end: usize,
    /// The end offset for the timestamp+timezone+extras (not including the
    /// newline)
    timestamp_end: usize,
    /// The end offset for the file list (not including the newline)
    files_end: usize,
}
165 165
impl<'changelog> ChangelogRevisionData<'changelog> {
    /// Parse the raw changeset bytes, computing the end offsets of the
    /// manifest, user, timestamp and files sections.
    fn new(bytes: Cow<'changelog, [u8]>) -> Result<Self, HgError> {
        let mut line_iter = bytes.split(|b| b == &b'\n');
        let manifest_end = line_iter
            .next()
            .expect("Empty iterator from split()?")
            .len();
        let user_slice = line_iter.next().ok_or_else(|| {
            HgError::corrupted("Changeset data truncated after manifest line")
        })?;
        let user_end = manifest_end + 1 + user_slice.len();
        let timestamp_slice = line_iter.next().ok_or_else(|| {
            HgError::corrupted("Changeset data truncated after user line")
        })?;
        let timestamp_end = user_end + 1 + timestamp_slice.len();
        let mut files_end = timestamp_end + 1;
        // Walk the file list until the blank line separating it from the
        // description.
        loop {
            let line = line_iter.next().ok_or_else(|| {
                HgError::corrupted("Changeset data truncated in files list")
            })?;
            if line.is_empty() {
                if files_end == bytes.len() {
                    // The list of files ended with a single newline (there
                    // should be two)
                    return Err(HgError::corrupted(
                        "Changeset data truncated after files list",
                    ));
                }
                files_end -= 1;
                break;
            }
            files_end += line.len() + 1;
        }

        Ok(Self {
            bytes,
            manifest_end,
            user_end,
            timestamp_end,
            files_end,
        })
    }

    /// Data for the null revision: null manifest node, empty user, epoch
    /// timestamp, no files, empty description.
    fn null() -> Self {
        Self::new(Cow::Borrowed(
            b"0000000000000000000000000000000000000000\n\n0 0\n\n",
        ))
        .unwrap()
    }

    /// Return an iterator over the lines of the entry.
    pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
        self.bytes.split(|b| b == &b'\n')
    }

    /// Return the node id of the `manifest` referenced by this `changelog`
    /// entry.
    pub fn manifest_node(&self) -> Result<Node, HgError> {
        let manifest_node_hex = &self.bytes[..self.manifest_end];
        Node::from_hex_for_repo(manifest_node_hex)
    }

    /// The full user string (usually a name followed by an email enclosed in
    /// angle brackets)
    pub fn user(&self) -> &[u8] {
        &self.bytes[self.manifest_end + 1..self.user_end]
    }

    /// The full timestamp line (timestamp in seconds, offset in seconds, and
    /// possibly extras)
    // TODO: We should expose this in a more useful way
    pub fn timestamp_line(&self) -> &[u8] {
        &self.bytes[self.user_end + 1..self.timestamp_end]
    }

    /// Parsed timestamp.
    pub fn timestamp(&self) -> Result<DateTime<FixedOffset>, HgError> {
        parse_timestamp(self.timestamp_line())
    }

    /// Optional commit extras.
    pub fn extra(&self) -> Result<BTreeMap<String, Vec<u8>>, HgError> {
        parse_timestamp_line_extra(self.timestamp_line())
    }

    /// The files changed in this revision.
    pub fn files(&self) -> impl Iterator<Item = &HgPath> {
        if self.timestamp_end == self.files_end {
            // Empty files section: yield nothing rather than one empty path.
            Either::Left(iter::empty())
        } else {
            Either::Right(
                self.bytes[self.timestamp_end + 1..self.files_end]
                    .split(|b| b == &b'\n')
                    .map(HgPath::new),
            )
        }
    }

    /// The change description.
    pub fn description(&self) -> &[u8] {
        // `+ 2` skips the newline ending the files list and the blank
        // separator line before the description.
        &self.bytes[self.files_end + 2..]
    }
}
269 269
impl Debug for ChangelogRevisionData<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Render each section as an escaped string, slicing `bytes` with the
        // same ranges the accessor methods use.
        f.debug_struct("ChangelogRevisionData")
            .field("bytes", &debug_bytes(&self.bytes))
            .field("manifest", &debug_bytes(&self.bytes[..self.manifest_end]))
            .field(
                "user",
                &debug_bytes(
                    &self.bytes[self.manifest_end + 1..self.user_end],
                ),
            )
            .field(
                "timestamp",
                &debug_bytes(
                    &self.bytes[self.user_end + 1..self.timestamp_end],
                ),
            )
            .field(
                "files",
                &debug_bytes(
                    &self.bytes[self.timestamp_end + 1..self.files_end],
                ),
            )
            .field(
                "description",
                &debug_bytes(&self.bytes[self.files_end + 2..]),
            )
            .finish()
    }
}
300 300
/// Render raw bytes as a printable, ASCII-escaped string for debug output.
fn debug_bytes(bytes: &[u8]) -> String {
    let escaped: Vec<u8> =
        bytes.iter().flat_map(|b| escape_default(*b)).collect();
    String::from_utf8_lossy(&escaped).into_owned()
}
307 307
/// Parse the raw bytes of the timestamp line from a changelog entry.
///
/// According to the documentation in `hg help dates` and the
/// implementation in `changelog.py`, the format of the timestamp line
/// is `time tz extra\n` where:
///
/// - `time` is an ASCII-encoded signed int or float denoting a UTC timestamp
///   as seconds since the UNIX epoch.
///
/// - `tz` is the timezone offset as an ASCII-encoded signed integer denoting
///   seconds WEST of UTC (so negative for timezones east of UTC, which is the
///   opposite of the sign in ISO 8601 timestamps).
///
/// - `extra` is an optional set of NUL-delimited key-value pairs, with the key
///   and value in each pair separated by an ASCII colon. Keys are limited to
///   ASCII letters, digits, hyphens, and underscores, whereas values can be
///   arbitrary bytes.
fn parse_timestamp(
    timestamp_line: &[u8],
) -> Result<DateTime<FixedOffset>, HgError> {
    let mut parts = timestamp_line.splitn(3, |c| *c == b' ');

    let timestamp_bytes = parts
        .next()
        .ok_or_else(|| HgError::corrupted("missing timestamp"))?;
    let timestamp_str = str::from_utf8(timestamp_bytes).map_err(|e| {
        HgError::corrupted(format!("timestamp is not valid UTF-8: {e}"))
    })?;
    let timestamp_utc = timestamp_str
        .parse()
        .map_err(|e| {
            HgError::corrupted(format!("failed to parse timestamp: {e}"))
        })
        .and_then(|secs| {
            NaiveDateTime::from_timestamp_opt(secs, 0).ok_or_else(|| {
                HgError::corrupted(format!(
                    "integer timestamp out of valid range: {secs}"
                ))
            })
        })
        // Attempt to parse the timestamp as a float if we can't parse
        // it as an int. It doesn't seem like float timestamps are actually
        // used in practice, but the Python code supports them.
        .or_else(|_| parse_float_timestamp(timestamp_str))?;

    let timezone_bytes = parts
        .next()
        .ok_or_else(|| HgError::corrupted("missing timezone"))?;
    let timezone_secs: i32 = str::from_utf8(timezone_bytes)
        .map_err(|e| {
            HgError::corrupted(format!("timezone is not valid UTF-8: {e}"))
        })?
        .parse()
        .map_err(|e| {
            HgError::corrupted(format!("timezone is not an integer: {e}"))
        })?;
    // `west_opt` matches the stored convention of seconds west of UTC.
    let timezone = FixedOffset::west_opt(timezone_secs)
        .ok_or_else(|| HgError::corrupted("timezone offset out of bounds"))?;

    Ok(DateTime::from_naive_utc_and_offset(timestamp_utc, timezone))
}
369 369
/// Attempt to parse the given string as floating-point timestamp, and
/// convert the result into a `chrono::NaiveDateTime`.
///
/// Values outside chrono's representable range are reported as repository
/// corruption.
fn parse_float_timestamp(
    timestamp_str: &str,
) -> Result<NaiveDateTime, HgError> {
    let timestamp = timestamp_str.parse::<f64>().map_err(|e| {
        HgError::corrupted(format!("failed to parse timestamp: {e}"))
    })?;

    // To construct a `NaiveDateTime` we'll need to convert the float
    // into signed integer seconds and unsigned integer nanoseconds.
    let mut secs = timestamp.trunc() as i64;
    let mut subsecs = timestamp.fract();

    // If the timestamp is negative, we need to express the fractional
    // component as positive nanoseconds since the previous second.
    if timestamp < 0.0 {
        secs -= 1;
        subsecs += 1.0;
    }

    // This cast should be safe because the fractional component is
    // by definition less than 1.0, so this value should not exceed
    // 1 billion, which is representable as an f64 without loss of
    // precision and should fit into a u32 without overflowing.
    //
    // (Any loss of precision in the fractional component will have
    // already happened at the time of initial parsing; in general,
    // f64s are insufficiently precise to provide nanosecond-level
    // precision with present-day timestamps.)
    let nsecs = (subsecs * 1_000_000_000.0) as u32;

    NaiveDateTime::from_timestamp_opt(secs, nsecs).ok_or_else(|| {
        HgError::corrupted(format!(
            "float timestamp out of valid range: {timestamp}"
        ))
    })
}
408 408
/// Decode changeset extra fields.
///
/// Extras are null-delimited key-value pairs where the key consists of ASCII
/// alphanumeric characters plus hyphens and underscores, and the value can
/// contain arbitrary bytes.
///
/// Returns a corruption error for an empty or non-conforming key, or for a
/// pair with no `:` separator.
fn decode_extra(extra: &[u8]) -> Result<BTreeMap<String, Vec<u8>>, HgError> {
    extra
        .split(|c| *c == b'\0')
        .map(|pair| {
            // Each pair is unescaped as a whole before being split into
            // key and value.
            let pair = unescape_extra(pair);
            let mut iter = pair.splitn(2, |c| *c == b':');

            let key_bytes =
                iter.next().filter(|k| !k.is_empty()).ok_or_else(|| {
                    HgError::corrupted("empty key in changeset extras")
                })?;

            let key = str::from_utf8(key_bytes)
                .ok()
                .filter(|k| {
                    k.chars().all(|c| {
                        c.is_ascii_alphanumeric() || c == '_' || c == '-'
                    })
                })
                .ok_or_else(|| {
                    let key = String::from_utf8_lossy(key_bytes);
                    HgError::corrupted(format!(
                        "invalid key in changeset extras: {key}",
                    ))
                })?
                .to_string();

            let value = iter.next().map(Into::into).ok_or_else(|| {
                HgError::corrupted(format!(
                    "missing value for changeset extra: {key}"
                ))
            })?;

            Ok((key, value))
        })
        .collect()
}
451 451
452 452 /// Parse the extra fields from a changeset's timestamp line.
453 453 fn parse_timestamp_line_extra(
454 454 timestamp_line: &[u8],
455 455 ) -> Result<BTreeMap<String, Vec<u8>>, HgError> {
456 456 Ok(timestamp_line
457 457 .splitn(3, |c| *c == b' ')
458 458 .nth(2)
459 459 .map(decode_extra)
460 460 .transpose()?
461 461 .unwrap_or_default())
462 462 }
463 463
464 464 /// Decode Mercurial's escaping for changelog extras.
465 465 ///
466 466 /// The `_string_escape` function in `changelog.py` only escapes 4 characters
467 467 /// (null, backslash, newline, and carriage return) so we only decode those.
468 468 ///
469 469 /// The Python code also includes a workaround for decoding escaped nuls
470 470 /// that are followed by an ASCII octal digit, since Python's built-in
471 471 /// `string_escape` codec will interpret that as an escaped octal byte value.
472 472 /// That workaround is omitted here since we don't support decoding octal.
473 473 fn unescape_extra(bytes: &[u8]) -> Vec<u8> {
474 474 let mut output = Vec::with_capacity(bytes.len());
475 475 let mut input = bytes.iter().copied();
476 476
477 477 while let Some(c) = input.next() {
478 478 if c != b'\\' {
479 479 output.push(c);
480 480 continue;
481 481 }
482 482
483 483 match input.next() {
484 484 Some(b'0') => output.push(b'\0'),
485 485 Some(b'\\') => output.push(b'\\'),
486 486 Some(b'n') => output.push(b'\n'),
487 487 Some(b'r') => output.push(b'\r'),
488 488 // The following cases should never occur in theory because any
489 489 // backslashes in the original input should have been escaped
490 490 // with another backslash, so it should not be possible to
491 491 // observe an escape sequence other than the 4 above.
492 492 Some(c) => output.extend_from_slice(&[b'\\', c]),
493 493 None => output.push(b'\\'),
494 494 }
495 495 }
496 496
497 497 output
498 498 }
499 499
500 500 #[cfg(test)]
501 501 mod tests {
502 502 use super::*;
503 use crate::vfs::Vfs;
503 use crate::vfs::VfsImpl;
504 504 use crate::{
505 505 RevlogDataConfig, RevlogDeltaConfig, RevlogFeatureConfig,
506 506 NULL_REVISION,
507 507 };
508 508 use pretty_assertions::assert_eq;
509 509
510 510 #[test]
511 511 fn test_create_changelogrevisiondata_invalid() {
512 512 // Completely empty
513 513 assert!(ChangelogRevisionData::new(Cow::Borrowed(b"abcd")).is_err());
514 514 // No newline after manifest
515 515 assert!(ChangelogRevisionData::new(Cow::Borrowed(b"abcd")).is_err());
516 516 // No newline after user
517 517 assert!(ChangelogRevisionData::new(Cow::Borrowed(b"abcd\n")).is_err());
518 518 // No newline after timestamp
519 519 assert!(
520 520 ChangelogRevisionData::new(Cow::Borrowed(b"abcd\n\n0 0")).is_err()
521 521 );
522 522 // Missing newline after files
523 523 assert!(ChangelogRevisionData::new(Cow::Borrowed(
524 524 b"abcd\n\n0 0\nfile1\nfile2"
525 525 ))
526 526 .is_err(),);
527 527 // Only one newline after files
528 528 assert!(ChangelogRevisionData::new(Cow::Borrowed(
529 529 b"abcd\n\n0 0\nfile1\nfile2\n"
530 530 ))
531 531 .is_err(),);
532 532 }
533 533
534 534 #[test]
535 535 fn test_create_changelogrevisiondata() {
536 536 let data = ChangelogRevisionData::new(Cow::Borrowed(
537 537 b"0123456789abcdef0123456789abcdef01234567
538 538 Some One <someone@example.com>
539 539 0 0
540 540 file1
541 541 file2
542 542
543 543 some
544 544 commit
545 545 message",
546 546 ))
547 547 .unwrap();
548 548 assert_eq!(
549 549 data.manifest_node().unwrap(),
550 550 Node::from_hex("0123456789abcdef0123456789abcdef01234567")
551 551 .unwrap()
552 552 );
553 553 assert_eq!(data.user(), b"Some One <someone@example.com>");
554 554 assert_eq!(data.timestamp_line(), b"0 0");
555 555 assert_eq!(
556 556 data.files().collect_vec(),
557 557 vec![HgPath::new("file1"), HgPath::new("file2")]
558 558 );
559 559 assert_eq!(data.description(), b"some\ncommit\nmessage");
560 560 }
561 561
562 562 #[test]
563 563 fn test_data_from_rev_null() -> Result<(), RevlogError> {
564 564 // an empty revlog will be enough for this case
565 565 let temp = tempfile::tempdir().unwrap();
566 let vfs = Vfs { base: temp.path() };
566 let vfs = VfsImpl {
567 base: temp.path().to_owned(),
568 };
567 569 std::fs::write(temp.path().join("foo.i"), b"").unwrap();
568 570 std::fs::write(temp.path().join("foo.d"), b"").unwrap();
569 571 let revlog = Revlog::open(
570 572 &vfs,
571 573 "foo.i",
572 574 None,
573 575 RevlogOpenOptions::new(
574 576 false,
575 577 RevlogDataConfig::default(),
576 578 RevlogDeltaConfig::default(),
577 579 RevlogFeatureConfig::default(),
578 580 ),
579 581 )
580 582 .unwrap();
581 583
582 584 let changelog = Changelog { revlog };
583 585 assert_eq!(
584 586 changelog.data_for_rev(NULL_REVISION.into())?,
585 587 ChangelogRevisionData::null()
586 588 );
587 589 // same with the intermediate entry object
588 590 assert_eq!(
589 591 changelog.entry_for_rev(NULL_REVISION.into())?.data()?,
590 592 ChangelogRevisionData::null()
591 593 );
592 594 Ok(())
593 595 }
594 596
595 597 #[test]
596 598 fn test_empty_files_list() {
597 599 assert!(ChangelogRevisionData::null()
598 600 .files()
599 601 .collect_vec()
600 602 .is_empty());
601 603 }
602 604
603 605 #[test]
604 606 fn test_unescape_basic() {
605 607 // '\0', '\\', '\n', and '\r' are correctly unescaped.
606 608 let expected = b"AAA\0BBB\\CCC\nDDD\rEEE";
607 609 let escaped = br"AAA\0BBB\\CCC\nDDD\rEEE";
608 610 let unescaped = unescape_extra(escaped);
609 611 assert_eq!(&expected[..], &unescaped[..]);
610 612 }
611 613
612 614 #[test]
613 615 fn test_unescape_unsupported_sequence() {
614 616 // Other escape sequences are left unaltered.
615 617 for c in 0u8..255 {
616 618 match c {
617 619 b'0' | b'\\' | b'n' | b'r' => continue,
618 620 c => {
619 621 let expected = &[b'\\', c][..];
620 622 let unescaped = unescape_extra(expected);
621 623 assert_eq!(expected, &unescaped[..]);
622 624 }
623 625 }
624 626 }
625 627 }
626 628
627 629 #[test]
628 630 fn test_unescape_trailing_backslash() {
629 631 // Trailing backslashes are OK.
630 632 let expected = br"hi\";
631 633 let unescaped = unescape_extra(expected);
632 634 assert_eq!(&expected[..], &unescaped[..]);
633 635 }
634 636
635 637 #[test]
636 638 fn test_unescape_nul_followed_by_octal() {
637 639 // Escaped NUL chars followed by octal digits are decoded correctly.
638 640 let expected = b"\x0012";
639 641 let escaped = br"\012";
640 642 let unescaped = unescape_extra(escaped);
641 643 assert_eq!(&expected[..], &unescaped[..]);
642 644 }
643 645
644 646 #[test]
645 647 fn test_parse_float_timestamp() {
646 648 let test_cases = [
647 649 // Zero should map to the UNIX epoch.
648 650 ("0.0", "1970-01-01 00:00:00"),
649 651 // Negative zero should be the same as positive zero.
650 652 ("-0.0", "1970-01-01 00:00:00"),
651 653 // Values without fractional components should work like integers.
652 654 // (Assuming the timestamp is within the limits of f64 precision.)
653 655 ("1115154970.0", "2005-05-03 21:16:10"),
654 656 // We expect some loss of precision in the fractional component
655 657 // when parsing arbitrary floating-point values.
656 658 ("1115154970.123456789", "2005-05-03 21:16:10.123456716"),
657 659 // But representable f64 values should parse losslessly.
658 660 ("1115154970.123456716", "2005-05-03 21:16:10.123456716"),
659 661 // Negative fractional components are subtracted from the epoch.
660 662 ("-1.333", "1969-12-31 23:59:58.667"),
661 663 ];
662 664
663 665 for (input, expected) in test_cases {
664 666 let res = parse_float_timestamp(input).unwrap().to_string();
665 667 assert_eq!(res, expected);
666 668 }
667 669 }
668 670
669 671 fn escape_extra(bytes: &[u8]) -> Vec<u8> {
670 672 let mut output = Vec::with_capacity(bytes.len());
671 673
672 674 for c in bytes.iter().copied() {
673 675 output.extend_from_slice(match c {
674 676 b'\0' => &b"\\0"[..],
675 677 b'\\' => &b"\\\\"[..],
676 678 b'\n' => &b"\\n"[..],
677 679 b'\r' => &b"\\r"[..],
678 680 _ => {
679 681 output.push(c);
680 682 continue;
681 683 }
682 684 });
683 685 }
684 686
685 687 output
686 688 }
687 689
688 690 fn encode_extra<K, V>(pairs: impl IntoIterator<Item = (K, V)>) -> Vec<u8>
689 691 where
690 692 K: AsRef<[u8]>,
691 693 V: AsRef<[u8]>,
692 694 {
693 695 let extras = pairs.into_iter().map(|(k, v)| {
694 696 escape_extra(&[k.as_ref(), b":", v.as_ref()].concat())
695 697 });
696 698 // Use fully-qualified syntax to avoid a future naming conflict with
697 699 // the standard library: https://github.com/rust-lang/rust/issues/79524
698 700 Itertools::intersperse(extras, b"\0".to_vec()).concat()
699 701 }
700 702
701 703 #[test]
702 704 fn test_decode_extra() {
703 705 let extra = [
704 706 ("branch".into(), b"default".to_vec()),
705 707 ("key-with-hyphens".into(), b"value1".to_vec()),
706 708 ("key_with_underscores".into(), b"value2".to_vec()),
707 709 ("empty-value".into(), b"".to_vec()),
708 710 ("binary-value".into(), (0u8..=255).collect::<Vec<_>>()),
709 711 ]
710 712 .into_iter()
711 713 .collect::<BTreeMap<String, Vec<u8>>>();
712 714
713 715 let encoded = encode_extra(&extra);
714 716 let decoded = decode_extra(&encoded).unwrap();
715 717
716 718 assert_eq!(extra, decoded);
717 719 }
718 720
719 721 #[test]
720 722 fn test_corrupt_extra() {
721 723 let test_cases = [
722 724 (&b""[..], "empty input"),
723 725 (&b"\0"[..], "unexpected null byte"),
724 726 (&b":empty-key"[..], "empty key"),
725 727 (&b"\0leading-null:"[..], "leading null"),
726 728 (&b"trailing-null:\0"[..], "trailing null"),
727 729 (&b"missing-value"[..], "missing value"),
728 730 (&b"$!@# non-alphanum-key:"[..], "non-alphanumeric key"),
729 731 (&b"\xF0\x9F\xA6\x80 non-ascii-key:"[..], "non-ASCII key"),
730 732 ];
731 733
732 734 for (extra, msg) in test_cases {
733 735 assert!(
734 736 decode_extra(extra).is_err(),
735 737 "corrupt extra should have failed to parse: {}",
736 738 msg
737 739 );
738 740 }
739 741 }
740 742
741 743 #[test]
742 744 fn test_parse_timestamp_line() {
743 745 let extra = [
744 746 ("branch".into(), b"default".to_vec()),
745 747 ("key-with-hyphens".into(), b"value1".to_vec()),
746 748 ("key_with_underscores".into(), b"value2".to_vec()),
747 749 ("empty-value".into(), b"".to_vec()),
748 750 ("binary-value".into(), (0u8..=255).collect::<Vec<_>>()),
749 751 ]
750 752 .into_iter()
751 753 .collect::<BTreeMap<String, Vec<u8>>>();
752 754
753 755 let mut line: Vec<u8> = b"1115154970 28800 ".to_vec();
754 756 line.extend_from_slice(&encode_extra(&extra));
755 757
756 758 let timestamp = parse_timestamp(&line).unwrap();
757 759 assert_eq!(&timestamp.to_rfc3339(), "2005-05-03T13:16:10-08:00");
758 760
759 761 let parsed_extra = parse_timestamp_line_extra(&line).unwrap();
760 762 assert_eq!(extra, parsed_extra);
761 763 }
762 764 }
@@ -1,245 +1,245
1 1 use crate::errors::HgError;
2 2 use crate::exit_codes;
3 3 use crate::repo::Repo;
4 4 use crate::revlog::path_encode::path_encode;
5 5 use crate::revlog::NodePrefix;
6 6 use crate::revlog::Revision;
7 7 use crate::revlog::RevlogEntry;
8 8 use crate::revlog::{Revlog, RevlogError};
9 9 use crate::utils::files::get_path_from_bytes;
10 10 use crate::utils::hg_path::HgPath;
11 11 use crate::utils::SliceExt;
12 12 use crate::Graph;
13 13 use crate::GraphError;
14 14 use crate::RevlogOpenOptions;
15 15 use crate::UncheckedRevision;
16 16 use std::path::PathBuf;
17 17
18 18 /// A specialized `Revlog` to work with file data logs.
19 19 pub struct Filelog {
20 20 /// The generic `revlog` format.
21 21 revlog: Revlog,
22 22 }
23 23
24 24 impl Graph for Filelog {
25 25 fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
26 26 self.revlog.parents(rev)
27 27 }
28 28 }
29 29
30 30 impl Filelog {
31 31 pub fn open_vfs(
32 store_vfs: &crate::vfs::Vfs<'_>,
32 store_vfs: &crate::vfs::VfsImpl,
33 33 file_path: &HgPath,
34 34 options: RevlogOpenOptions,
35 35 ) -> Result<Self, HgError> {
36 36 let index_path = store_path(file_path, b".i");
37 37 let data_path = store_path(file_path, b".d");
38 38 let revlog =
39 39 Revlog::open(store_vfs, index_path, Some(&data_path), options)?;
40 40 Ok(Self { revlog })
41 41 }
42 42
43 43 pub fn open(
44 44 repo: &Repo,
45 45 file_path: &HgPath,
46 46 options: RevlogOpenOptions,
47 47 ) -> Result<Self, HgError> {
48 48 Self::open_vfs(&repo.store_vfs(), file_path, options)
49 49 }
50 50
51 51 /// The given node ID is that of the file as found in a filelog, not of a
52 52 /// changeset.
53 53 pub fn data_for_node(
54 54 &self,
55 55 file_node: impl Into<NodePrefix>,
56 56 ) -> Result<FilelogRevisionData, RevlogError> {
57 57 let file_rev = self.revlog.rev_from_node(file_node.into())?;
58 58 self.data_for_rev(file_rev.into())
59 59 }
60 60
61 61 /// The given revision is that of the file as found in a filelog, not of a
62 62 /// changeset.
63 63 pub fn data_for_rev(
64 64 &self,
65 65 file_rev: UncheckedRevision,
66 66 ) -> Result<FilelogRevisionData, RevlogError> {
67 67 let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?.into_owned();
68 68 Ok(FilelogRevisionData(data))
69 69 }
70 70
71 71 /// The given node ID is that of the file as found in a filelog, not of a
72 72 /// changeset.
73 73 pub fn entry_for_node(
74 74 &self,
75 75 file_node: impl Into<NodePrefix>,
76 76 ) -> Result<FilelogEntry, RevlogError> {
77 77 let file_rev = self.revlog.rev_from_node(file_node.into())?;
78 78 self.entry_for_checked_rev(file_rev)
79 79 }
80 80
81 81 /// The given revision is that of the file as found in a filelog, not of a
82 82 /// changeset.
83 83 pub fn entry_for_rev(
84 84 &self,
85 85 file_rev: UncheckedRevision,
86 86 ) -> Result<FilelogEntry, RevlogError> {
87 87 Ok(FilelogEntry(self.revlog.get_entry(file_rev)?))
88 88 }
89 89
90 90 fn entry_for_checked_rev(
91 91 &self,
92 92 file_rev: Revision,
93 93 ) -> Result<FilelogEntry, RevlogError> {
94 94 Ok(FilelogEntry(
95 95 self.revlog.get_entry_for_checked_rev(file_rev)?,
96 96 ))
97 97 }
98 98 }
99 99
100 100 fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
101 101 let encoded_bytes =
102 102 path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat());
103 103 get_path_from_bytes(&encoded_bytes).into()
104 104 }
105 105
106 106 pub struct FilelogEntry<'a>(RevlogEntry<'a>);
107 107
108 108 impl FilelogEntry<'_> {
109 109 /// `self.data()` can be expensive, with decompression and delta
110 110 /// resolution.
111 111 ///
112 112 /// *Without* paying this cost, based on revlog index information
113 113 /// including `RevlogEntry::uncompressed_len`:
114 114 ///
115 115 /// * Returns `true` if the length that `self.data().file_data().len()`
116 116 /// would return is definitely **not equal** to `other_len`.
117 117 /// * Returns `false` if available information is inconclusive.
118 118 pub fn file_data_len_not_equal_to(&self, other_len: u64) -> bool {
119 119 // Relevant code that implement this behavior in Python code:
120 120 // basefilectx.cmp, filelog.size, storageutil.filerevisioncopied,
121 121 // revlog.size, revlog.rawsize
122 122
123 123 // Let’s call `file_data_len` what would be returned by
124 124 // `self.data().file_data().len()`.
125 125
126 126 if self.0.is_censored() {
127 127 let file_data_len = 0;
128 128 return other_len != file_data_len;
129 129 }
130 130
131 131 if self.0.has_length_affecting_flag_processor() {
132 132 // We can’t conclude anything about `file_data_len`.
133 133 return false;
134 134 }
135 135
136 136 // Revlog revisions (usually) have metadata for the size of
137 137 // their data after decompression and delta resolution
138 138 // as would be returned by `Revlog::get_rev_data`.
139 139 //
140 140 // For filelogs this is the file’s contents preceded by an optional
141 141 // metadata block.
142 142 let uncompressed_len = if let Some(l) = self.0.uncompressed_len() {
143 143 l as u64
144 144 } else {
145 145 // The field was set to -1, the actual uncompressed len is unknown.
146 146 // We need to decompress to say more.
147 147 return false;
148 148 };
149 149 // `uncompressed_len = file_data_len + optional_metadata_len`,
150 150 // so `file_data_len <= uncompressed_len`.
151 151 if uncompressed_len < other_len {
152 152 // Transitively, `file_data_len < other_len`.
153 153 // So `other_len != file_data_len` definitely.
154 154 return true;
155 155 }
156 156
157 157 if uncompressed_len == other_len + 4 {
158 158 // It’s possible that `file_data_len == other_len` with an empty
159 159 // metadata block (2 start marker bytes + 2 end marker bytes).
160 160 // This happens when there wouldn’t otherwise be metadata, but
161 161 // the first 2 bytes of file data happen to match a start marker
162 162 // and would be ambiguous.
163 163 return false;
164 164 }
165 165
166 166 if !self.0.has_p1() {
167 167 // There may or may not be copy metadata, so we can’t deduce more
168 168 // about `file_data_len` without computing file data.
169 169 return false;
170 170 }
171 171
172 172 // Filelog ancestry is not meaningful in the way changelog ancestry is.
173 173 // It only provides hints to delta generation.
174 174 // p1 and p2 are set to null when making a copy or rename since
175 175 // contents are likely unrelatedto what might have previously existed
176 176 // at the destination path.
177 177 //
178 178 // Conversely, since here p1 is non-null, there is no copy metadata.
179 179 // Note that this reasoning may be invalidated in the presence of
180 180 // merges made by some previous versions of Mercurial that
181 181 // swapped p1 and p2. See <https://bz.mercurial-scm.org/show_bug.cgi?id=6528>
182 182 // and `tests/test-issue6528.t`.
183 183 //
184 184 // Since copy metadata is currently the only kind of metadata
185 185 // kept in revlog data of filelogs,
186 186 // this `FilelogEntry` does not have such metadata:
187 187 let file_data_len = uncompressed_len;
188 188
189 189 file_data_len != other_len
190 190 }
191 191
192 192 pub fn data(&self) -> Result<FilelogRevisionData, HgError> {
193 193 let data = self.0.data();
194 194 match data {
195 195 Ok(data) => Ok(FilelogRevisionData(data.into_owned())),
196 196 // Errors other than `HgError` should not happen at this point
197 197 Err(e) => match e {
198 198 RevlogError::Other(hg_error) => Err(hg_error),
199 199 revlog_error => Err(HgError::abort(
200 200 revlog_error.to_string(),
201 201 exit_codes::ABORT,
202 202 None,
203 203 )),
204 204 },
205 205 }
206 206 }
207 207 }
208 208
209 209 /// The data for one revision in a filelog, uncompressed and delta-resolved.
210 210 pub struct FilelogRevisionData(Vec<u8>);
211 211
212 212 impl FilelogRevisionData {
213 213 /// Split into metadata and data
214 214 pub fn split(&self) -> Result<(Option<&[u8]>, &[u8]), HgError> {
215 215 const DELIMITER: &[u8; 2] = &[b'\x01', b'\n'];
216 216
217 217 if let Some(rest) = self.0.drop_prefix(DELIMITER) {
218 218 if let Some((metadata, data)) = rest.split_2_by_slice(DELIMITER) {
219 219 Ok((Some(metadata), data))
220 220 } else {
221 221 Err(HgError::corrupted(
222 222 "Missing metadata end delimiter in filelog entry",
223 223 ))
224 224 }
225 225 } else {
226 226 Ok((None, &self.0))
227 227 }
228 228 }
229 229
230 230 /// Returns the file contents at this revision, stripped of any metadata
231 231 pub fn file_data(&self) -> Result<&[u8], HgError> {
232 232 let (_metadata, data) = self.split()?;
233 233 Ok(data)
234 234 }
235 235
236 236 /// Consume the entry, and convert it into data, discarding any metadata,
237 237 /// if present.
238 238 pub fn into_file_data(self) -> Result<Vec<u8>, HgError> {
239 239 if let (Some(_metadata), data) = self.split()? {
240 240 Ok(data.to_owned())
241 241 } else {
242 242 Ok(self.0)
243 243 }
244 244 }
245 245 }
@@ -1,213 +1,213
1 1 use crate::errors::HgError;
2 2 use crate::revlog::{Node, NodePrefix};
3 3 use crate::revlog::{Revlog, RevlogError};
4 4 use crate::utils::hg_path::HgPath;
5 5 use crate::utils::SliceExt;
6 use crate::vfs::Vfs;
6 use crate::vfs::VfsImpl;
7 7 use crate::{
8 8 Graph, GraphError, Revision, RevlogOpenOptions, UncheckedRevision,
9 9 };
10 10
11 11 /// A specialized `Revlog` to work with `manifest` data format.
12 12 pub struct Manifestlog {
13 13 /// The generic `revlog` format.
14 14 pub(crate) revlog: Revlog,
15 15 }
16 16
17 17 impl Graph for Manifestlog {
18 18 fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
19 19 self.revlog.parents(rev)
20 20 }
21 21 }
22 22
23 23 impl Manifestlog {
24 24 /// Open the `manifest` of a repository given by its root.
25 25 pub fn open(
26 store_vfs: &Vfs,
26 store_vfs: &VfsImpl,
27 27 options: RevlogOpenOptions,
28 28 ) -> Result<Self, HgError> {
29 29 let revlog = Revlog::open(store_vfs, "00manifest.i", None, options)?;
30 30 Ok(Self { revlog })
31 31 }
32 32
33 33 /// Return the `Manifest` for the given node ID.
34 34 ///
35 35 /// Note: this is a node ID in the manifestlog, typically found through
36 36 /// `ChangelogEntry::manifest_node`. It is *not* the node ID of any
37 37 /// changeset.
38 38 ///
39 39 /// See also `Repo::manifest_for_node`
40 40 pub fn data_for_node(
41 41 &self,
42 42 node: NodePrefix,
43 43 ) -> Result<Manifest, RevlogError> {
44 44 let rev = self.revlog.rev_from_node(node)?;
45 45 self.data_for_checked_rev(rev)
46 46 }
47 47
48 48 /// Return the `Manifest` of a given revision number.
49 49 ///
50 50 /// Note: this is a revision number in the manifestlog, *not* of any
51 51 /// changeset.
52 52 ///
53 53 /// See also `Repo::manifest_for_rev`
54 54 pub fn data_for_rev(
55 55 &self,
56 56 rev: UncheckedRevision,
57 57 ) -> Result<Manifest, RevlogError> {
58 58 let bytes = self.revlog.get_rev_data(rev)?.into_owned();
59 59 Ok(Manifest { bytes })
60 60 }
61 61
62 62 pub fn data_for_checked_rev(
63 63 &self,
64 64 rev: Revision,
65 65 ) -> Result<Manifest, RevlogError> {
66 66 let bytes =
67 67 self.revlog.get_rev_data_for_checked_rev(rev)?.into_owned();
68 68 Ok(Manifest { bytes })
69 69 }
70 70 }
71 71
72 72 /// `Manifestlog` entry which knows how to interpret the `manifest` data bytes.
73 73 #[derive(Debug)]
74 74 pub struct Manifest {
75 75 /// Format for a manifest: flat sequence of variable-size entries,
76 76 /// sorted by path, each as:
77 77 ///
78 78 /// ```text
79 79 /// <path> \0 <hex_node_id> <flags> \n
80 80 /// ```
81 81 ///
82 82 /// The last entry is also terminated by a newline character.
83 83 /// Flags is one of `b""` (the empty string), `b"x"`, `b"l"`, or `b"t"`.
84 84 bytes: Vec<u8>,
85 85 }
86 86
87 87 impl Manifest {
88 88 pub fn iter(
89 89 &self,
90 90 ) -> impl Iterator<Item = Result<ManifestEntry, HgError>> {
91 91 self.bytes
92 92 .split(|b| b == &b'\n')
93 93 .filter(|line| !line.is_empty())
94 94 .map(ManifestEntry::from_raw)
95 95 }
96 96
97 97 /// If the given path is in this manifest, return its filelog node ID
98 98 pub fn find_by_path(
99 99 &self,
100 100 path: &HgPath,
101 101 ) -> Result<Option<ManifestEntry>, HgError> {
102 102 use std::cmp::Ordering::*;
103 103 let path = path.as_bytes();
104 104 // Both boundaries of this `&[u8]` slice are always at the boundary of
105 105 // an entry
106 106 let mut bytes = &*self.bytes;
107 107
108 108 // Binary search algorithm derived from `[T]::binary_search_by`
109 109 // <https://github.com/rust-lang/rust/blob/1.57.0/library/core/src/slice/mod.rs#L2221>
110 110 // except we don’t have a slice of entries. Instead we jump to the
111 111 // middle of the byte slice and look around for entry delimiters
112 112 // (newlines).
113 113 while let Some(entry_range) = Self::find_entry_near_middle_of(bytes)? {
114 114 let (entry_path, rest) =
115 115 ManifestEntry::split_path(&bytes[entry_range.clone()])?;
116 116 let cmp = entry_path.cmp(path);
117 117 if cmp == Less {
118 118 let after_newline = entry_range.end + 1;
119 119 bytes = &bytes[after_newline..];
120 120 } else if cmp == Greater {
121 121 bytes = &bytes[..entry_range.start];
122 122 } else {
123 123 return Ok(Some(ManifestEntry::from_path_and_rest(
124 124 entry_path, rest,
125 125 )));
126 126 }
127 127 }
128 128 Ok(None)
129 129 }
130 130
131 131 /// If there is at least one, return the byte range of an entry *excluding*
132 132 /// the final newline.
133 133 fn find_entry_near_middle_of(
134 134 bytes: &[u8],
135 135 ) -> Result<Option<std::ops::Range<usize>>, HgError> {
136 136 let len = bytes.len();
137 137 if len > 0 {
138 138 let middle = bytes.len() / 2;
139 139 // Integer division rounds down, so `middle < len`.
140 140 let (before, after) = bytes.split_at(middle);
141 141 let is_newline = |&byte: &u8| byte == b'\n';
142 142 let entry_start = match before.iter().rposition(is_newline) {
143 143 Some(i) => i + 1,
144 144 None => 0, // We choose the first entry in `bytes`
145 145 };
146 146 let entry_end = match after.iter().position(is_newline) {
147 147 Some(i) => {
148 148 // No `+ 1` here to exclude this newline from the range
149 149 middle + i
150 150 }
151 151 None => {
152 152 // In a well-formed manifest:
153 153 //
154 154 // * Since `len > 0`, `bytes` contains at least one entry
155 155 // * Every entry ends with a newline
156 156 // * Since `middle < len`, `after` contains at least the
157 157 // newline at the end of the last entry of `bytes`.
158 158 //
159 159 // We didn’t find a newline, so this manifest is not
160 160 // well-formed.
161 161 return Err(HgError::corrupted(
162 162 "manifest entry without \\n delimiter",
163 163 ));
164 164 }
165 165 };
166 166 Ok(Some(entry_start..entry_end))
167 167 } else {
168 168 // len == 0
169 169 Ok(None)
170 170 }
171 171 }
172 172 }
173 173
174 174 /// `Manifestlog` entry which knows how to interpret the `manifest` data bytes.
175 175 #[derive(Debug)]
176 176 pub struct ManifestEntry<'manifest> {
177 177 pub path: &'manifest HgPath,
178 178 pub hex_node_id: &'manifest [u8],
179 179
180 180 /// `Some` values are b'x', b'l', or 't'
181 181 pub flags: Option<u8>,
182 182 }
183 183
184 184 impl<'a> ManifestEntry<'a> {
185 185 fn split_path(bytes: &[u8]) -> Result<(&[u8], &[u8]), HgError> {
186 186 bytes.split_2(b'\0').ok_or_else(|| {
187 187 HgError::corrupted("manifest entry without \\0 delimiter")
188 188 })
189 189 }
190 190
191 191 fn from_path_and_rest(path: &'a [u8], rest: &'a [u8]) -> Self {
192 192 let (hex_node_id, flags) = match rest.split_last() {
193 193 Some((&b'x', rest)) => (rest, Some(b'x')),
194 194 Some((&b'l', rest)) => (rest, Some(b'l')),
195 195 Some((&b't', rest)) => (rest, Some(b't')),
196 196 _ => (rest, None),
197 197 };
198 198 Self {
199 199 path: HgPath::new(path),
200 200 hex_node_id,
201 201 flags,
202 202 }
203 203 }
204 204
205 205 fn from_raw(bytes: &'a [u8]) -> Result<Self, HgError> {
206 206 let (path, rest) = Self::split_path(bytes)?;
207 207 Ok(Self::from_path_and_rest(path, rest))
208 208 }
209 209
210 210 pub fn node_id(&self) -> Result<Node, HgError> {
211 211 Node::from_hex_for_repo(self.hex_node_id)
212 212 }
213 213 }
@@ -1,1458 +1,1466
1 1 // Copyright 2018-2023 Georges Racinet <georges.racinet@octobus.net>
2 2 // and Mercurial contributors
3 3 //
4 4 // This software may be used and distributed according to the terms of the
5 5 // GNU General Public License version 2 or any later version.
6 6 //! Mercurial concepts for handling revision history
7 7
8 8 pub mod node;
9 9 pub mod nodemap;
10 10 mod nodemap_docket;
11 11 pub mod path_encode;
12 12 pub use node::{FromHexError, Node, NodePrefix};
13 13 pub mod changelog;
14 14 pub mod filelog;
15 15 pub mod index;
16 16 pub mod manifest;
17 17 pub mod patch;
18 18
19 19 use std::borrow::Cow;
20 20 use std::collections::HashSet;
21 21 use std::io::Read;
22 22 use std::ops::Deref;
23 23 use std::path::Path;
24 24
25 25 use flate2::read::ZlibDecoder;
26 26 use sha1::{Digest, Sha1};
27 27 use std::cell::RefCell;
28 28 use zstd;
29 29
30 30 use self::node::{NODE_BYTES_LENGTH, NULL_NODE};
31 31 use self::nodemap_docket::NodeMapDocket;
32 32 use super::index::Index;
33 33 use super::index::INDEX_ENTRY_SIZE;
34 34 use super::nodemap::{NodeMap, NodeMapError};
35 35 use crate::config::{Config, ResourceProfileValue};
36 36 use crate::errors::HgError;
37 37 use crate::exit_codes;
38 38 use crate::requirements::{
39 39 GENERALDELTA_REQUIREMENT, NARROW_REQUIREMENT, SPARSEREVLOG_REQUIREMENT,
40 40 };
41 use crate::vfs::Vfs;
41 use crate::vfs::VfsImpl;
42 42
43 43 /// As noted in revlog.c, revision numbers are actually encoded in
44 44 /// 4 bytes, and are liberally converted to ints, whence the i32
45 45 pub type BaseRevision = i32;
46 46
47 47 /// Mercurial revision numbers
48 48 /// In contrast to the more general [`UncheckedRevision`], these are "checked"
49 49 /// in the sense that they should only be used for revisions that are
50 50 /// valid for a given index (i.e. in bounds).
51 51 #[derive(
52 52 Debug,
53 53 derive_more::Display,
54 54 Clone,
55 55 Copy,
56 56 Hash,
57 57 PartialEq,
58 58 Eq,
59 59 PartialOrd,
60 60 Ord,
61 61 )]
62 62 pub struct Revision(pub BaseRevision);
63 63
64 64 impl format_bytes::DisplayBytes for Revision {
65 65 fn display_bytes(
66 66 &self,
67 67 output: &mut dyn std::io::Write,
68 68 ) -> std::io::Result<()> {
69 69 self.0.display_bytes(output)
70 70 }
71 71 }
72 72
73 73 /// Unchecked Mercurial revision numbers.
74 74 ///
75 75 /// Values of this type have no guarantee of being a valid revision number
76 76 /// in any context. Use method `check_revision` to get a valid revision within
77 77 /// the appropriate index object.
78 78 #[derive(
79 79 Debug,
80 80 derive_more::Display,
81 81 Clone,
82 82 Copy,
83 83 Hash,
84 84 PartialEq,
85 85 Eq,
86 86 PartialOrd,
87 87 Ord,
88 88 )]
89 89 pub struct UncheckedRevision(pub BaseRevision);
90 90
91 91 impl format_bytes::DisplayBytes for UncheckedRevision {
92 92 fn display_bytes(
93 93 &self,
94 94 output: &mut dyn std::io::Write,
95 95 ) -> std::io::Result<()> {
96 96 self.0.display_bytes(output)
97 97 }
98 98 }
99 99
100 100 impl From<Revision> for UncheckedRevision {
101 101 fn from(value: Revision) -> Self {
102 102 Self(value.0)
103 103 }
104 104 }
105 105
106 106 impl From<BaseRevision> for UncheckedRevision {
107 107 fn from(value: BaseRevision) -> Self {
108 108 Self(value)
109 109 }
110 110 }
111 111
112 112 /// Marker expressing the absence of a parent
113 113 ///
114 114 /// Independently of the actual representation, `NULL_REVISION` is guaranteed
115 115 /// to be smaller than all existing revisions.
116 116 pub const NULL_REVISION: Revision = Revision(-1);
117 117
118 118 /// Same as `mercurial.node.wdirrev`
119 119 ///
120 120 /// This is also equal to `i32::max_value()`, but it's better to spell
121 121 /// it out explicitely, same as in `mercurial.node`
122 122 #[allow(clippy::unreadable_literal)]
123 123 pub const WORKING_DIRECTORY_REVISION: UncheckedRevision =
124 124 UncheckedRevision(0x7fffffff);
125 125
126 126 pub const WORKING_DIRECTORY_HEX: &str =
127 127 "ffffffffffffffffffffffffffffffffffffffff";
128 128
129 129 /// The simplest expression of what we need of Mercurial DAGs.
130 130 pub trait Graph {
131 131 /// Return the two parents of the given `Revision`.
132 132 ///
133 133 /// Each of the parents can be independently `NULL_REVISION`
134 134 fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError>;
135 135 }
136 136
137 137 #[derive(Clone, Debug, PartialEq)]
138 138 pub enum GraphError {
139 139 ParentOutOfRange(Revision),
140 140 }
141 141
142 142 impl<T: Graph> Graph for &T {
143 143 fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
144 144 (*self).parents(rev)
145 145 }
146 146 }
147 147
148 148 /// The Mercurial Revlog Index
149 149 ///
150 150 /// This is currently limited to the minimal interface that is needed for
151 151 /// the [`nodemap`](nodemap/index.html) module
152 152 pub trait RevlogIndex {
153 153 /// Total number of Revisions referenced in this index
154 154 fn len(&self) -> usize;
155 155
156 156 fn is_empty(&self) -> bool {
157 157 self.len() == 0
158 158 }
159 159
160 160 /// Return a reference to the Node or `None` for `NULL_REVISION`
161 161 fn node(&self, rev: Revision) -> Option<&Node>;
162 162
163 163 /// Return a [`Revision`] if `rev` is a valid revision number for this
164 164 /// index.
165 165 ///
166 166 /// [`NULL_REVISION`] is considered to be valid.
167 167 #[inline(always)]
168 168 fn check_revision(&self, rev: UncheckedRevision) -> Option<Revision> {
169 169 let rev = rev.0;
170 170
171 171 if rev == NULL_REVISION.0 || (rev >= 0 && (rev as usize) < self.len())
172 172 {
173 173 Some(Revision(rev))
174 174 } else {
175 175 None
176 176 }
177 177 }
178 178 }
179 179
180 180 const REVISION_FLAG_CENSORED: u16 = 1 << 15;
181 181 const REVISION_FLAG_ELLIPSIS: u16 = 1 << 14;
182 182 const REVISION_FLAG_EXTSTORED: u16 = 1 << 13;
183 183 const REVISION_FLAG_HASCOPIESINFO: u16 = 1 << 12;
184 184
185 185 // Keep this in sync with REVIDX_KNOWN_FLAGS in
186 186 // mercurial/revlogutils/flagutil.py
187 187 const REVIDX_KNOWN_FLAGS: u16 = REVISION_FLAG_CENSORED
188 188 | REVISION_FLAG_ELLIPSIS
189 189 | REVISION_FLAG_EXTSTORED
190 190 | REVISION_FLAG_HASCOPIESINFO;
191 191
192 192 const NULL_REVLOG_ENTRY_FLAGS: u16 = 0;
193 193
194 194 #[derive(Debug, derive_more::From, derive_more::Display)]
195 195 pub enum RevlogError {
196 196 InvalidRevision,
197 197 /// Working directory is not supported
198 198 WDirUnsupported,
199 199 /// Found more than one entry whose ID match the requested prefix
200 200 AmbiguousPrefix,
201 201 #[from]
202 202 Other(HgError),
203 203 }
204 204
205 205 impl From<NodeMapError> for RevlogError {
206 206 fn from(error: NodeMapError) -> Self {
207 207 match error {
208 208 NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
209 209 NodeMapError::RevisionNotInIndex(rev) => RevlogError::corrupted(
210 210 format!("nodemap point to revision {} not in index", rev),
211 211 ),
212 212 }
213 213 }
214 214 }
215 215
216 216 fn corrupted<S: AsRef<str>>(context: S) -> HgError {
217 217 HgError::corrupted(format!("corrupted revlog, {}", context.as_ref()))
218 218 }
219 219
220 220 impl RevlogError {
221 221 fn corrupted<S: AsRef<str>>(context: S) -> Self {
222 222 RevlogError::Other(corrupted(context))
223 223 }
224 224 }
225 225
226 226 #[derive(derive_more::Display, Debug, Copy, Clone, PartialEq, Eq)]
227 227 pub enum RevlogType {
228 228 Changelog,
229 229 Manifestlog,
230 230 Filelog,
231 231 }
232 232
233 233 impl TryFrom<usize> for RevlogType {
234 234 type Error = HgError;
235 235
236 236 fn try_from(value: usize) -> Result<Self, Self::Error> {
237 237 match value {
238 238 1001 => Ok(Self::Changelog),
239 239 1002 => Ok(Self::Manifestlog),
240 240 1003 => Ok(Self::Filelog),
241 241 t => Err(HgError::abort(
242 242 format!("Unknown revlog type {}", t),
243 243 exit_codes::ABORT,
244 244 None,
245 245 )),
246 246 }
247 247 }
248 248 }
249 249
250 250 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
251 251 pub enum CompressionEngine {
252 252 Zlib {
253 253 /// Between 0 and 9 included
254 254 level: u32,
255 255 },
256 256 Zstd {
257 257 /// Between 0 and 22 included
258 258 level: u32,
259 259 /// Never used in practice for now
260 260 threads: u32,
261 261 },
262 262 /// No compression is performed
263 263 None,
264 264 }
265 265 impl CompressionEngine {
266 266 pub fn set_level(&mut self, new_level: usize) -> Result<(), HgError> {
267 267 match self {
268 268 CompressionEngine::Zlib { level } => {
269 269 if new_level > 9 {
270 270 return Err(HgError::abort(
271 271 format!(
272 272 "invalid compression zlib compression level {}",
273 273 new_level
274 274 ),
275 275 exit_codes::ABORT,
276 276 None,
277 277 ));
278 278 }
279 279 *level = new_level as u32;
280 280 }
281 281 CompressionEngine::Zstd { level, .. } => {
282 282 if new_level > 22 {
283 283 return Err(HgError::abort(
284 284 format!(
285 285 "invalid compression zstd compression level {}",
286 286 new_level
287 287 ),
288 288 exit_codes::ABORT,
289 289 None,
290 290 ));
291 291 }
292 292 *level = new_level as u32;
293 293 }
294 294 CompressionEngine::None => {}
295 295 }
296 296 Ok(())
297 297 }
298 298
299 299 pub fn zstd(
300 300 zstd_level: Option<u32>,
301 301 ) -> Result<CompressionEngine, HgError> {
302 302 let mut engine = CompressionEngine::Zstd {
303 303 level: 3,
304 304 threads: 0,
305 305 };
306 306 if let Some(level) = zstd_level {
307 307 engine.set_level(level as usize)?;
308 308 }
309 309 Ok(engine)
310 310 }
311 311 }
312 312
313 313 impl Default for CompressionEngine {
314 314 fn default() -> Self {
315 315 Self::Zlib { level: 6 }
316 316 }
317 317 }
318 318
319 319 #[derive(Debug, Clone, Copy, PartialEq)]
320 320 /// Holds configuration values about how the revlog data is read
321 321 pub struct RevlogDataConfig {
322 322 /// Should we try to open the "pending" version of the revlog
323 323 pub try_pending: bool,
324 324 /// Should we try to open the "split" version of the revlog
325 325 pub try_split: bool,
326 326 /// When True, `indexfile` should be opened with `checkambig=True` at
327 327 /// writing time, to avoid file stat ambiguity
328 328 pub check_ambig: bool,
329 329 /// If true, use mmap instead of reading to deal with large indexes
330 330 pub mmap_large_index: bool,
331 331 /// How much data is considered large
332 332 pub mmap_index_threshold: Option<u64>,
333 333 /// How much data to read and cache into the raw revlog data cache
334 334 pub chunk_cache_size: u64,
335 335 /// The size of the uncompressed cache compared to the largest revision
336 336 /// seen
337 337 pub uncompressed_cache_factor: Option<f64>,
338 338 /// The number of chunks cached
339 339 pub uncompressed_cache_count: Option<u64>,
340 340 /// Allow sparse reading of the revlog data
341 341 pub with_sparse_read: bool,
342 342 /// Minimal density of a sparse read chunk
343 343 pub sr_density_threshold: f64,
344 344 /// Minimal size of the data we skip when performing sparse reads
345 345 pub sr_min_gap_size: u64,
346 346 /// Whether deltas are encoded against arbitrary bases
347 347 pub general_delta: bool,
348 348 }
349 349
350 350 impl RevlogDataConfig {
351 351 pub fn new(
352 352 config: &Config,
353 353 requirements: &HashSet<String>,
354 354 ) -> Result<Self, HgError> {
355 355 let mut data_config = Self::default();
356 356 if let Some(chunk_cache_size) =
357 357 config.get_byte_size(b"format", b"chunkcachesize")?
358 358 {
359 359 data_config.chunk_cache_size = chunk_cache_size;
360 360 }
361 361
362 362 let memory_profile = config.get_resource_profile(Some("memory"));
363 363 if memory_profile.value >= ResourceProfileValue::Medium {
364 364 data_config.uncompressed_cache_count = Some(10_000);
365 365 data_config.uncompressed_cache_factor = Some(4.0);
366 366 if memory_profile.value >= ResourceProfileValue::High {
367 367 data_config.uncompressed_cache_factor = Some(10.0)
368 368 }
369 369 }
370 370
371 371 if let Some(mmap_index_threshold) = config
372 372 .get_byte_size(b"storage", b"revlog.mmap.index:size-threshold")?
373 373 {
374 374 data_config.mmap_index_threshold = Some(mmap_index_threshold);
375 375 }
376 376
377 377 let with_sparse_read =
378 378 config.get_bool(b"experimental", b"sparse-read")?;
379 379 if let Some(sr_density_threshold) = config
380 380 .get_f64(b"experimental", b"sparse-read.density-threshold")?
381 381 {
382 382 data_config.sr_density_threshold = sr_density_threshold;
383 383 }
384 384 data_config.with_sparse_read = with_sparse_read;
385 385 if let Some(sr_min_gap_size) = config
386 386 .get_byte_size(b"experimental", b"sparse-read.min-gap-size")?
387 387 {
388 388 data_config.sr_min_gap_size = sr_min_gap_size;
389 389 }
390 390
391 391 data_config.with_sparse_read =
392 392 requirements.contains(SPARSEREVLOG_REQUIREMENT);
393 393
394 394 Ok(data_config)
395 395 }
396 396 }
397 397
398 398 impl Default for RevlogDataConfig {
399 399 fn default() -> Self {
400 400 Self {
401 401 chunk_cache_size: 65536,
402 402 sr_density_threshold: 0.50,
403 403 sr_min_gap_size: 262144,
404 404 try_pending: Default::default(),
405 405 try_split: Default::default(),
406 406 check_ambig: Default::default(),
407 407 mmap_large_index: Default::default(),
408 408 mmap_index_threshold: Default::default(),
409 409 uncompressed_cache_factor: Default::default(),
410 410 uncompressed_cache_count: Default::default(),
411 411 with_sparse_read: Default::default(),
412 412 general_delta: Default::default(),
413 413 }
414 414 }
415 415 }
416 416
417 417 #[derive(Debug, Clone, Copy, PartialEq)]
418 418 /// Holds configuration values about how new deltas are computed.
419 419 ///
420 420 /// Some attributes are duplicated from [`RevlogDataConfig`] to help having
421 421 /// each object self contained.
422 422 pub struct RevlogDeltaConfig {
423 423 /// Whether deltas can be encoded against arbitrary bases
424 424 pub general_delta: bool,
425 425 /// Allow sparse writing of the revlog data
426 426 pub sparse_revlog: bool,
427 427 /// Maximum length of a delta chain
428 428 pub max_chain_len: Option<u64>,
429 429 /// Maximum distance between a delta chain's start and end
430 430 pub max_deltachain_span: Option<u64>,
431 431 /// If `upper_bound_comp` is not None, this is the expected maximal
432 432 /// gain from compression for the data content
433 433 pub upper_bound_comp: Option<f64>,
434 434 /// Should we try a delta against both parents
435 435 pub delta_both_parents: bool,
436 436 /// Test delta base candidate groups by chunks of this maximal size
437 437 pub candidate_group_chunk_size: u64,
438 438 /// Should we display debug information about delta computation
439 439 pub debug_delta: bool,
440 440 /// Trust incoming deltas by default
441 441 pub lazy_delta: bool,
442 442 /// Trust the base of incoming deltas by default
443 443 pub lazy_delta_base: bool,
444 444 }
445 445 impl RevlogDeltaConfig {
446 446 pub fn new(
447 447 config: &Config,
448 448 requirements: &HashSet<String>,
449 449 revlog_type: RevlogType,
450 450 ) -> Result<Self, HgError> {
451 451 let mut delta_config = Self {
452 452 delta_both_parents: config
453 453 .get_option_no_default(
454 454 b"storage",
455 455 b"revlog.optimize-delta-parent-choice",
456 456 )?
457 457 .unwrap_or(true),
458 458 candidate_group_chunk_size: config
459 459 .get_u64(
460 460 b"storage",
461 461 b"revlog.delta-parent-search.candidate-group-chunk-size",
462 462 )?
463 463 .unwrap_or_default(),
464 464 ..Default::default()
465 465 };
466 466
467 467 delta_config.debug_delta =
468 468 config.get_bool(b"debug", b"revlog.debug-delta")?;
469 469
470 470 delta_config.general_delta =
471 471 requirements.contains(GENERALDELTA_REQUIREMENT);
472 472
473 473 let lazy_delta =
474 474 config.get_bool(b"storage", b"revlog.reuse-external-delta")?;
475 475
476 476 if revlog_type == RevlogType::Manifestlog {
477 477 // upper bound of what we expect from compression
478 478 // (real life value seems to be 3)
479 479 delta_config.upper_bound_comp = Some(3.0)
480 480 }
481 481
482 482 let mut lazy_delta_base = false;
483 483 if lazy_delta {
484 484 lazy_delta_base = match config.get_option_no_default(
485 485 b"storage",
486 486 b"revlog.reuse-external-delta-parent",
487 487 )? {
488 488 Some(base) => base,
489 489 None => config.get_bool(b"format", b"generaldelta")?,
490 490 };
491 491 }
492 492 delta_config.lazy_delta = lazy_delta;
493 493 delta_config.lazy_delta_base = lazy_delta_base;
494 494
495 495 delta_config.max_deltachain_span =
496 496 match config.get_i64(b"experimental", b"maxdeltachainspan")? {
497 497 Some(span) => {
498 498 if span < 0 {
499 499 None
500 500 } else {
501 501 Some(span as u64)
502 502 }
503 503 }
504 504 None => None,
505 505 };
506 506
507 507 delta_config.sparse_revlog =
508 508 requirements.contains(SPARSEREVLOG_REQUIREMENT);
509 509
510 510 delta_config.max_chain_len =
511 511 config.get_byte_size_no_default(b"format", b"maxchainlen")?;
512 512
513 513 Ok(delta_config)
514 514 }
515 515 }
516 516
517 517 impl Default for RevlogDeltaConfig {
518 518 fn default() -> Self {
519 519 Self {
520 520 delta_both_parents: true,
521 521 lazy_delta: true,
522 522 general_delta: Default::default(),
523 523 sparse_revlog: Default::default(),
524 524 max_chain_len: Default::default(),
525 525 max_deltachain_span: Default::default(),
526 526 upper_bound_comp: Default::default(),
527 527 candidate_group_chunk_size: Default::default(),
528 528 debug_delta: Default::default(),
529 529 lazy_delta_base: Default::default(),
530 530 }
531 531 }
532 532 }
533 533
534 534 #[derive(Debug, Default, Clone, Copy, PartialEq)]
535 535 /// Holds configuration values about the available revlog features
536 536 pub struct RevlogFeatureConfig {
537 537 /// The compression engine and its options
538 538 pub compression_engine: CompressionEngine,
539 539 /// Can we use censor on this revlog
540 540 pub censorable: bool,
541 541 /// Does this revlog use the "side data" feature
542 542 pub has_side_data: bool,
543 543 /// Might remove this configuration once the rank computation has no
544 544 /// impact
545 545 pub compute_rank: bool,
546 546 /// Parent order is supposed to be semantically irrelevant, so we
547 547 /// normally re-sort parents to ensure that the first parent is non-null,
548 548 /// if there is a non-null parent at all.
549 549 /// filelog abuses the parent order as a flag to mark some instances of
550 550 /// meta-encoded files, so allow it to disable this behavior.
551 551 pub canonical_parent_order: bool,
552 552 /// Can ellipsis commit be used
553 553 pub enable_ellipsis: bool,
554 554 }
555 555 impl RevlogFeatureConfig {
556 556 pub fn new(
557 557 config: &Config,
558 558 requirements: &HashSet<String>,
559 559 ) -> Result<Self, HgError> {
560 560 let mut feature_config = Self::default();
561 561
562 562 let zlib_level = config.get_u32(b"storage", b"revlog.zlib.level")?;
563 563 let zstd_level = config.get_u32(b"storage", b"revlog.zstd.level")?;
564 564
565 565 feature_config.compression_engine = CompressionEngine::default();
566 566
567 567 for requirement in requirements {
568 568 if requirement.starts_with("revlog-compression-")
569 569 || requirement.starts_with("exp-compression-")
570 570 {
571 571 let split = &mut requirement.splitn(3, '-');
572 572 split.next();
573 573 split.next();
574 574 feature_config.compression_engine = match split.next().unwrap()
575 575 {
576 576 "zstd" => CompressionEngine::zstd(zstd_level)?,
577 577 e => {
578 578 return Err(HgError::UnsupportedFeature(format!(
579 579 "Unsupported compression engine '{e}'"
580 580 )))
581 581 }
582 582 };
583 583 }
584 584 }
585 585 if let Some(level) = zlib_level {
586 586 if matches!(
587 587 feature_config.compression_engine,
588 588 CompressionEngine::Zlib { .. }
589 589 ) {
590 590 feature_config
591 591 .compression_engine
592 592 .set_level(level as usize)?;
593 593 }
594 594 }
595 595
596 596 feature_config.enable_ellipsis =
597 597 requirements.contains(NARROW_REQUIREMENT);
598 598
599 599 Ok(feature_config)
600 600 }
601 601 }
602 602
603 603 /// Read only implementation of revlog.
604 604 pub struct Revlog {
605 605 /// When index and data are not interleaved: bytes of the revlog index.
606 606 /// When index and data are interleaved: bytes of the revlog index and
607 607 /// data.
608 608 index: Index,
609 609 /// When index and data are not interleaved: bytes of the revlog data
610 610 data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
611 611 /// When present on disk: the persistent nodemap for this revlog
612 612 nodemap: Option<nodemap::NodeTree>,
613 613 }
614 614
615 615 impl Graph for Revlog {
616 616 fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
617 617 self.index.parents(rev)
618 618 }
619 619 }
620 620
621 621 #[derive(Debug, Copy, Clone, PartialEq)]
622 622 pub enum RevlogVersionOptions {
623 623 V0,
624 624 V1 { general_delta: bool, inline: bool },
625 625 V2,
626 626 ChangelogV2 { compute_rank: bool },
627 627 }
628 628
629 629 /// Options to govern how a revlog should be opened, usually from the
630 630 /// repository configuration or requirements.
631 631 #[derive(Debug, Copy, Clone)]
632 632 pub struct RevlogOpenOptions {
633 633 /// The revlog version, along with any option specific to this version
634 634 pub version: RevlogVersionOptions,
635 635 /// Whether the revlog uses a persistent nodemap.
636 636 pub use_nodemap: bool,
637 637 pub delta_config: RevlogDeltaConfig,
638 638 pub data_config: RevlogDataConfig,
639 639 pub feature_config: RevlogFeatureConfig,
640 640 }
641 641
642 642 #[cfg(test)]
643 643 impl Default for RevlogOpenOptions {
644 644 fn default() -> Self {
645 645 Self {
646 646 version: RevlogVersionOptions::V1 {
647 647 general_delta: true,
648 648 inline: false,
649 649 },
650 650 use_nodemap: true,
651 651 data_config: Default::default(),
652 652 delta_config: Default::default(),
653 653 feature_config: Default::default(),
654 654 }
655 655 }
656 656 }
657 657
658 658 impl RevlogOpenOptions {
659 659 pub fn new(
660 660 inline: bool,
661 661 data_config: RevlogDataConfig,
662 662 delta_config: RevlogDeltaConfig,
663 663 feature_config: RevlogFeatureConfig,
664 664 ) -> Self {
665 665 Self {
666 666 version: RevlogVersionOptions::V1 {
667 667 general_delta: data_config.general_delta,
668 668 inline,
669 669 },
670 670 use_nodemap: false,
671 671 data_config,
672 672 delta_config,
673 673 feature_config,
674 674 }
675 675 }
676 676
677 677 pub fn index_header(&self) -> index::IndexHeader {
678 678 index::IndexHeader {
679 679 header_bytes: match self.version {
680 680 RevlogVersionOptions::V0 => [0, 0, 0, 0],
681 681 RevlogVersionOptions::V1 {
682 682 general_delta,
683 683 inline,
684 684 } => [
685 685 0,
686 686 if general_delta && inline {
687 687 3
688 688 } else if general_delta {
689 689 2
690 690 } else {
691 691 u8::from(inline)
692 692 },
693 693 0,
694 694 1,
695 695 ],
696 696 RevlogVersionOptions::V2 => 0xDEADu32.to_be_bytes(),
697 697 RevlogVersionOptions::ChangelogV2 { compute_rank: _ } => {
698 698 0xD34Du32.to_be_bytes()
699 699 }
700 700 },
701 701 }
702 702 }
703 703 }
704 704
705 705 impl Revlog {
706 706 /// Open a revlog index file.
707 707 ///
708 708 /// It will also open the associated data file if index and data are not
709 709 /// interleaved.
710 710 pub fn open(
711 store_vfs: &Vfs,
711 // Todo use the `Vfs` trait here once we create a function for mmap
712 store_vfs: &VfsImpl,
712 713 index_path: impl AsRef<Path>,
713 714 data_path: Option<&Path>,
714 715 options: RevlogOpenOptions,
715 716 ) -> Result<Self, HgError> {
716 717 Self::open_gen(store_vfs, index_path, data_path, options, None)
717 718 }
718 719
719 720 fn open_gen(
720 store_vfs: &Vfs,
721 // Todo use the `Vfs` trait here once we create a function for mmap
722 store_vfs: &VfsImpl,
721 723 index_path: impl AsRef<Path>,
722 724 data_path: Option<&Path>,
723 725 options: RevlogOpenOptions,
724 726 nodemap_for_test: Option<nodemap::NodeTree>,
725 727 ) -> Result<Self, HgError> {
726 728 let index_path = index_path.as_ref();
727 729 let index = {
728 730 match store_vfs.mmap_open_opt(index_path)? {
729 731 None => Index::new(
730 732 Box::<Vec<_>>::default(),
731 733 options.index_header(),
732 734 ),
733 735 Some(index_mmap) => {
734 736 let index = Index::new(
735 737 Box::new(index_mmap),
736 738 options.index_header(),
737 739 )?;
738 740 Ok(index)
739 741 }
740 742 }
741 743 }?;
742 744
743 745 let default_data_path = index_path.with_extension("d");
744 746
745 747 // type annotation required
746 748 // won't recognize Mmap as Deref<Target = [u8]>
747 749 let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
748 750 if index.is_inline() {
749 751 None
750 752 } else if index.is_empty() {
751 753 // No need to even try to open the data file then.
752 754 Some(Box::new(&[][..]))
753 755 } else {
754 756 let data_path = data_path.unwrap_or(&default_data_path);
755 757 let data_mmap = store_vfs.mmap_open(data_path)?;
756 758 Some(Box::new(data_mmap))
757 759 };
758 760
759 761 let nodemap = if index.is_inline() || !options.use_nodemap {
760 762 None
761 763 } else {
762 764 NodeMapDocket::read_from_file(store_vfs, index_path)?.map(
763 765 |(docket, data)| {
764 766 nodemap::NodeTree::load_bytes(
765 767 Box::new(data),
766 768 docket.data_length,
767 769 )
768 770 },
769 771 )
770 772 };
771 773
772 774 let nodemap = nodemap_for_test.or(nodemap);
773 775
774 776 Ok(Revlog {
775 777 index,
776 778 data_bytes,
777 779 nodemap,
778 780 })
779 781 }
780 782
781 783 /// Return number of entries of the `Revlog`.
782 784 pub fn len(&self) -> usize {
783 785 self.index.len()
784 786 }
785 787
786 788 /// Returns `true` if the `Revlog` has zero `entries`.
787 789 pub fn is_empty(&self) -> bool {
788 790 self.index.is_empty()
789 791 }
790 792
791 793 /// Returns the node ID for the given revision number, if it exists in this
792 794 /// revlog
793 795 pub fn node_from_rev(&self, rev: UncheckedRevision) -> Option<&Node> {
794 796 if rev == NULL_REVISION.into() {
795 797 return Some(&NULL_NODE);
796 798 }
797 799 let rev = self.index.check_revision(rev)?;
798 800 Some(self.index.get_entry(rev)?.hash())
799 801 }
800 802
801 803 /// Return the revision number for the given node ID, if it exists in this
802 804 /// revlog
803 805 pub fn rev_from_node(
804 806 &self,
805 807 node: NodePrefix,
806 808 ) -> Result<Revision, RevlogError> {
807 809 if let Some(nodemap) = &self.nodemap {
808 810 nodemap
809 811 .find_bin(&self.index, node)?
810 812 .ok_or(RevlogError::InvalidRevision)
811 813 } else {
812 814 self.rev_from_node_no_persistent_nodemap(node)
813 815 }
814 816 }
815 817
816 818 /// Same as `rev_from_node`, without using a persistent nodemap
817 819 ///
818 820 /// This is used as fallback when a persistent nodemap is not present.
819 821 /// This happens when the persistent-nodemap experimental feature is not
820 822 /// enabled, or for small revlogs.
821 823 fn rev_from_node_no_persistent_nodemap(
822 824 &self,
823 825 node: NodePrefix,
824 826 ) -> Result<Revision, RevlogError> {
825 827 // Linear scan of the revlog
826 828 // TODO: consider building a non-persistent nodemap in memory to
827 829 // optimize these cases.
828 830 let mut found_by_prefix = None;
829 831 for rev in (-1..self.len() as BaseRevision).rev() {
830 832 let rev = Revision(rev as BaseRevision);
831 833 let candidate_node = if rev == Revision(-1) {
832 834 NULL_NODE
833 835 } else {
834 836 let index_entry =
835 837 self.index.get_entry(rev).ok_or_else(|| {
836 838 HgError::corrupted(
837 839 "revlog references a revision not in the index",
838 840 )
839 841 })?;
840 842 *index_entry.hash()
841 843 };
842 844 if node == candidate_node {
843 845 return Ok(rev);
844 846 }
845 847 if node.is_prefix_of(&candidate_node) {
846 848 if found_by_prefix.is_some() {
847 849 return Err(RevlogError::AmbiguousPrefix);
848 850 }
849 851 found_by_prefix = Some(rev)
850 852 }
851 853 }
852 854 found_by_prefix.ok_or(RevlogError::InvalidRevision)
853 855 }
854 856
855 857 /// Returns whether the given revision exists in this revlog.
856 858 pub fn has_rev(&self, rev: UncheckedRevision) -> bool {
857 859 self.index.check_revision(rev).is_some()
858 860 }
859 861
860 862 /// Return the full data associated to a revision.
861 863 ///
862 864 /// All entries required to build the final data out of deltas will be
863 865 /// retrieved as needed, and the deltas will be applied to the inital
864 866 /// snapshot to rebuild the final data.
865 867 pub fn get_rev_data(
866 868 &self,
867 869 rev: UncheckedRevision,
868 870 ) -> Result<Cow<[u8]>, RevlogError> {
869 871 if rev == NULL_REVISION.into() {
870 872 return Ok(Cow::Borrowed(&[]));
871 873 };
872 874 self.get_entry(rev)?.data()
873 875 }
874 876
875 877 /// [`Self::get_rev_data`] for checked revisions.
876 878 pub fn get_rev_data_for_checked_rev(
877 879 &self,
878 880 rev: Revision,
879 881 ) -> Result<Cow<[u8]>, RevlogError> {
880 882 if rev == NULL_REVISION {
881 883 return Ok(Cow::Borrowed(&[]));
882 884 };
883 885 self.get_entry_for_checked_rev(rev)?.data()
884 886 }
885 887
886 888 /// Check the hash of some given data against the recorded hash.
887 889 pub fn check_hash(
888 890 &self,
889 891 p1: Revision,
890 892 p2: Revision,
891 893 expected: &[u8],
892 894 data: &[u8],
893 895 ) -> bool {
894 896 let e1 = self.index.get_entry(p1);
895 897 let h1 = match e1 {
896 898 Some(ref entry) => entry.hash(),
897 899 None => &NULL_NODE,
898 900 };
899 901 let e2 = self.index.get_entry(p2);
900 902 let h2 = match e2 {
901 903 Some(ref entry) => entry.hash(),
902 904 None => &NULL_NODE,
903 905 };
904 906
905 907 hash(data, h1.as_bytes(), h2.as_bytes()) == expected
906 908 }
907 909
908 910 /// Build the full data of a revision out its snapshot
909 911 /// and its deltas.
910 912 fn build_data_from_deltas(
911 913 snapshot: RevlogEntry,
912 914 deltas: &[RevlogEntry],
913 915 ) -> Result<Vec<u8>, HgError> {
914 916 let snapshot = snapshot.data_chunk()?;
915 917 let deltas = deltas
916 918 .iter()
917 919 .rev()
918 920 .map(RevlogEntry::data_chunk)
919 921 .collect::<Result<Vec<_>, _>>()?;
920 922 let patches: Vec<_> =
921 923 deltas.iter().map(|d| patch::PatchList::new(d)).collect();
922 924 let patch = patch::fold_patch_lists(&patches);
923 925 Ok(patch.apply(&snapshot))
924 926 }
925 927
926 928 /// Return the revlog data.
927 929 fn data(&self) -> &[u8] {
928 930 match &self.data_bytes {
929 931 Some(data_bytes) => data_bytes,
930 932 None => panic!(
931 933 "forgot to load the data or trying to access inline data"
932 934 ),
933 935 }
934 936 }
935 937
936 938 pub fn make_null_entry(&self) -> RevlogEntry {
937 939 RevlogEntry {
938 940 revlog: self,
939 941 rev: NULL_REVISION,
940 942 bytes: b"",
941 943 compressed_len: 0,
942 944 uncompressed_len: 0,
943 945 base_rev_or_base_of_delta_chain: None,
944 946 p1: NULL_REVISION,
945 947 p2: NULL_REVISION,
946 948 flags: NULL_REVLOG_ENTRY_FLAGS,
947 949 hash: NULL_NODE,
948 950 }
949 951 }
950 952
951 953 fn get_entry_for_checked_rev(
952 954 &self,
953 955 rev: Revision,
954 956 ) -> Result<RevlogEntry, RevlogError> {
955 957 if rev == NULL_REVISION {
956 958 return Ok(self.make_null_entry());
957 959 }
958 960 let index_entry = self
959 961 .index
960 962 .get_entry(rev)
961 963 .ok_or(RevlogError::InvalidRevision)?;
962 964 let offset = index_entry.offset();
963 965 let start = if self.index.is_inline() {
964 966 offset + ((rev.0 as usize + 1) * INDEX_ENTRY_SIZE)
965 967 } else {
966 968 offset
967 969 };
968 970 let end = start + index_entry.compressed_len() as usize;
969 971 let data = if self.index.is_inline() {
970 972 self.index.data(start, end)
971 973 } else {
972 974 &self.data()[start..end]
973 975 };
974 976 let base_rev = self
975 977 .index
976 978 .check_revision(index_entry.base_revision_or_base_of_delta_chain())
977 979 .ok_or_else(|| {
978 980 RevlogError::corrupted(format!(
979 981 "base revision for rev {} is invalid",
980 982 rev
981 983 ))
982 984 })?;
983 985 let p1 =
984 986 self.index.check_revision(index_entry.p1()).ok_or_else(|| {
985 987 RevlogError::corrupted(format!(
986 988 "p1 for rev {} is invalid",
987 989 rev
988 990 ))
989 991 })?;
990 992 let p2 =
991 993 self.index.check_revision(index_entry.p2()).ok_or_else(|| {
992 994 RevlogError::corrupted(format!(
993 995 "p2 for rev {} is invalid",
994 996 rev
995 997 ))
996 998 })?;
997 999 let entry = RevlogEntry {
998 1000 revlog: self,
999 1001 rev,
1000 1002 bytes: data,
1001 1003 compressed_len: index_entry.compressed_len(),
1002 1004 uncompressed_len: index_entry.uncompressed_len(),
1003 1005 base_rev_or_base_of_delta_chain: if base_rev == rev {
1004 1006 None
1005 1007 } else {
1006 1008 Some(base_rev)
1007 1009 },
1008 1010 p1,
1009 1011 p2,
1010 1012 flags: index_entry.flags(),
1011 1013 hash: *index_entry.hash(),
1012 1014 };
1013 1015 Ok(entry)
1014 1016 }
1015 1017
1016 1018 /// Get an entry of the revlog.
1017 1019 pub fn get_entry(
1018 1020 &self,
1019 1021 rev: UncheckedRevision,
1020 1022 ) -> Result<RevlogEntry, RevlogError> {
1021 1023 if rev == NULL_REVISION.into() {
1022 1024 return Ok(self.make_null_entry());
1023 1025 }
1024 1026 let rev = self.index.check_revision(rev).ok_or_else(|| {
1025 1027 RevlogError::corrupted(format!("rev {} is invalid", rev))
1026 1028 })?;
1027 1029 self.get_entry_for_checked_rev(rev)
1028 1030 }
1029 1031 }
1030 1032
1031 1033 /// The revlog entry's bytes and the necessary informations to extract
1032 1034 /// the entry's data.
1033 1035 #[derive(Clone)]
1034 1036 pub struct RevlogEntry<'revlog> {
1035 1037 revlog: &'revlog Revlog,
1036 1038 rev: Revision,
1037 1039 bytes: &'revlog [u8],
1038 1040 compressed_len: u32,
1039 1041 uncompressed_len: i32,
1040 1042 base_rev_or_base_of_delta_chain: Option<Revision>,
1041 1043 p1: Revision,
1042 1044 p2: Revision,
1043 1045 flags: u16,
1044 1046 hash: Node,
1045 1047 }
1046 1048
1047 1049 thread_local! {
1048 1050 // seems fine to [unwrap] here: this can only fail due to memory allocation
1049 1051 // failing, and it's normal for that to cause panic.
1050 1052 static ZSTD_DECODER : RefCell<zstd::bulk::Decompressor<'static>> =
1051 1053 RefCell::new(zstd::bulk::Decompressor::new().ok().unwrap());
1052 1054 }
1053 1055
1054 1056 fn zstd_decompress_to_buffer(
1055 1057 bytes: &[u8],
1056 1058 buf: &mut Vec<u8>,
1057 1059 ) -> Result<usize, std::io::Error> {
1058 1060 ZSTD_DECODER
1059 1061 .with(|decoder| decoder.borrow_mut().decompress_to_buffer(bytes, buf))
1060 1062 }
1061 1063
1062 1064 impl<'revlog> RevlogEntry<'revlog> {
1063 1065 pub fn revision(&self) -> Revision {
1064 1066 self.rev
1065 1067 }
1066 1068
1067 1069 pub fn node(&self) -> &Node {
1068 1070 &self.hash
1069 1071 }
1070 1072
1071 1073 pub fn uncompressed_len(&self) -> Option<u32> {
1072 1074 u32::try_from(self.uncompressed_len).ok()
1073 1075 }
1074 1076
1075 1077 pub fn has_p1(&self) -> bool {
1076 1078 self.p1 != NULL_REVISION
1077 1079 }
1078 1080
1079 1081 pub fn p1_entry(
1080 1082 &self,
1081 1083 ) -> Result<Option<RevlogEntry<'revlog>>, RevlogError> {
1082 1084 if self.p1 == NULL_REVISION {
1083 1085 Ok(None)
1084 1086 } else {
1085 1087 Ok(Some(self.revlog.get_entry_for_checked_rev(self.p1)?))
1086 1088 }
1087 1089 }
1088 1090
1089 1091 pub fn p2_entry(
1090 1092 &self,
1091 1093 ) -> Result<Option<RevlogEntry<'revlog>>, RevlogError> {
1092 1094 if self.p2 == NULL_REVISION {
1093 1095 Ok(None)
1094 1096 } else {
1095 1097 Ok(Some(self.revlog.get_entry_for_checked_rev(self.p2)?))
1096 1098 }
1097 1099 }
1098 1100
1099 1101 pub fn p1(&self) -> Option<Revision> {
1100 1102 if self.p1 == NULL_REVISION {
1101 1103 None
1102 1104 } else {
1103 1105 Some(self.p1)
1104 1106 }
1105 1107 }
1106 1108
1107 1109 pub fn p2(&self) -> Option<Revision> {
1108 1110 if self.p2 == NULL_REVISION {
1109 1111 None
1110 1112 } else {
1111 1113 Some(self.p2)
1112 1114 }
1113 1115 }
1114 1116
1115 1117 pub fn is_censored(&self) -> bool {
1116 1118 (self.flags & REVISION_FLAG_CENSORED) != 0
1117 1119 }
1118 1120
1119 1121 pub fn has_length_affecting_flag_processor(&self) -> bool {
1120 1122 // Relevant Python code: revlog.size()
1121 1123 // note: ELLIPSIS is known to not change the content
1122 1124 (self.flags & (REVIDX_KNOWN_FLAGS ^ REVISION_FLAG_ELLIPSIS)) != 0
1123 1125 }
1124 1126
1125 1127 /// The data for this entry, after resolving deltas if any.
1126 1128 pub fn rawdata(&self) -> Result<Cow<'revlog, [u8]>, RevlogError> {
1127 1129 let mut entry = self.clone();
1128 1130 let mut delta_chain = vec![];
1129 1131
1130 1132 // The meaning of `base_rev_or_base_of_delta_chain` depends on
1131 1133 // generaldelta. See the doc on `ENTRY_DELTA_BASE` in
1132 1134 // `mercurial/revlogutils/constants.py` and the code in
1133 1135 // [_chaininfo] and in [index_deltachain].
1134 1136 let uses_generaldelta = self.revlog.index.uses_generaldelta();
1135 1137 while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
1136 1138 entry = if uses_generaldelta {
1137 1139 delta_chain.push(entry);
1138 1140 self.revlog.get_entry_for_checked_rev(base_rev)?
1139 1141 } else {
1140 1142 let base_rev = UncheckedRevision(entry.rev.0 - 1);
1141 1143 delta_chain.push(entry);
1142 1144 self.revlog.get_entry(base_rev)?
1143 1145 };
1144 1146 }
1145 1147
1146 1148 let data = if delta_chain.is_empty() {
1147 1149 entry.data_chunk()?
1148 1150 } else {
1149 1151 Revlog::build_data_from_deltas(entry, &delta_chain)?.into()
1150 1152 };
1151 1153
1152 1154 Ok(data)
1153 1155 }
1154 1156
1155 1157 fn check_data(
1156 1158 &self,
1157 1159 data: Cow<'revlog, [u8]>,
1158 1160 ) -> Result<Cow<'revlog, [u8]>, RevlogError> {
1159 1161 if self.revlog.check_hash(
1160 1162 self.p1,
1161 1163 self.p2,
1162 1164 self.hash.as_bytes(),
1163 1165 &data,
1164 1166 ) {
1165 1167 Ok(data)
1166 1168 } else {
1167 1169 if (self.flags & REVISION_FLAG_ELLIPSIS) != 0 {
1168 1170 return Err(HgError::unsupported(
1169 1171 "ellipsis revisions are not supported by rhg",
1170 1172 )
1171 1173 .into());
1172 1174 }
1173 1175 Err(corrupted(format!(
1174 1176 "hash check failed for revision {}",
1175 1177 self.rev
1176 1178 ))
1177 1179 .into())
1178 1180 }
1179 1181 }
1180 1182
1181 1183 pub fn data(&self) -> Result<Cow<'revlog, [u8]>, RevlogError> {
1182 1184 let data = self.rawdata()?;
1183 1185 if self.rev == NULL_REVISION {
1184 1186 return Ok(data);
1185 1187 }
1186 1188 if self.is_censored() {
1187 1189 return Err(HgError::CensoredNodeError.into());
1188 1190 }
1189 1191 self.check_data(data)
1190 1192 }
1191 1193
1192 1194 /// Extract the data contained in the entry.
1193 1195 /// This may be a delta. (See `is_delta`.)
1194 1196 fn data_chunk(&self) -> Result<Cow<'revlog, [u8]>, HgError> {
1195 1197 if self.bytes.is_empty() {
1196 1198 return Ok(Cow::Borrowed(&[]));
1197 1199 }
1198 1200 match self.bytes[0] {
1199 1201 // Revision data is the entirety of the entry, including this
1200 1202 // header.
1201 1203 b'\0' => Ok(Cow::Borrowed(self.bytes)),
1202 1204 // Raw revision data follows.
1203 1205 b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
1204 1206 // zlib (RFC 1950) data.
1205 1207 b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
1206 1208 // zstd data.
1207 1209 b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
1208 1210 // A proper new format should have had a repo/store requirement.
1209 1211 format_type => Err(corrupted(format!(
1210 1212 "unknown compression header '{}'",
1211 1213 format_type
1212 1214 ))),
1213 1215 }
1214 1216 }
1215 1217
1216 1218 fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> {
1217 1219 let mut decoder = ZlibDecoder::new(self.bytes);
1218 1220 if self.is_delta() {
1219 1221 let mut buf = Vec::with_capacity(self.compressed_len as usize);
1220 1222 decoder
1221 1223 .read_to_end(&mut buf)
1222 1224 .map_err(|e| corrupted(e.to_string()))?;
1223 1225 Ok(buf)
1224 1226 } else {
1225 1227 let cap = self.uncompressed_len.max(0) as usize;
1226 1228 let mut buf = vec![0; cap];
1227 1229 decoder
1228 1230 .read_exact(&mut buf)
1229 1231 .map_err(|e| corrupted(e.to_string()))?;
1230 1232 Ok(buf)
1231 1233 }
1232 1234 }
1233 1235
1234 1236 fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> {
1235 1237 let cap = self.uncompressed_len.max(0) as usize;
1236 1238 if self.is_delta() {
1237 1239 // [cap] is usually an over-estimate of the space needed because
1238 1240 // it's the length of delta-decoded data, but we're interested
1239 1241 // in the size of the delta.
1240 1242 // This means we have to [shrink_to_fit] to avoid holding on
1241 1243 // to a large chunk of memory, but it also means we must have a
1242 1244 // fallback branch, for the case when the delta is longer than
1243 1245 // the original data (surprisingly, this does happen in practice)
1244 1246 let mut buf = Vec::with_capacity(cap);
1245 1247 match zstd_decompress_to_buffer(self.bytes, &mut buf) {
1246 1248 Ok(_) => buf.shrink_to_fit(),
1247 1249 Err(_) => {
1248 1250 buf.clear();
1249 1251 zstd::stream::copy_decode(self.bytes, &mut buf)
1250 1252 .map_err(|e| corrupted(e.to_string()))?;
1251 1253 }
1252 1254 };
1253 1255 Ok(buf)
1254 1256 } else {
1255 1257 let mut buf = Vec::with_capacity(cap);
1256 1258 let len = zstd_decompress_to_buffer(self.bytes, &mut buf)
1257 1259 .map_err(|e| corrupted(e.to_string()))?;
1258 1260 if len != self.uncompressed_len as usize {
1259 1261 Err(corrupted("uncompressed length does not match"))
1260 1262 } else {
1261 1263 Ok(buf)
1262 1264 }
1263 1265 }
1264 1266 }
1265 1267
1266 1268 /// Tell if the entry is a snapshot or a delta
1267 1269 /// (influences on decompression).
1268 1270 fn is_delta(&self) -> bool {
1269 1271 self.base_rev_or_base_of_delta_chain.is_some()
1270 1272 }
1271 1273 }
1272 1274
1273 1275 /// Calculate the hash of a revision given its data and its parents.
1274 1276 fn hash(
1275 1277 data: &[u8],
1276 1278 p1_hash: &[u8],
1277 1279 p2_hash: &[u8],
1278 1280 ) -> [u8; NODE_BYTES_LENGTH] {
1279 1281 let mut hasher = Sha1::new();
1280 1282 let (a, b) = (p1_hash, p2_hash);
1281 1283 if a > b {
1282 1284 hasher.update(b);
1283 1285 hasher.update(a);
1284 1286 } else {
1285 1287 hasher.update(a);
1286 1288 hasher.update(b);
1287 1289 }
1288 1290 hasher.update(data);
1289 1291 *hasher.finalize().as_ref()
1290 1292 }
1291 1293
1292 1294 #[cfg(test)]
1293 1295 mod tests {
1294 1296 use super::*;
1295 1297 use crate::index::IndexEntryBuilder;
1296 1298 use itertools::Itertools;
1297 1299
1298 1300 #[test]
1299 1301 fn test_empty() {
1300 1302 let temp = tempfile::tempdir().unwrap();
1301 let vfs = Vfs { base: temp.path() };
1303 let vfs = VfsImpl {
1304 base: temp.path().to_owned(),
1305 };
1302 1306 std::fs::write(temp.path().join("foo.i"), b"").unwrap();
1303 1307 std::fs::write(temp.path().join("foo.d"), b"").unwrap();
1304 1308 let revlog =
1305 1309 Revlog::open(&vfs, "foo.i", None, RevlogOpenOptions::default())
1306 1310 .unwrap();
1307 1311 assert!(revlog.is_empty());
1308 1312 assert_eq!(revlog.len(), 0);
1309 1313 assert!(revlog.get_entry(0.into()).is_err());
1310 1314 assert!(!revlog.has_rev(0.into()));
1311 1315 assert_eq!(
1312 1316 revlog.rev_from_node(NULL_NODE.into()).unwrap(),
1313 1317 NULL_REVISION
1314 1318 );
1315 1319 let null_entry = revlog.get_entry(NULL_REVISION.into()).ok().unwrap();
1316 1320 assert_eq!(null_entry.revision(), NULL_REVISION);
1317 1321 assert!(null_entry.data().unwrap().is_empty());
1318 1322 }
1319 1323
1320 1324 #[test]
1321 1325 fn test_inline() {
1322 1326 let temp = tempfile::tempdir().unwrap();
1323 let vfs = Vfs { base: temp.path() };
1327 let vfs = VfsImpl {
1328 base: temp.path().to_owned(),
1329 };
1324 1330 let node0 = Node::from_hex("2ed2a3912a0b24502043eae84ee4b279c18b90dd")
1325 1331 .unwrap();
1326 1332 let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
1327 1333 .unwrap();
1328 1334 let node2 = Node::from_hex("dd6ad206e907be60927b5a3117b97dffb2590582")
1329 1335 .unwrap();
1330 1336 let entry0_bytes = IndexEntryBuilder::new()
1331 1337 .is_first(true)
1332 1338 .with_version(1)
1333 1339 .with_inline(true)
1334 1340 .with_node(node0)
1335 1341 .build();
1336 1342 let entry1_bytes = IndexEntryBuilder::new().with_node(node1).build();
1337 1343 let entry2_bytes = IndexEntryBuilder::new()
1338 1344 .with_p1(Revision(0))
1339 1345 .with_p2(Revision(1))
1340 1346 .with_node(node2)
1341 1347 .build();
1342 1348 let contents = vec![entry0_bytes, entry1_bytes, entry2_bytes]
1343 1349 .into_iter()
1344 1350 .flatten()
1345 1351 .collect_vec();
1346 1352 std::fs::write(temp.path().join("foo.i"), contents).unwrap();
1347 1353 let revlog =
1348 1354 Revlog::open(&vfs, "foo.i", None, RevlogOpenOptions::default())
1349 1355 .unwrap();
1350 1356
1351 1357 let entry0 = revlog.get_entry(0.into()).ok().unwrap();
1352 1358 assert_eq!(entry0.revision(), Revision(0));
1353 1359 assert_eq!(*entry0.node(), node0);
1354 1360 assert!(!entry0.has_p1());
1355 1361 assert_eq!(entry0.p1(), None);
1356 1362 assert_eq!(entry0.p2(), None);
1357 1363 let p1_entry = entry0.p1_entry().unwrap();
1358 1364 assert!(p1_entry.is_none());
1359 1365 let p2_entry = entry0.p2_entry().unwrap();
1360 1366 assert!(p2_entry.is_none());
1361 1367
1362 1368 let entry1 = revlog.get_entry(1.into()).ok().unwrap();
1363 1369 assert_eq!(entry1.revision(), Revision(1));
1364 1370 assert_eq!(*entry1.node(), node1);
1365 1371 assert!(!entry1.has_p1());
1366 1372 assert_eq!(entry1.p1(), None);
1367 1373 assert_eq!(entry1.p2(), None);
1368 1374 let p1_entry = entry1.p1_entry().unwrap();
1369 1375 assert!(p1_entry.is_none());
1370 1376 let p2_entry = entry1.p2_entry().unwrap();
1371 1377 assert!(p2_entry.is_none());
1372 1378
1373 1379 let entry2 = revlog.get_entry(2.into()).ok().unwrap();
1374 1380 assert_eq!(entry2.revision(), Revision(2));
1375 1381 assert_eq!(*entry2.node(), node2);
1376 1382 assert!(entry2.has_p1());
1377 1383 assert_eq!(entry2.p1(), Some(Revision(0)));
1378 1384 assert_eq!(entry2.p2(), Some(Revision(1)));
1379 1385 let p1_entry = entry2.p1_entry().unwrap();
1380 1386 assert!(p1_entry.is_some());
1381 1387 assert_eq!(p1_entry.unwrap().revision(), Revision(0));
1382 1388 let p2_entry = entry2.p2_entry().unwrap();
1383 1389 assert!(p2_entry.is_some());
1384 1390 assert_eq!(p2_entry.unwrap().revision(), Revision(1));
1385 1391 }
1386 1392
1387 1393 #[test]
1388 1394 fn test_nodemap() {
1389 1395 let temp = tempfile::tempdir().unwrap();
1390 let vfs = Vfs { base: temp.path() };
1396 let vfs = VfsImpl {
1397 base: temp.path().to_owned(),
1398 };
1391 1399
1392 1400 // building a revlog with a forced Node starting with zeros
1393 1401 // This is a corruption, but it does not preclude using the nodemap
1394 1402 // if we don't try and access the data
1395 1403 let node0 = Node::from_hex("00d2a3912a0b24502043eae84ee4b279c18b90dd")
1396 1404 .unwrap();
1397 1405 let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
1398 1406 .unwrap();
1399 1407 let entry0_bytes = IndexEntryBuilder::new()
1400 1408 .is_first(true)
1401 1409 .with_version(1)
1402 1410 .with_inline(true)
1403 1411 .with_node(node0)
1404 1412 .build();
1405 1413 let entry1_bytes = IndexEntryBuilder::new().with_node(node1).build();
1406 1414 let contents = vec![entry0_bytes, entry1_bytes]
1407 1415 .into_iter()
1408 1416 .flatten()
1409 1417 .collect_vec();
1410 1418 std::fs::write(temp.path().join("foo.i"), contents).unwrap();
1411 1419
1412 1420 let mut idx = nodemap::tests::TestNtIndex::new();
1413 1421 idx.insert_node(Revision(0), node0).unwrap();
1414 1422 idx.insert_node(Revision(1), node1).unwrap();
1415 1423
1416 1424 let revlog = Revlog::open_gen(
1417 1425 &vfs,
1418 1426 "foo.i",
1419 1427 None,
1420 1428 RevlogOpenOptions::default(),
1421 1429 Some(idx.nt),
1422 1430 )
1423 1431 .unwrap();
1424 1432
1425 1433 // accessing the data shows the corruption
1426 1434 revlog.get_entry(0.into()).unwrap().data().unwrap_err();
1427 1435
1428 1436 assert_eq!(
1429 1437 revlog.rev_from_node(NULL_NODE.into()).unwrap(),
1430 1438 Revision(-1)
1431 1439 );
1432 1440 assert_eq!(revlog.rev_from_node(node0.into()).unwrap(), Revision(0));
1433 1441 assert_eq!(revlog.rev_from_node(node1.into()).unwrap(), Revision(1));
1434 1442 assert_eq!(
1435 1443 revlog
1436 1444 .rev_from_node(NodePrefix::from_hex("000").unwrap())
1437 1445 .unwrap(),
1438 1446 Revision(-1)
1439 1447 );
1440 1448 assert_eq!(
1441 1449 revlog
1442 1450 .rev_from_node(NodePrefix::from_hex("b00").unwrap())
1443 1451 .unwrap(),
1444 1452 Revision(1)
1445 1453 );
1446 1454 // RevlogError does not implement PartialEq
1447 1455 // (ultimately because io::Error does not)
1448 1456 match revlog
1449 1457 .rev_from_node(NodePrefix::from_hex("00").unwrap())
1450 1458 .expect_err("Expected to give AmbiguousPrefix error")
1451 1459 {
1452 1460 RevlogError::AmbiguousPrefix => (),
1453 1461 e => {
1454 1462 panic!("Got another error than AmbiguousPrefix: {:?}", e);
1455 1463 }
1456 1464 };
1457 1465 }
1458 1466 }
@@ -1,108 +1,108
1 1 use crate::errors::{HgError, HgResultExt};
2 2 use bytes_cast::{unaligned, BytesCast};
3 3 use memmap2::Mmap;
4 4 use std::path::{Path, PathBuf};
5 5
6 use crate::vfs::Vfs;
6 use crate::vfs::VfsImpl;
7 7
8 8 const ONDISK_VERSION: u8 = 1;
9 9
10 10 pub(super) struct NodeMapDocket {
11 11 pub data_length: usize,
12 12 // TODO: keep here more of the data from `parse()` when we need it
13 13 }
14 14
15 15 #[derive(BytesCast)]
16 16 #[repr(C)]
17 17 struct DocketHeader {
18 18 uid_size: u8,
19 19 _tip_rev: unaligned::U64Be,
20 20 data_length: unaligned::U64Be,
21 21 _data_unused: unaligned::U64Be,
22 22 tip_node_size: unaligned::U64Be,
23 23 }
24 24
25 25 impl NodeMapDocket {
26 26 /// Return `Ok(None)` when the caller should proceed without a persistent
27 27 /// nodemap:
28 28 ///
29 29 /// * This revlog does not have a `.n` docket file (it is not generated for
30 30 /// small revlogs), or
31 31 /// * The docket has an unsupported version number (repositories created by
32 32 /// later hg, maybe that should be a requirement instead?), or
33 33 /// * The docket file points to a missing (likely deleted) data file (this
34 34 /// can happen in a rare race condition).
35 35 pub fn read_from_file(
36 store_vfs: &Vfs,
36 store_vfs: &VfsImpl,
37 37 index_path: &Path,
38 38 ) -> Result<Option<(Self, Mmap)>, HgError> {
39 39 let docket_path = index_path.with_extension("n");
40 40 let docket_bytes = if let Some(bytes) =
41 41 store_vfs.read(&docket_path).io_not_found_as_none()?
42 42 {
43 43 bytes
44 44 } else {
45 45 return Ok(None);
46 46 };
47 47
48 48 let input = if let Some((&ONDISK_VERSION, rest)) =
49 49 docket_bytes.split_first()
50 50 {
51 51 rest
52 52 } else {
53 53 return Ok(None);
54 54 };
55 55
56 56 /// Treat any error as a parse error
57 57 fn parse<T, E>(result: Result<T, E>) -> Result<T, HgError> {
58 58 result
59 59 .map_err(|_| HgError::corrupted("nodemap docket parse error"))
60 60 }
61 61
62 62 let (header, rest) = parse(DocketHeader::from_bytes(input))?;
63 63 let uid_size = header.uid_size as usize;
64 64 // TODO: do we care about overflow for 4 GB+ nodemap files on 32-bit
65 65 // systems?
66 66 let tip_node_size = header.tip_node_size.get() as usize;
67 67 let data_length = header.data_length.get() as usize;
68 68 let (uid, rest) = parse(u8::slice_from_bytes(rest, uid_size))?;
69 69 let (_tip_node, _rest) =
70 70 parse(u8::slice_from_bytes(rest, tip_node_size))?;
71 71 let uid = parse(std::str::from_utf8(uid))?;
72 72 let docket = NodeMapDocket { data_length };
73 73
74 74 let data_path = rawdata_path(&docket_path, uid);
75 75 // TODO: use `vfs.read()` here when the `persistent-nodemap.mmap`
76 76 // config is false?
77 77 if let Some(mmap) =
78 78 store_vfs.mmap_open(data_path).io_not_found_as_none()?
79 79 {
80 80 if mmap.len() >= data_length {
81 81 Ok(Some((docket, mmap)))
82 82 } else {
83 83 Err(HgError::corrupted("persistent nodemap too short"))
84 84 }
85 85 } else {
86 86 // Even if .hg/requires opted in, some revlogs are deemed small
87 87 // enough to not need a persistent nodemap.
88 88 Ok(None)
89 89 }
90 90 }
91 91 }
92 92
93 93 fn rawdata_path(docket_path: &Path, uid: &str) -> PathBuf {
94 94 let docket_name = docket_path
95 95 .file_name()
96 96 .expect("expected a base name")
97 97 .to_str()
98 98 .expect("expected an ASCII file name in the store");
99 99 let prefix = docket_name
100 100 .strip_suffix(".n.a")
101 101 .or_else(|| docket_name.strip_suffix(".n"))
102 102 .expect("expected docket path in .n or .n.a");
103 103 let name = format!("{}-{}.nd", prefix, uid);
104 104 docket_path
105 105 .parent()
106 106 .expect("expected a non-root path")
107 107 .join(name)
108 108 }
@@ -1,205 +1,382
1 1 use crate::errors::{HgError, IoErrorContext, IoResultExt};
2 use crate::exit_codes;
3 use dyn_clone::DynClone;
2 4 use memmap2::{Mmap, MmapOptions};
5 use std::fs::File;
3 6 use std::io::{ErrorKind, Write};
7 use std::os::unix::fs::MetadataExt;
4 8 use std::path::{Path, PathBuf};
5 9
6 10 /// Filesystem access abstraction for the contents of a given "base" diretory
7 #[derive(Clone, Copy)]
8 pub struct Vfs<'a> {
9 pub(crate) base: &'a Path,
11 #[derive(Clone)]
12 pub struct VfsImpl {
13 pub(crate) base: PathBuf,
10 14 }
11 15
12 16 struct FileNotFound(std::io::Error, PathBuf);
13 17
14 impl Vfs<'_> {
18 impl VfsImpl {
15 19 pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
16 20 self.base.join(relative_path)
17 21 }
18 22
19 23 pub fn symlink_metadata(
20 24 &self,
21 25 relative_path: impl AsRef<Path>,
22 26 ) -> Result<std::fs::Metadata, HgError> {
23 27 let path = self.join(relative_path);
24 28 std::fs::symlink_metadata(&path).when_reading_file(&path)
25 29 }
26 30
27 31 pub fn read_link(
28 32 &self,
29 33 relative_path: impl AsRef<Path>,
30 34 ) -> Result<PathBuf, HgError> {
31 35 let path = self.join(relative_path);
32 36 std::fs::read_link(&path).when_reading_file(&path)
33 37 }
34 38
35 39 pub fn read(
36 40 &self,
37 41 relative_path: impl AsRef<Path>,
38 42 ) -> Result<Vec<u8>, HgError> {
39 43 let path = self.join(relative_path);
40 44 std::fs::read(&path).when_reading_file(&path)
41 45 }
42 46
43 47 /// Returns `Ok(None)` if the file does not exist.
44 48 pub fn try_read(
45 49 &self,
46 50 relative_path: impl AsRef<Path>,
47 51 ) -> Result<Option<Vec<u8>>, HgError> {
48 52 match self.read(relative_path) {
49 53 Err(e) => match &e {
50 54 HgError::IoError { error, .. } => match error.kind() {
51 55 ErrorKind::NotFound => Ok(None),
52 56 _ => Err(e),
53 57 },
54 58 _ => Err(e),
55 59 },
56 60 Ok(v) => Ok(Some(v)),
57 61 }
58 62 }
59 63
60 64 fn mmap_open_gen(
61 65 &self,
62 66 relative_path: impl AsRef<Path>,
63 67 ) -> Result<Result<Mmap, FileNotFound>, HgError> {
64 68 let path = self.join(relative_path);
65 69 let file = match std::fs::File::open(&path) {
66 70 Err(err) => {
67 71 if let ErrorKind::NotFound = err.kind() {
68 72 return Ok(Err(FileNotFound(err, path)));
69 73 };
70 74 return (Err(err)).when_reading_file(&path);
71 75 }
72 76 Ok(file) => file,
73 77 };
74 // TODO: what are the safety requirements here?
78 // Safety is "enforced" by locks and assuming other processes are
79 // well-behaved. If any misbehaving or malicious process does touch
80 // the index, it could lead to corruption. This is inherent
81 // to file-based `mmap`, though some platforms have some ways of
82 // mitigating.
83 // TODO linux: set the immutable flag with `chattr(1)`?
75 84 let mmap = unsafe { MmapOptions::new().map(&file) }
76 85 .when_reading_file(&path)?;
77 86 Ok(Ok(mmap))
78 87 }
79 88
80 89 pub fn mmap_open_opt(
81 90 &self,
82 91 relative_path: impl AsRef<Path>,
83 92 ) -> Result<Option<Mmap>, HgError> {
84 93 self.mmap_open_gen(relative_path).map(|res| res.ok())
85 94 }
86 95
87 96 pub fn mmap_open(
88 97 &self,
89 98 relative_path: impl AsRef<Path>,
90 99 ) -> Result<Mmap, HgError> {
91 100 match self.mmap_open_gen(relative_path)? {
92 101 Err(FileNotFound(err, path)) => Err(err).when_reading_file(&path),
93 102 Ok(res) => Ok(res),
94 103 }
95 104 }
96 105
97 106 pub fn rename(
98 107 &self,
99 108 relative_from: impl AsRef<Path>,
100 109 relative_to: impl AsRef<Path>,
101 110 ) -> Result<(), HgError> {
102 111 let from = self.join(relative_from);
103 112 let to = self.join(relative_to);
104 113 std::fs::rename(&from, &to)
105 114 .with_context(|| IoErrorContext::RenamingFile { from, to })
106 115 }
107 116
108 117 pub fn remove_file(
109 118 &self,
110 119 relative_path: impl AsRef<Path>,
111 120 ) -> Result<(), HgError> {
112 121 let path = self.join(relative_path);
113 122 std::fs::remove_file(&path)
114 123 .with_context(|| IoErrorContext::RemovingFile(path))
115 124 }
116 125
117 126 #[cfg(unix)]
118 127 pub fn create_symlink(
119 128 &self,
120 129 relative_link_path: impl AsRef<Path>,
121 130 target_path: impl AsRef<Path>,
122 131 ) -> Result<(), HgError> {
123 132 let link_path = self.join(relative_link_path);
124 133 std::os::unix::fs::symlink(target_path, &link_path)
125 134 .when_writing_file(&link_path)
126 135 }
127 136
128 137 /// Write `contents` into a temporary file, then rename to `relative_path`.
129 138 /// This makes writing to a file "atomic": a reader opening that path will
130 139 /// see either the previous contents of the file or the complete new
131 140 /// content, never a partial write.
132 141 pub fn atomic_write(
133 142 &self,
134 143 relative_path: impl AsRef<Path>,
135 144 contents: &[u8],
136 145 ) -> Result<(), HgError> {
137 let mut tmp = tempfile::NamedTempFile::new_in(self.base)
138 .when_writing_file(self.base)?;
146 let mut tmp = tempfile::NamedTempFile::new_in(&self.base)
147 .when_writing_file(&self.base)?;
139 148 tmp.write_all(contents)
140 149 .and_then(|()| tmp.flush())
141 150 .when_writing_file(tmp.path())?;
142 151 let path = self.join(relative_path);
143 152 tmp.persist(&path)
144 153 .map_err(|e| e.error)
145 154 .when_writing_file(&path)?;
146 155 Ok(())
147 156 }
148 157 }
149 158
150 159 fn fs_metadata(
151 160 path: impl AsRef<Path>,
152 161 ) -> Result<Option<std::fs::Metadata>, HgError> {
153 162 let path = path.as_ref();
154 163 match std::fs::metadata(path) {
155 164 Ok(meta) => Ok(Some(meta)),
156 165 Err(error) => match error.kind() {
157 166 // TODO: when we require a Rust version where `NotADirectory` is
158 167 // stable, invert this logic and return None for it and `NotFound`
159 168 // and propagate any other error.
160 169 ErrorKind::PermissionDenied => Err(error).with_context(|| {
161 170 IoErrorContext::ReadingMetadata(path.to_owned())
162 171 }),
163 172 _ => Ok(None),
164 173 },
165 174 }
166 175 }
167 176
177 /// Writable file object that atomically updates a file
178 ///
179 /// All writes will go to a temporary copy of the original file. Call
180 /// [`Self::close`] when you are done writing, and [`Self`] will rename
181 /// the temporary copy to the original name, making the changes
182 /// visible. If the object is destroyed without being closed, all your
183 /// writes are discarded.
184 pub struct AtomicFile {
185 /// The temporary file to write to
186 fp: std::fs::File,
187 /// Path of the temp file
188 temp_path: PathBuf,
189 /// Used when stat'ing the file, is useful only if the target file is
190 /// guarded by any lock (e.g. repo.lock or repo.wlock).
191 check_ambig: bool,
192 /// Path of the target file
193 target_name: PathBuf,
194 /// Whether the file is open or not
195 is_open: bool,
196 }
197
198 impl AtomicFile {
199 pub fn new(
200 fp: std::fs::File,
201 check_ambig: bool,
202 temp_name: PathBuf,
203 target_name: PathBuf,
204 ) -> Self {
205 Self {
206 fp,
207 check_ambig,
208 temp_path: temp_name,
209 target_name,
210 is_open: true,
211 }
212 }
213
214 /// Write `buf` to the temporary file
215 pub fn write_all(&mut self, buf: &[u8]) -> Result<(), std::io::Error> {
216 self.fp.write_all(buf)
217 }
218
219 fn target(&self) -> PathBuf {
220 self.temp_path
221 .parent()
222 .expect("should not be at the filesystem root")
223 .join(&self.target_name)
224 }
225
226 /// Close the temporary file and rename to the target
227 pub fn close(mut self) -> Result<(), std::io::Error> {
228 self.fp.flush()?;
229 let target = self.target();
230 if self.check_ambig {
231 if let Ok(stat) = std::fs::metadata(&target) {
232 std::fs::rename(&self.temp_path, &target)?;
233 let new_stat = std::fs::metadata(&target)?;
234 let ctime = new_stat.ctime();
235 let is_ambiguous = ctime == stat.ctime();
236 if is_ambiguous {
237 let advanced =
238 filetime::FileTime::from_unix_time(ctime + 1, 0);
239 filetime::set_file_times(target, advanced, advanced)?;
240 }
241 } else {
242 std::fs::rename(&self.temp_path, target)?;
243 }
244 } else {
245 std::fs::rename(&self.temp_path, target).unwrap();
246 }
247 self.is_open = false;
248 Ok(())
249 }
250 }
251
252 impl Drop for AtomicFile {
253 fn drop(&mut self) {
254 if self.is_open {
255 std::fs::remove_file(self.target()).ok();
256 }
257 }
258 }
259
260 /// Abstracts over the VFS to allow for different implementations of the
261 /// filesystem layer (like passing one from Python).
262 pub trait Vfs: Sync + Send + DynClone {
263 fn open(&self, filename: &Path) -> Result<std::fs::File, HgError>;
264 fn open_read(&self, filename: &Path) -> Result<std::fs::File, HgError>;
265 fn open_check_ambig(
266 &self,
267 filename: &Path,
268 ) -> Result<std::fs::File, HgError>;
269 fn create(&self, filename: &Path) -> Result<std::fs::File, HgError>;
270 /// Must truncate the new file if exist
271 fn create_atomic(
272 &self,
273 filename: &Path,
274 check_ambig: bool,
275 ) -> Result<AtomicFile, HgError>;
276 fn file_size(&self, file: &File) -> Result<u64, HgError>;
277 fn exists(&self, filename: &Path) -> bool;
278 fn unlink(&self, filename: &Path) -> Result<(), HgError>;
279 fn rename(
280 &self,
281 from: &Path,
282 to: &Path,
283 check_ambig: bool,
284 ) -> Result<(), HgError>;
285 fn copy(&self, from: &Path, to: &Path) -> Result<(), HgError>;
286 }
287
288 /// These methods will need to be implemented once `rhg` (and other) non-Python
289 /// users of `hg-core` start doing more on their own, like writing to files.
290 impl Vfs for VfsImpl {
291 fn open(&self, _filename: &Path) -> Result<std::fs::File, HgError> {
292 todo!()
293 }
294 fn open_read(&self, filename: &Path) -> Result<std::fs::File, HgError> {
295 let path = self.base.join(filename);
296 std::fs::File::open(&path).when_reading_file(&path)
297 }
298 fn open_check_ambig(
299 &self,
300 _filename: &Path,
301 ) -> Result<std::fs::File, HgError> {
302 todo!()
303 }
304 fn create(&self, _filename: &Path) -> Result<std::fs::File, HgError> {
305 todo!()
306 }
307 fn create_atomic(
308 &self,
309 _filename: &Path,
310 _check_ambig: bool,
311 ) -> Result<AtomicFile, HgError> {
312 todo!()
313 }
314 fn file_size(&self, file: &File) -> Result<u64, HgError> {
315 Ok(file
316 .metadata()
317 .map_err(|e| {
318 HgError::abort(
319 format!("Could not get file metadata: {}", e),
320 exit_codes::ABORT,
321 None,
322 )
323 })?
324 .size())
325 }
326 fn exists(&self, _filename: &Path) -> bool {
327 todo!()
328 }
329 fn unlink(&self, _filename: &Path) -> Result<(), HgError> {
330 todo!()
331 }
332 fn rename(
333 &self,
334 _from: &Path,
335 _to: &Path,
336 _check_ambig: bool,
337 ) -> Result<(), HgError> {
338 todo!()
339 }
340 fn copy(&self, _from: &Path, _to: &Path) -> Result<(), HgError> {
341 todo!()
342 }
343 }
344
168 345 pub(crate) fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> {
169 346 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir()))
170 347 }
171 348
172 349 pub(crate) fn is_file(path: impl AsRef<Path>) -> Result<bool, HgError> {
173 350 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_file()))
174 351 }
175 352
176 353 /// Returns whether the given `path` is on a network file system.
177 354 /// Taken from `cargo`'s codebase.
178 355 #[cfg(target_os = "linux")]
179 356 pub(crate) fn is_on_nfs_mount(path: impl AsRef<Path>) -> bool {
180 357 use std::ffi::CString;
181 358 use std::mem;
182 359 use std::os::unix::prelude::*;
183 360
184 361 let path = match CString::new(path.as_ref().as_os_str().as_bytes()) {
185 362 Ok(path) => path,
186 363 Err(_) => return false,
187 364 };
188 365
189 366 unsafe {
190 367 let mut buf: libc::statfs = mem::zeroed();
191 368 let r = libc::statfs(path.as_ptr(), &mut buf);
192 369
193 370 r == 0 && buf.f_type as u32 == libc::NFS_SUPER_MAGIC as u32
194 371 }
195 372 }
196 373
197 374 /// Similar to what Cargo does; although detecting NFS (or non-local
198 375 /// file systems) _should_ be possible on other operating systems,
199 376 /// we'll just assume that mmap() works there, for now; after all,
200 377 /// _some_ functionality is better than a compile error, i.e. none at
201 378 /// all
202 379 #[cfg(not(target_os = "linux"))]
203 380 pub(crate) fn is_on_nfs_mount(_path: impl AsRef<Path>) -> bool {
204 381 false
205 382 }
@@ -1,826 +1,826
1 1 // status.rs
2 2 //
3 3 // Copyright 2020, Georges Racinet <georges.racinets@octobus.net>
4 4 //
5 5 // This software may be used and distributed according to the terms of the
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 use crate::error::CommandError;
9 9 use crate::ui::{
10 10 format_pattern_file_warning, print_narrow_sparse_warnings, relative_paths,
11 11 RelativePaths, Ui,
12 12 };
13 13 use crate::utils::path_utils::RelativizePaths;
14 14 use clap::Arg;
15 15 use format_bytes::format_bytes;
16 16 use hg::config::Config;
17 17 use hg::dirstate::has_exec_bit;
18 18 use hg::dirstate::status::StatusPath;
19 19 use hg::dirstate::TruncatedTimestamp;
20 20 use hg::errors::{HgError, IoResultExt};
21 21 use hg::filepatterns::parse_pattern_args;
22 22 use hg::lock::LockError;
23 23 use hg::manifest::Manifest;
24 24 use hg::matchers::{AlwaysMatcher, IntersectionMatcher};
25 25 use hg::repo::Repo;
26 26 use hg::utils::debug::debug_wait_for_file;
27 27 use hg::utils::files::{
28 28 get_bytes_from_os_str, get_bytes_from_os_string, get_path_from_bytes,
29 29 };
30 30 use hg::utils::hg_path::{hg_path_to_path_buf, HgPath};
31 31 use hg::Revision;
32 32 use hg::StatusError;
33 33 use hg::StatusOptions;
34 34 use hg::{self, narrow, sparse};
35 35 use hg::{DirstateStatus, RevlogOpenOptions};
36 36 use hg::{PatternFileWarning, RevlogType};
37 37 use log::info;
38 38 use rayon::prelude::*;
39 39 use std::borrow::Cow;
40 40 use std::io;
41 41 use std::mem::take;
42 42 use std::path::PathBuf;
43 43
44 44 pub const HELP_TEXT: &str = "
45 45 Show changed files in the working directory
46 46
47 47 This is a pure Rust version of `hg status`.
48 48
49 49 Some options might be missing, check the list below.
50 50 ";
51 51
52 52 pub fn args() -> clap::Command {
53 53 clap::command!("status")
54 54 .alias("st")
55 55 .about(HELP_TEXT)
56 56 .arg(
57 57 Arg::new("file")
58 58 .value_parser(clap::value_parser!(std::ffi::OsString))
59 59 .help("show only these files")
60 60 .action(clap::ArgAction::Append),
61 61 )
62 62 .arg(
63 63 Arg::new("all")
64 64 .help("show status of all files")
65 65 .short('A')
66 66 .action(clap::ArgAction::SetTrue)
67 67 .long("all"),
68 68 )
69 69 .arg(
70 70 Arg::new("modified")
71 71 .help("show only modified files")
72 72 .short('m')
73 73 .action(clap::ArgAction::SetTrue)
74 74 .long("modified"),
75 75 )
76 76 .arg(
77 77 Arg::new("added")
78 78 .help("show only added files")
79 79 .short('a')
80 80 .action(clap::ArgAction::SetTrue)
81 81 .long("added"),
82 82 )
83 83 .arg(
84 84 Arg::new("removed")
85 85 .help("show only removed files")
86 86 .short('r')
87 87 .action(clap::ArgAction::SetTrue)
88 88 .long("removed"),
89 89 )
90 90 .arg(
91 91 Arg::new("clean")
92 92 .help("show only clean files")
93 93 .short('c')
94 94 .action(clap::ArgAction::SetTrue)
95 95 .long("clean"),
96 96 )
97 97 .arg(
98 98 Arg::new("deleted")
99 99 .help("show only deleted files")
100 100 .short('d')
101 101 .action(clap::ArgAction::SetTrue)
102 102 .long("deleted"),
103 103 )
104 104 .arg(
105 105 Arg::new("unknown")
106 106 .help("show only unknown (not tracked) files")
107 107 .short('u')
108 108 .action(clap::ArgAction::SetTrue)
109 109 .long("unknown"),
110 110 )
111 111 .arg(
112 112 Arg::new("ignored")
113 113 .help("show only ignored files")
114 114 .short('i')
115 115 .action(clap::ArgAction::SetTrue)
116 116 .long("ignored"),
117 117 )
118 118 .arg(
119 119 Arg::new("copies")
120 120 .help("show source of copied files (DEFAULT: ui.statuscopies)")
121 121 .short('C')
122 122 .action(clap::ArgAction::SetTrue)
123 123 .long("copies"),
124 124 )
125 125 .arg(
126 126 Arg::new("print0")
127 127 .help("end filenames with NUL, for use with xargs")
128 128 .short('0')
129 129 .action(clap::ArgAction::SetTrue)
130 130 .long("print0"),
131 131 )
132 132 .arg(
133 133 Arg::new("no-status")
134 134 .help("hide status prefix")
135 135 .short('n')
136 136 .action(clap::ArgAction::SetTrue)
137 137 .long("no-status"),
138 138 )
139 139 .arg(
140 140 Arg::new("verbose")
141 141 .help("enable additional output")
142 142 .short('v')
143 143 .action(clap::ArgAction::SetTrue)
144 144 .long("verbose"),
145 145 )
146 146 .arg(
147 147 Arg::new("rev")
148 148 .help("show difference from/to revision")
149 149 .long("rev")
150 150 .num_args(1)
151 151 .action(clap::ArgAction::Append)
152 152 .value_name("REV"),
153 153 )
154 154 }
155 155
156 156 fn parse_revpair(
157 157 repo: &Repo,
158 158 revs: Option<Vec<String>>,
159 159 ) -> Result<Option<(Revision, Revision)>, CommandError> {
160 160 let revs = match revs {
161 161 None => return Ok(None),
162 162 Some(revs) => revs,
163 163 };
164 164 if revs.is_empty() {
165 165 return Ok(None);
166 166 }
167 167 if revs.len() != 2 {
168 168 return Err(CommandError::unsupported("expected 0 or 2 --rev flags"));
169 169 }
170 170
171 171 let rev1 = &revs[0];
172 172 let rev2 = &revs[1];
173 173 let rev1 = hg::revset::resolve_single(rev1, repo)
174 174 .map_err(|e| (e, rev1.as_str()))?;
175 175 let rev2 = hg::revset::resolve_single(rev2, repo)
176 176 .map_err(|e| (e, rev2.as_str()))?;
177 177 Ok(Some((rev1, rev2)))
178 178 }
179 179
180 180 /// Pure data type allowing the caller to specify file states to display
181 181 #[derive(Copy, Clone, Debug)]
182 182 pub struct DisplayStates {
183 183 pub modified: bool,
184 184 pub added: bool,
185 185 pub removed: bool,
186 186 pub clean: bool,
187 187 pub deleted: bool,
188 188 pub unknown: bool,
189 189 pub ignored: bool,
190 190 }
191 191
192 192 pub const DEFAULT_DISPLAY_STATES: DisplayStates = DisplayStates {
193 193 modified: true,
194 194 added: true,
195 195 removed: true,
196 196 clean: false,
197 197 deleted: true,
198 198 unknown: true,
199 199 ignored: false,
200 200 };
201 201
202 202 pub const ALL_DISPLAY_STATES: DisplayStates = DisplayStates {
203 203 modified: true,
204 204 added: true,
205 205 removed: true,
206 206 clean: true,
207 207 deleted: true,
208 208 unknown: true,
209 209 ignored: true,
210 210 };
211 211
212 212 impl DisplayStates {
213 213 pub fn is_empty(&self) -> bool {
214 214 !(self.modified
215 215 || self.added
216 216 || self.removed
217 217 || self.clean
218 218 || self.deleted
219 219 || self.unknown
220 220 || self.ignored)
221 221 }
222 222 }
223 223
224 224 fn has_unfinished_merge(repo: &Repo) -> Result<bool, CommandError> {
225 225 Ok(repo.dirstate_parents()?.is_merge())
226 226 }
227 227
228 228 fn has_unfinished_state(repo: &Repo) -> Result<bool, CommandError> {
229 229 // These are all the known values for the [fname] argument of
230 230 // [addunfinished] function in [state.py]
231 231 let known_state_files: &[&str] = &[
232 232 "bisect.state",
233 233 "graftstate",
234 234 "histedit-state",
235 235 "rebasestate",
236 236 "shelvedstate",
237 237 "transplant/journal",
238 238 "updatestate",
239 239 ];
240 240 if has_unfinished_merge(repo)? {
241 241 return Ok(true);
242 242 };
243 243 for f in known_state_files {
244 244 if repo.hg_vfs().join(f).exists() {
245 245 return Ok(true);
246 246 }
247 247 }
248 248 Ok(false)
249 249 }
250 250
251 251 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
252 252 // TODO: lift these limitations
253 253 if invocation
254 254 .config
255 255 .get(b"commands", b"status.terse")
256 256 .is_some()
257 257 {
258 258 return Err(CommandError::unsupported(
259 259 "status.terse is not yet supported with rhg status",
260 260 ));
261 261 }
262 262
263 263 let ui = invocation.ui;
264 264 let config = invocation.config;
265 265 let args = invocation.subcommand_args;
266 266
267 267 let revs = args.get_many::<String>("rev");
268 268 let print0 = args.get_flag("print0");
269 269 let verbose = args.get_flag("verbose")
270 270 || config.get_bool(b"ui", b"verbose")?
271 271 || config.get_bool(b"commands", b"status.verbose")?;
272 272 let verbose = verbose && !print0;
273 273
274 274 let all = args.get_flag("all");
275 275 let display_states = if all {
276 276 // TODO when implementing `--quiet`: it excludes clean files
277 277 // from `--all`
278 278 ALL_DISPLAY_STATES
279 279 } else {
280 280 let requested = DisplayStates {
281 281 modified: args.get_flag("modified"),
282 282 added: args.get_flag("added"),
283 283 removed: args.get_flag("removed"),
284 284 clean: args.get_flag("clean"),
285 285 deleted: args.get_flag("deleted"),
286 286 unknown: args.get_flag("unknown"),
287 287 ignored: args.get_flag("ignored"),
288 288 };
289 289 if requested.is_empty() {
290 290 DEFAULT_DISPLAY_STATES
291 291 } else {
292 292 requested
293 293 }
294 294 };
295 295 let no_status = args.get_flag("no-status");
296 296 let list_copies = all
297 297 || args.get_flag("copies")
298 298 || config.get_bool(b"ui", b"statuscopies")?;
299 299
300 300 let repo = invocation.repo?;
301 301 let revpair = parse_revpair(repo, revs.map(|i| i.cloned().collect()))?;
302 302
303 303 if verbose && has_unfinished_state(repo)? {
304 304 return Err(CommandError::unsupported(
305 305 "verbose status output is not supported by rhg (and is needed because we're in an unfinished operation)",
306 306 ));
307 307 }
308 308
309 309 let mut dmap = repo.dirstate_map_mut()?;
310 310
311 311 let check_exec = hg::checkexec::check_exec(repo.working_directory_path());
312 312
313 313 let options = StatusOptions {
314 314 check_exec,
315 315 list_clean: display_states.clean,
316 316 list_unknown: display_states.unknown,
317 317 list_ignored: display_states.ignored,
318 318 list_copies,
319 319 collect_traversed_dirs: false,
320 320 };
321 321
322 322 type StatusResult<'a> =
323 323 Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
324 324
325 325 let relative_status = config
326 326 .get_option(b"commands", b"status.relative")?
327 327 .expect("commands.status.relative should have a default value");
328 328
329 329 let relativize_paths = relative_status || {
330 330 // See in Python code with `getuipathfn` usage in `commands.py`.
331 331 let legacy_relative_behavior = args.contains_id("file");
332 332 match relative_paths(invocation.config)? {
333 333 RelativePaths::Legacy => legacy_relative_behavior,
334 334 RelativePaths::Bool(v) => v,
335 335 }
336 336 };
337 337
338 338 let mut output = DisplayStatusPaths {
339 339 ui,
340 340 no_status,
341 341 relativize: if relativize_paths {
342 342 Some(RelativizePaths::new(repo)?)
343 343 } else {
344 344 None
345 345 },
346 346 print0,
347 347 };
348 348
349 349 let after_status = |res: StatusResult| -> Result<_, CommandError> {
350 350 let (mut ds_status, pattern_warnings) = res?;
351 351 for warning in pattern_warnings {
352 352 ui.write_stderr(&format_pattern_file_warning(&warning, repo))?;
353 353 }
354 354
355 355 for (path, error) in take(&mut ds_status.bad) {
356 356 let error = match error {
357 357 hg::BadMatch::OsError(code) => {
358 358 std::io::Error::from_raw_os_error(code).to_string()
359 359 }
360 360 hg::BadMatch::BadType(ty) => {
361 361 format!("unsupported file type (type is {})", ty)
362 362 }
363 363 };
364 364 ui.write_stderr(&format_bytes!(
365 365 b"{}: {}\n",
366 366 path.as_bytes(),
367 367 error.as_bytes()
368 368 ))?
369 369 }
370 370 if !ds_status.unsure.is_empty() {
371 371 info!(
372 372 "Files to be rechecked by retrieval from filelog: {:?}",
373 373 ds_status.unsure.iter().map(|s| &s.path).collect::<Vec<_>>()
374 374 );
375 375 }
376 376 let mut fixup = Vec::new();
377 377 if !ds_status.unsure.is_empty()
378 378 && (display_states.modified || display_states.clean)
379 379 {
380 380 let p1 = repo.dirstate_parents()?.p1;
381 381 let manifest = repo.manifest_for_node(p1).map_err(|e| {
382 382 CommandError::from((e, &*format!("{:x}", p1.short())))
383 383 })?;
384 384 let working_directory_vfs = repo.working_directory_vfs();
385 385 let store_vfs = repo.store_vfs();
386 386 let revlog_open_options =
387 387 repo.default_revlog_options(RevlogType::Manifestlog)?;
388 388 let res: Vec<_> = take(&mut ds_status.unsure)
389 389 .into_par_iter()
390 390 .map(|to_check| {
391 391 // The compiler seems to get a bit confused with complex
392 392 // inference when using a parallel iterator + map
393 393 // + map_err + collect, so let's just inline some of the
394 394 // logic.
395 395 match unsure_is_modified(
396 working_directory_vfs,
397 store_vfs,
396 &working_directory_vfs,
397 &store_vfs,
398 398 check_exec,
399 399 &manifest,
400 400 &to_check.path,
401 401 revlog_open_options,
402 402 ) {
403 403 Err(HgError::IoError { .. }) => {
404 404 // IO errors most likely stem from the file being
405 405 // deleted even though we know it's in the
406 406 // dirstate.
407 407 Ok((to_check, UnsureOutcome::Deleted))
408 408 }
409 409 Ok(outcome) => Ok((to_check, outcome)),
410 410 Err(e) => Err(e),
411 411 }
412 412 })
413 413 .collect::<Result<_, _>>()?;
414 414 for (status_path, outcome) in res.into_iter() {
415 415 match outcome {
416 416 UnsureOutcome::Clean => {
417 417 if display_states.clean {
418 418 ds_status.clean.push(status_path.clone());
419 419 }
420 420 fixup.push(status_path.path.into_owned())
421 421 }
422 422 UnsureOutcome::Modified => {
423 423 if display_states.modified {
424 424 ds_status.modified.push(status_path);
425 425 }
426 426 }
427 427 UnsureOutcome::Deleted => {
428 428 if display_states.deleted {
429 429 ds_status.deleted.push(status_path);
430 430 }
431 431 }
432 432 }
433 433 }
434 434 }
435 435
436 436 let dirstate_write_needed = ds_status.dirty;
437 437 let filesystem_time_at_status_start =
438 438 ds_status.filesystem_time_at_status_start;
439 439
440 440 output.output(display_states, ds_status)?;
441 441
442 442 Ok((
443 443 fixup,
444 444 dirstate_write_needed,
445 445 filesystem_time_at_status_start,
446 446 ))
447 447 };
448 448 let (narrow_matcher, narrow_warnings) = narrow::matcher(repo)?;
449 449
450 450 if let Some((rev1, rev2)) = revpair {
451 451 let mut ds_status = DirstateStatus::default();
452 452 if list_copies {
453 453 return Err(CommandError::unsupported(
454 454 "status --rev --rev with copy information is not implemented yet",
455 455 ));
456 456 }
457 457
458 458 let stat = hg::operations::status_rev_rev_no_copies(
459 459 repo,
460 460 rev1,
461 461 rev2,
462 462 narrow_matcher,
463 463 )?;
464 464 for entry in stat.iter() {
465 465 let (path, status) = entry?;
466 466 let path = StatusPath {
467 467 path: Cow::Borrowed(path),
468 468 copy_source: None,
469 469 };
470 470 match status {
471 471 hg::operations::DiffStatus::Removed => {
472 472 if display_states.removed {
473 473 ds_status.removed.push(path)
474 474 }
475 475 }
476 476 hg::operations::DiffStatus::Added => {
477 477 if display_states.added {
478 478 ds_status.added.push(path)
479 479 }
480 480 }
481 481 hg::operations::DiffStatus::Modified => {
482 482 if display_states.modified {
483 483 ds_status.modified.push(path)
484 484 }
485 485 }
486 486 hg::operations::DiffStatus::Matching => {
487 487 if display_states.clean {
488 488 ds_status.clean.push(path)
489 489 }
490 490 }
491 491 }
492 492 }
493 493 output.output(display_states, ds_status)?;
494 494 return Ok(());
495 495 }
496 496
497 497 let (sparse_matcher, sparse_warnings) = sparse::matcher(repo)?;
498 498 let matcher = match (repo.has_narrow(), repo.has_sparse()) {
499 499 (true, true) => {
500 500 Box::new(IntersectionMatcher::new(narrow_matcher, sparse_matcher))
501 501 }
502 502 (true, false) => narrow_matcher,
503 503 (false, true) => sparse_matcher,
504 504 (false, false) => Box::new(AlwaysMatcher),
505 505 };
506 506 let matcher = match args.get_many::<std::ffi::OsString>("file") {
507 507 None => matcher,
508 508 Some(files) => {
509 509 let patterns: Vec<Vec<u8>> = files
510 510 .filter(|s| !s.is_empty())
511 511 .map(get_bytes_from_os_str)
512 512 .collect();
513 513 for file in &patterns {
514 514 if file.starts_with(b"set:") {
515 515 return Err(CommandError::unsupported("fileset"));
516 516 }
517 517 }
518 518 let cwd = hg::utils::current_dir()?;
519 519 let root = repo.working_directory_path();
520 520 let ignore_patterns = parse_pattern_args(patterns, &cwd, root)?;
521 521 let files_matcher =
522 522 hg::matchers::PatternMatcher::new(ignore_patterns)?;
523 523 Box::new(IntersectionMatcher::new(
524 524 Box::new(files_matcher),
525 525 matcher,
526 526 ))
527 527 }
528 528 };
529 529
530 530 print_narrow_sparse_warnings(
531 531 &narrow_warnings,
532 532 &sparse_warnings,
533 533 ui,
534 534 repo,
535 535 )?;
536 536 let (fixup, mut dirstate_write_needed, filesystem_time_at_status_start) =
537 537 dmap.with_status(
538 538 matcher.as_ref(),
539 539 repo.working_directory_path().to_owned(),
540 540 ignore_files(repo, config),
541 541 options,
542 542 after_status,
543 543 )?;
544 544
545 545 // Development config option to test write races
546 546 if let Err(e) =
547 547 debug_wait_for_file(config, "status.pre-dirstate-write-file")
548 548 {
549 549 ui.write_stderr(e.as_bytes()).ok();
550 550 }
551 551
552 552 if (fixup.is_empty() || filesystem_time_at_status_start.is_none())
553 553 && !dirstate_write_needed
554 554 {
555 555 // Nothing to update
556 556 return Ok(());
557 557 }
558 558
559 559 // Update the dirstate on disk if we can
560 560 let with_lock_result =
561 561 repo.try_with_wlock_no_wait(|| -> Result<(), CommandError> {
562 562 if let Some(mtime_boundary) = filesystem_time_at_status_start {
563 563 for hg_path in fixup {
564 564 use std::os::unix::fs::MetadataExt;
565 565 let fs_path = hg_path_to_path_buf(&hg_path)
566 566 .expect("HgPath conversion");
567 567 // Specifically do not reuse `fs_metadata` from
568 568 // `unsure_is_clean` which was needed before reading
569 569 // contents. Here we access metadata again after reading
570 570 // content, in case it changed in the meantime.
571 571 let metadata_res = repo
572 572 .working_directory_vfs()
573 573 .symlink_metadata(&fs_path);
574 574 let fs_metadata = match metadata_res {
575 575 Ok(meta) => meta,
576 576 Err(err) => match err {
577 577 HgError::IoError { .. } => {
578 578 // The file has probably been deleted. In any
579 579 // case, it was in the dirstate before, so
580 580 // let's ignore the error.
581 581 continue;
582 582 }
583 583 _ => return Err(err.into()),
584 584 },
585 585 };
586 586 if let Some(mtime) =
587 587 TruncatedTimestamp::for_reliable_mtime_of(
588 588 &fs_metadata,
589 589 &mtime_boundary,
590 590 )
591 591 .when_reading_file(&fs_path)?
592 592 {
593 593 let mode = fs_metadata.mode();
594 594 let size = fs_metadata.len();
595 595 dmap.set_clean(&hg_path, mode, size as u32, mtime)?;
596 596 dirstate_write_needed = true
597 597 }
598 598 }
599 599 }
600 600 drop(dmap); // Avoid "already mutably borrowed" RefCell panics
601 601 if dirstate_write_needed {
602 602 repo.write_dirstate()?
603 603 }
604 604 Ok(())
605 605 });
606 606 match with_lock_result {
607 607 Ok(closure_result) => closure_result?,
608 608 Err(LockError::AlreadyHeld) => {
609 609 // Not updating the dirstate is not ideal but not critical:
610 610 // don’t keep our caller waiting until some other Mercurial
611 611 // process releases the lock.
612 612 log::info!("not writing dirstate from `status`: lock is held")
613 613 }
614 614 Err(LockError::Other(HgError::IoError { error, .. }))
615 615 if error.kind() == io::ErrorKind::PermissionDenied
616 616 || match error.raw_os_error() {
617 617 None => false,
618 618 Some(errno) => libc::EROFS == errno,
619 619 } =>
620 620 {
621 621 // `hg status` on a read-only repository is fine
622 622 }
623 623 Err(LockError::Other(error)) => {
624 624 // Report other I/O errors
625 625 Err(error)?
626 626 }
627 627 }
628 628 Ok(())
629 629 }
630 630
631 631 fn ignore_files(repo: &Repo, config: &Config) -> Vec<PathBuf> {
632 632 let mut ignore_files = Vec::new();
633 633 let repo_ignore = repo.working_directory_vfs().join(".hgignore");
634 634 if repo_ignore.exists() {
635 635 ignore_files.push(repo_ignore)
636 636 }
637 637 for (key, value) in config.iter_section(b"ui") {
638 638 if key == b"ignore" || key.starts_with(b"ignore.") {
639 639 let path = get_path_from_bytes(value);
640 640 let path = shellexpand::path::full_with_context_no_errors(
641 641 path,
642 642 home::home_dir,
643 643 |s| std::env::var(s).ok(),
644 644 );
645 645 let joined = repo.working_directory_path().join(path);
646 646 ignore_files.push(joined);
647 647 }
648 648 }
649 649 ignore_files
650 650 }
651 651
652 652 struct DisplayStatusPaths<'a> {
653 653 ui: &'a Ui,
654 654 no_status: bool,
655 655 relativize: Option<RelativizePaths>,
656 656 print0: bool,
657 657 }
658 658
659 659 impl DisplayStatusPaths<'_> {
660 660 // Probably more elegant to use a Deref or Borrow trait rather than
661 661 // harcode HgPathBuf, but probably not really useful at this point
662 662 fn display(
663 663 &self,
664 664 status_prefix: &[u8],
665 665 label: &'static str,
666 666 mut paths: Vec<StatusPath<'_>>,
667 667 ) -> Result<(), CommandError> {
668 668 paths.sort_unstable();
669 669 // TODO: get the stdout lock once for the whole loop
670 670 // instead of in each write
671 671 for StatusPath { path, copy_source } in paths {
672 672 let relative_path;
673 673 let relative_source;
674 674 let (path, copy_source) = if let Some(relativize) =
675 675 &self.relativize
676 676 {
677 677 relative_path = relativize.relativize(&path);
678 678 relative_source =
679 679 copy_source.as_ref().map(|s| relativize.relativize(s));
680 680 (&*relative_path, relative_source.as_deref())
681 681 } else {
682 682 (path.as_bytes(), copy_source.as_ref().map(|s| s.as_bytes()))
683 683 };
684 684 // TODO: Add a way to use `write_bytes!` instead of `format_bytes!`
685 685 // in order to stream to stdout instead of allocating an
686 686 // itermediate `Vec<u8>`.
687 687 if !self.no_status {
688 688 self.ui.write_stdout_labelled(status_prefix, label)?
689 689 }
690 690 let linebreak = if self.print0 { b"\x00" } else { b"\n" };
691 691 self.ui.write_stdout_labelled(
692 692 &format_bytes!(b"{}{}", path, linebreak),
693 693 label,
694 694 )?;
695 695 if let Some(source) = copy_source.filter(|_| !self.no_status) {
696 696 let label = "status.copied";
697 697 self.ui.write_stdout_labelled(
698 698 &format_bytes!(b" {}{}", source, linebreak),
699 699 label,
700 700 )?
701 701 }
702 702 }
703 703 Ok(())
704 704 }
705 705
706 706 fn output(
707 707 &mut self,
708 708 display_states: DisplayStates,
709 709 ds_status: DirstateStatus,
710 710 ) -> Result<(), CommandError> {
711 711 if display_states.modified {
712 712 self.display(b"M ", "status.modified", ds_status.modified)?;
713 713 }
714 714 if display_states.added {
715 715 self.display(b"A ", "status.added", ds_status.added)?;
716 716 }
717 717 if display_states.removed {
718 718 self.display(b"R ", "status.removed", ds_status.removed)?;
719 719 }
720 720 if display_states.deleted {
721 721 self.display(b"! ", "status.deleted", ds_status.deleted)?;
722 722 }
723 723 if display_states.unknown {
724 724 self.display(b"? ", "status.unknown", ds_status.unknown)?;
725 725 }
726 726 if display_states.ignored {
727 727 self.display(b"I ", "status.ignored", ds_status.ignored)?;
728 728 }
729 729 if display_states.clean {
730 730 self.display(b"C ", "status.clean", ds_status.clean)?;
731 731 }
732 732 Ok(())
733 733 }
734 734 }
735 735
736 736 /// Outcome of the additional check for an ambiguous tracked file
737 737 enum UnsureOutcome {
738 738 /// The file is actually clean
739 739 Clean,
740 740 /// The file has been modified
741 741 Modified,
742 742 /// The file was deleted on disk (or became another type of fs entry)
743 743 Deleted,
744 744 }
745 745
746 746 /// Check if a file is modified by comparing actual repo store and file system.
747 747 ///
748 748 /// This meant to be used for those that the dirstate cannot resolve, due
749 749 /// to time resolution limits.
750 750 fn unsure_is_modified(
751 working_directory_vfs: hg::vfs::Vfs,
752 store_vfs: hg::vfs::Vfs,
751 working_directory_vfs: &hg::vfs::VfsImpl,
752 store_vfs: &hg::vfs::VfsImpl,
753 753 check_exec: bool,
754 754 manifest: &Manifest,
755 755 hg_path: &HgPath,
756 756 revlog_open_options: RevlogOpenOptions,
757 757 ) -> Result<UnsureOutcome, HgError> {
758 758 let vfs = working_directory_vfs;
759 759 let fs_path = hg_path_to_path_buf(hg_path).expect("HgPath conversion");
760 760 let fs_metadata = vfs.symlink_metadata(&fs_path)?;
761 761 let is_symlink = fs_metadata.file_type().is_symlink();
762 762
763 763 let entry = manifest
764 764 .find_by_path(hg_path)?
765 765 .expect("ambgious file not in p1");
766 766
767 767 // TODO: Also account for `FALLBACK_SYMLINK` and `FALLBACK_EXEC` from the
768 768 // dirstate
769 769 let fs_flags = if is_symlink {
770 770 Some(b'l')
771 771 } else if check_exec && has_exec_bit(&fs_metadata) {
772 772 Some(b'x')
773 773 } else {
774 774 None
775 775 };
776 776
777 777 let entry_flags = if check_exec {
778 778 entry.flags
779 779 } else if entry.flags == Some(b'x') {
780 780 None
781 781 } else {
782 782 entry.flags
783 783 };
784 784
785 785 if entry_flags != fs_flags {
786 786 return Ok(UnsureOutcome::Modified);
787 787 }
788 788 let filelog = hg::filelog::Filelog::open_vfs(
789 &store_vfs,
789 store_vfs,
790 790 hg_path,
791 791 revlog_open_options,
792 792 )?;
793 793 let fs_len = fs_metadata.len();
794 794 let file_node = entry.node_id()?;
795 795 let filelog_entry = filelog.entry_for_node(file_node).map_err(|_| {
796 796 HgError::corrupted(format!(
797 797 "filelog {:?} missing node {:?} from manifest",
798 798 hg_path, file_node
799 799 ))
800 800 })?;
801 801 if filelog_entry.file_data_len_not_equal_to(fs_len) {
802 802 // No need to read file contents:
803 803 // it cannot be equal if it has a different length.
804 804 return Ok(UnsureOutcome::Modified);
805 805 }
806 806
807 807 let p1_filelog_data = filelog_entry.data()?;
808 808 let p1_contents = p1_filelog_data.file_data()?;
809 809 if p1_contents.len() as u64 != fs_len {
810 810 // No need to read file contents:
811 811 // it cannot be equal if it has a different length.
812 812 return Ok(UnsureOutcome::Modified);
813 813 }
814 814
815 815 let fs_contents = if is_symlink {
816 816 get_bytes_from_os_string(vfs.read_link(fs_path)?.into_os_string())
817 817 } else {
818 818 vfs.read(fs_path)?
819 819 };
820 820
821 821 Ok(if p1_contents != &*fs_contents {
822 822 UnsureOutcome::Modified
823 823 } else {
824 824 UnsureOutcome::Clean
825 825 })
826 826 }
General Comments 0
You need to be logged in to leave comments. Login now