@@ -1,1000 +1,1000 b''
|
1 | 1 | # __init__.py - fsmonitor initialization and overrides |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013-2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''Faster status operations with the Watchman file monitor (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | Integrates the file-watching program Watchman with Mercurial to produce faster |
|
11 | 11 | status results. |
|
12 | 12 | |
|
13 | 13 | On a particular Linux system, for a real-world repository with over 400,000 |
|
14 | 14 | files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same |
|
15 | 15 | system, with fsmonitor it takes about 0.3 seconds. |
|
16 | 16 | |
|
17 | 17 | fsmonitor requires no configuration -- it will tell Watchman about your |
|
18 | 18 | repository as necessary. You'll need to install Watchman from |
|
19 | 19 | https://facebook.github.io/watchman/ and make sure it is in your PATH. |
|
20 | 20 | |
|
21 | 21 | fsmonitor is incompatible with the largefiles and eol extensions, and |
|
22 | 22 | will disable itself if any of those are active. |
|
23 | 23 | |
|
24 | 24 | The following configuration options exist: |
|
25 | 25 | |
|
26 | 26 | :: |
|
27 | 27 | |
|
28 | 28 | [fsmonitor] |
|
29 | 29 | mode = {off, on, paranoid} |
|
30 | 30 | |
|
31 | 31 | When `mode = off`, fsmonitor will disable itself (similar to not loading the |
|
32 | 32 | extension at all). When `mode = on`, fsmonitor will be enabled (the default). |
|
33 | 33 | When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem, |
|
34 | 34 | and ensure that the results are consistent. |
|
35 | 35 | |
|
36 | 36 | :: |
|
37 | 37 | |
|
38 | 38 | [fsmonitor] |
|
39 | 39 | timeout = (float) |
|
40 | 40 | |
|
41 | 41 | A value, in seconds, that determines how long fsmonitor will wait for Watchman |
|
42 | 42 | to return results. Defaults to `2.0`. |
|
43 | 43 | |
|
44 | 44 | :: |
|
45 | 45 | |
|
46 | 46 | [fsmonitor] |
|
47 | 47 | blacklistusers = (list of userids) |
|
48 | 48 | |
|
49 | 49 | A list of usernames for which fsmonitor will disable itself altogether. |
|
50 | 50 | |
|
51 | 51 | :: |
|
52 | 52 | |
|
53 | 53 | [fsmonitor] |
|
54 | 54 | walk_on_invalidate = (boolean) |
|
55 | 55 | |
|
56 | 56 | Whether or not to walk the whole repo ourselves when our cached state has been |
|
57 | 57 | invalidated, for example when Watchman has been restarted or .hgignore rules |
|
58 | 58 | have been changed. Walking the repo in that case can result in competing for |
|
59 | 59 | I/O with Watchman. For large repos it is recommended to set this value to |
|
60 | 60 | false. You may wish to set this to true if you have a very fast filesystem |
|
61 | 61 | that can outpace the IPC overhead of getting the result data for the full repo |
|
62 | 62 | from Watchman. Defaults to false. |
|
63 | 63 | |
|
64 | 64 | :: |
|
65 | 65 | |
|
66 | 66 | [fsmonitor] |
|
67 | 67 | warn_when_unused = (boolean) |
|
68 | 68 | |
|
69 | 69 | Whether to print a warning during certain operations when fsmonitor would be |
|
70 | 70 | beneficial to performance but isn't enabled. |
|
71 | 71 | |
|
72 | 72 | :: |
|
73 | 73 | |
|
74 | 74 | [fsmonitor] |
|
75 | 75 | warn_update_file_count = (integer) |
|
76 | 76 | # or when mercurial is built with rust support |
|
77 | 77 | warn_update_file_count_rust = (integer) |
|
78 | 78 | |
|
79 | 79 | If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will |
|
80 | 80 | be printed during working directory updates if this many files will be |
|
81 | 81 | created. |
|
82 | 82 | ''' |
|
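The options documented above are ordinary Mercurial configuration values, so they can also be inspected programmatically through the standard ui config API. A minimal sketch, assuming a repo object is already in hand; the explicit defaults simply restate the documented ones and the variable names are illustrative:

    mode = repo.ui.config(b'fsmonitor', b'mode', b'on')               # b'off' / b'on' / b'paranoid'
    timeout = float(repo.ui.config(b'fsmonitor', b'timeout', b'2'))   # seconds to wait for Watchman
    walk = repo.ui.configbool(b'fsmonitor', b'walk_on_invalidate')    # rewalk when cached state is invalid
    blocked = repo.ui.configlist(b'fsmonitor', b'blacklistusers')     # users for whom fsmonitor is off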
83 | 83 | |
|
84 | 84 | # Platforms Supported |
|
85 | 85 | # =================== |
|
86 | 86 | # |
|
87 | 87 | # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably, |
|
88 | 88 | # even under severe loads. |
|
89 | 89 | # |
|
90 | 90 | # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor |
|
91 | 91 | # turned on, on case-insensitive HFS+. There has been a reasonable amount of |
|
92 | 92 | # user testing under normal loads. |
|
93 | 93 | # |
|
94 | 94 | # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but |
|
95 | 95 | # very little testing has been done. |
|
96 | 96 | # |
|
97 | 97 | # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet. |
|
98 | 98 | # |
|
99 | 99 | # Known Issues |
|
100 | 100 | # ============ |
|
101 | 101 | # |
|
102 | 102 | # * fsmonitor will disable itself if any of the following extensions are |
|
103 | 103 | # enabled: largefiles, inotify, eol; or if the repository has subrepos. |
|
104 | 104 | # * fsmonitor will produce incorrect results if nested repos that are not |
|
105 | 105 | # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`. |
|
106 | 106 | # |
|
107 | 107 | # The issues related to nested repos and subrepos are probably not fundamental |
|
108 | 108 | # ones. Patches to fix them are welcome. |
|
109 | 109 | |
|
110 | 110 | |
|
111 | 111 | import codecs |
|
112 | 112 | import os |
|
113 | 113 | import stat |
|
114 | 114 | import sys |
|
115 | 115 | import tempfile |
|
116 | 116 | import weakref |
|
117 | 117 | |
|
118 | 118 | from mercurial.i18n import _ |
|
119 | 119 | from mercurial.node import hex |
|
120 | 120 | from mercurial.pycompat import open |
|
121 | 121 | from mercurial import ( |
|
122 | 122 | context, |
|
123 | 123 | encoding, |
|
124 | 124 | error, |
|
125 | 125 | extensions, |
|
126 | 126 | localrepo, |
|
127 | 127 | merge, |
|
128 | 128 | pathutil, |
|
129 | 129 | pycompat, |
|
130 | 130 | registrar, |
|
131 | 131 | scmutil, |
|
132 | 132 | util, |
|
133 | 133 | ) |
|
134 | 134 | from mercurial import match as matchmod |
|
135 | 135 | from mercurial.utils import ( |
|
136 | 136 | hashutil, |
|
137 | 137 | stringutil, |
|
138 | 138 | ) |
|
139 | 139 | |
|
140 | 140 | from . import ( |
|
141 | 141 | pywatchman, |
|
142 | 142 | state, |
|
143 | 143 | watchmanclient, |
|
144 | 144 | ) |
|
145 | 145 | |
|
146 | 146 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
147 | 147 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
148 | 148 | # be specifying the version(s) of Mercurial they are tested with, or |
|
149 | 149 | # leave the attribute unspecified. |
|
150 | 150 | testedwith = b'ships-with-hg-core' |
|
151 | 151 | |
|
152 | 152 | configtable = {} |
|
153 | 153 | configitem = registrar.configitem(configtable) |
|
154 | 154 | |
|
155 | 155 | configitem( |
|
156 | 156 | b'fsmonitor', |
|
157 | 157 | b'mode', |
|
158 | 158 | default=b'on', |
|
159 | 159 | ) |
|
160 | 160 | configitem( |
|
161 | 161 | b'fsmonitor', |
|
162 | 162 | b'walk_on_invalidate', |
|
163 | 163 | default=False, |
|
164 | 164 | ) |
|
165 | 165 | configitem( |
|
166 | 166 | b'fsmonitor', |
|
167 | 167 | b'timeout', |
|
168 | 168 | default=b'2', |
|
169 | 169 | ) |
|
170 | 170 | configitem( |
|
171 | 171 | b'fsmonitor', |
|
172 | 172 | b'blacklistusers', |
|
173 | 173 | default=list, |
|
174 | 174 | ) |
|
175 | 175 | configitem( |
|
176 | 176 | b'fsmonitor', |
|
177 | 177 | b'watchman_exe', |
|
178 | 178 | default=b'watchman', |
|
179 | 179 | ) |
|
180 | 180 | configitem( |
|
181 | 181 | b'fsmonitor', |
|
182 | 182 | b'verbose', |
|
183 | 183 | default=True, |
|
184 | 184 | experimental=True, |
|
185 | 185 | ) |
|
186 | 186 | configitem( |
|
187 | 187 | b'experimental', |
|
188 | 188 | b'fsmonitor.transaction_notify', |
|
189 | 189 | default=False, |
|
190 | 190 | ) |
|
191 | 191 | |
|
192 | 192 | # This extension is incompatible with the following blacklisted extensions |
|
193 | 193 | # and will disable itself when encountering one of these: |
|
194 | 194 | _blacklist = [b'largefiles', b'eol'] |
|
195 | 195 | |
|
196 | 196 | |
|
197 | 197 | def debuginstall(ui, fm): |
|
198 | 198 | fm.write( |
|
199 | 199 | b"fsmonitor-watchman", |
|
200 | 200 | _(b"fsmonitor checking for watchman binary... (%s)\n"), |
|
201 | 201 | ui.configpath(b"fsmonitor", b"watchman_exe"), |
|
202 | 202 | ) |
|
203 | 203 | root = tempfile.mkdtemp() |
|
204 | 204 | c = watchmanclient.client(ui, root) |
|
205 | 205 | err = None |
|
206 | 206 | try: |
|
207 | 207 | v = c.command(b"version") |
|
208 | 208 | fm.write( |
|
209 | 209 | b"fsmonitor-watchman-version", |
|
210 | 210 | _(b" watchman binary version %s\n"), |
|
211 | 211 | pycompat.bytestr(v["version"]), |
|
212 | 212 | ) |
|
213 | 213 | except watchmanclient.Unavailable as e: |
|
214 | 214 | err = stringutil.forcebytestr(e) |
|
215 | 215 | fm.condwrite( |
|
216 | 216 | err, |
|
217 | 217 | b"fsmonitor-watchman-error", |
|
218 | 218 | _(b" watchman binary missing or broken: %s\n"), |
|
219 | 219 | err, |
|
220 | 220 | ) |
|
221 | 221 | return 1 if err else 0 |
|
222 | 222 | |
|
223 | 223 | |
|
224 | 224 | def _handleunavailable(ui, state, ex): |
|
225 | 225 | """Exception handler for Watchman interaction exceptions""" |
|
226 | 226 | if isinstance(ex, watchmanclient.Unavailable): |
|
227 | 227 | # experimental config: fsmonitor.verbose |
|
228 | 228 | if ex.warn and ui.configbool(b'fsmonitor', b'verbose'): |
|
229 | 229 | if b'illegal_fstypes' not in stringutil.forcebytestr(ex): |
|
230 | 230 | ui.warn(stringutil.forcebytestr(ex) + b'\n') |
|
231 | 231 | if ex.invalidate: |
|
232 | 232 | state.invalidate() |
|
233 | 233 | # experimental config: fsmonitor.verbose |
|
234 | 234 | if ui.configbool(b'fsmonitor', b'verbose'): |
|
235 | 235 | ui.log( |
|
236 | 236 | b'fsmonitor', |
|
237 | 237 | b'Watchman unavailable: %s\n', |
|
238 | 238 | stringutil.forcebytestr(ex.msg), |
|
239 | 239 | ) |
|
240 | 240 | else: |
|
241 | 241 | ui.log( |
|
242 | 242 | b'fsmonitor', |
|
243 | 243 | b'Watchman exception: %s\n', |
|
244 | 244 | stringutil.forcebytestr(ex), |
|
245 | 245 | ) |
|
246 | 246 | |
|
247 | 247 | |
|
248 | 248 | def _hashignore(ignore): |
|
249 | 249 | """Calculate hash for ignore patterns and filenames |
|
250 | 250 | |
|
251 | 251 | If this information changes between Mercurial invocations, we can't |
|
252 | 252 | rely on Watchman information anymore and have to re-scan the working |
|
253 | 253 | copy. |
|
254 | 254 | |
|
255 | 255 | """ |
|
256 | 256 | sha1 = hashutil.sha1() |
|
257 | 257 | sha1.update(pycompat.byterepr(ignore)) |
|
258 | 258 | return pycompat.sysbytes(sha1.hexdigest()) |
|
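A short illustration of how this hash is meant to be consumed; it mirrors the comparison performed further down in overridewalk, and the saved state shown here is hypothetical:

    # clock / ignorehash / notefiles as persisted by a previous run
    clock, ignorehash, notefiles = state.get()
    if _hashignore(self._ignore) != ignorehash:
        # the ignore rules changed since the last run, so cached Watchman
        # results cannot be trusted; fall back to the NULL clock
        clock, notefiles = b'c:0:0', []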
259 | 259 | |
|
260 | 260 | |
|
261 | 261 | _watchmanencoding = pywatchman.encoding.get_local_encoding() |
|
262 | 262 | _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding() |
|
263 | 263 | _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding) |
|
264 | 264 | |
|
265 | 265 | |
|
266 | 266 | def _watchmantofsencoding(path): |
|
267 | 267 | """Fix path to match watchman and local filesystem encoding |
|
268 | 268 | |
|
269 | 269 | watchman's paths encoding can differ from filesystem encoding. For example, |
|
270 | 270 | on Windows, it's always utf-8. |
|
271 | 271 | """ |
|
272 | 272 | try: |
|
273 | 273 | decoded = path.decode(_watchmanencoding) |
|
274 | 274 | except UnicodeDecodeError as e: |
|
275 | 275 | raise error.Abort( |
|
276 | 276 | stringutil.forcebytestr(e), hint=b'watchman encoding error' |
|
277 | 277 | ) |
|
278 | 278 | |
|
279 | 279 | try: |
|
280 | 280 | encoded = decoded.encode(_fsencoding, 'strict') |
|
281 | 281 | except UnicodeEncodeError as e: |
|
282 | 282 | raise error.Abort(stringutil.forcebytestr(e)) |
|
283 | 283 | |
|
284 | 284 | return encoded |
|
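For illustration, the round trip this helper performs when the two encodings actually differ; the file name and encodings below are hypothetical, and on most Unix systems _fixencoding is False so the helper is never called:

    raw = u'caf\xe9.txt'.encode('utf-8')             # a path as Watchman might report it
    local = raw.decode('utf-8').encode('latin-1')    # re-encoded for the local filesystem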
285 | 285 | |
|
286 | 286 | |
|
287 | 287 | def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True): |
|
288 | 288 | """Replacement for dirstate.walk, hooking into Watchman. |
|
289 | 289 | |
|
290 | 290 | Whenever full is False, ignored is False, and the Watchman client is |
|
291 | 291 | available, use Watchman combined with saved state to possibly return only a |
|
292 | 292 | subset of files.""" |
|
293 | 293 | |
|
294 | 294 | def bail(reason): |
|
295 | 295 | self._ui.debug(b'fsmonitor: fallback to core status, %s\n' % reason) |
|
296 | 296 | return orig(match, subrepos, unknown, ignored, full=True) |
|
297 | 297 | |
|
298 | 298 | if full: |
|
299 | 299 | return bail(b'full rewalk requested') |
|
300 | 300 | if ignored: |
|
301 | 301 | return bail(b'listing ignored files') |
|
302 | 302 | if not self._watchmanclient.available(): |
|
303 | 303 | return bail(b'client unavailable') |
|
304 | 304 | state = self._fsmonitorstate |
|
305 | 305 | clock, ignorehash, notefiles = state.get() |
|
306 | 306 | if not clock: |
|
307 | 307 | if state.walk_on_invalidate: |
|
308 | 308 | return bail(b'no clock') |
|
309 | 309 | # Initial NULL clock value, see |
|
310 | 310 | # https://facebook.github.io/watchman/docs/clockspec.html |
|
311 | 311 | clock = b'c:0:0' |
|
312 | 312 | notefiles = [] |
|
313 | 313 | |
|
314 | 314 | ignore = self._ignore |
|
315 | 315 | dirignore = self._dirignore |
|
316 | 316 | if unknown: |
|
317 | 317 | if _hashignore(ignore) != ignorehash and clock != b'c:0:0': |
|
318 | 318 | # ignore list changed -- can't rely on Watchman state any more |
|
319 | 319 | if state.walk_on_invalidate: |
|
320 | 320 | return bail(b'ignore rules changed') |
|
321 | 321 | notefiles = [] |
|
322 | 322 | clock = b'c:0:0' |
|
323 | 323 | else: |
|
324 | 324 | # always ignore |
|
325 | 325 | ignore = util.always |
|
326 | 326 | dirignore = util.always |
|
327 | 327 | |
|
328 | 328 | matchfn = match.matchfn |
|
329 | 329 | matchalways = match.always() |
|
330 | 330 | dmap = self._map |
|
331 | 331 | if util.safehasattr(dmap, b'_map'): |
|
332 | 332 | # for better performance, directly access the inner dirstate map if the |
|
333 | 333 | # standard dirstate implementation is in use. |
|
334 | 334 | dmap = dmap._map |
|
335 | 335 | nonnormalset = { |
|
336 | 336 | f |
|
337 | 337 | for f, e in self._map.items() |
|
338 | if e.v1_state() != b"n" or e.v1_mtime() == -1 | |
|
338 | if e._v1_state() != b"n" or e._v1_mtime() == -1 | |
|
339 | 339 | } |
|
340 | 340 | |
|
341 | 341 | copymap = self._map.copymap |
|
342 | 342 | getkind = stat.S_IFMT |
|
343 | 343 | dirkind = stat.S_IFDIR |
|
344 | 344 | regkind = stat.S_IFREG |
|
345 | 345 | lnkkind = stat.S_IFLNK |
|
346 | 346 | join = self._join |
|
347 | 347 | normcase = util.normcase |
|
348 | 348 | fresh_instance = False |
|
349 | 349 | |
|
350 | 350 | exact = skipstep3 = False |
|
351 | 351 | if match.isexact(): # match.exact |
|
352 | 352 | exact = True |
|
353 | 353 | dirignore = util.always # skip step 2 |
|
354 | 354 | elif match.prefix(): # match.match, no patterns |
|
355 | 355 | skipstep3 = True |
|
356 | 356 | |
|
357 | 357 | if not exact and self._checkcase: |
|
358 | 358 | # note that even though we could receive directory entries, we're only |
|
359 | 359 | # interested in checking if a file with the same name exists. So only |
|
360 | 360 | # normalize files if possible. |
|
361 | 361 | normalize = self._normalizefile |
|
362 | 362 | skipstep3 = False |
|
363 | 363 | else: |
|
364 | 364 | normalize = None |
|
365 | 365 | |
|
366 | 366 | # step 1: find all explicit files |
|
367 | 367 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) |
|
368 | 368 | |
|
369 | 369 | skipstep3 = skipstep3 and not (work or dirsnotfound) |
|
370 | 370 | work = [d for d in work if not dirignore(d[0])] |
|
371 | 371 | |
|
372 | 372 | if not work and (exact or skipstep3): |
|
373 | 373 | for s in subrepos: |
|
374 | 374 | del results[s] |
|
375 | 375 | del results[b'.hg'] |
|
376 | 376 | return results |
|
377 | 377 | |
|
378 | 378 | # step 2: query Watchman |
|
379 | 379 | try: |
|
380 | 380 | # Use the user-configured timeout for the query. |
|
381 | 381 | # Add a little slack over the top of the user query to allow for |
|
382 | 382 | # overheads while transferring the data |
|
383 | 383 | self._watchmanclient.settimeout(state.timeout + 0.1) |
|
384 | 384 | result = self._watchmanclient.command( |
|
385 | 385 | b'query', |
|
386 | 386 | { |
|
387 | 387 | b'fields': [b'mode', b'mtime', b'size', b'exists', b'name'], |
|
388 | 388 | b'since': clock, |
|
389 | 389 | b'expression': [ |
|
390 | 390 | b'not', |
|
391 | 391 | [ |
|
392 | 392 | b'anyof', |
|
393 | 393 | [b'dirname', b'.hg'], |
|
394 | 394 | [b'name', b'.hg', b'wholename'], |
|
395 | 395 | ], |
|
396 | 396 | ], |
|
397 | 397 | b'sync_timeout': int(state.timeout * 1000), |
|
398 | 398 | b'empty_on_fresh_instance': state.walk_on_invalidate, |
|
399 | 399 | }, |
|
400 | 400 | ) |
|
401 | 401 | except Exception as ex: |
|
402 | 402 | _handleunavailable(self._ui, state, ex) |
|
403 | 403 | self._watchmanclient.clearconnection() |
|
404 | 404 | return bail(b'exception during run') |
|
405 | 405 | else: |
|
406 | 406 | # We need to propagate the last observed clock up so that we |
|
407 | 407 | # can use it for our next query |
|
408 | 408 | state.setlastclock(pycompat.sysbytes(result[b'clock'])) |
|
409 | 409 | if result[b'is_fresh_instance']: |
|
410 | 410 | if state.walk_on_invalidate: |
|
411 | 411 | state.invalidate() |
|
412 | 412 | return bail(b'fresh instance') |
|
413 | 413 | fresh_instance = True |
|
414 | 414 | # Ignore any prior notable files from the state info |
|
415 | 415 | notefiles = [] |
|
416 | 416 | |
|
417 | 417 | # for file paths which require normalization and we encounter a case |
|
418 | 418 | # collision, we store our own foldmap |
|
419 | 419 | if normalize: |
|
420 | 420 | foldmap = {normcase(k): k for k in results} |
|
421 | 421 | |
|
422 | 422 | switch_slashes = pycompat.ossep == b'\\' |
|
423 | 423 | # The order of the results is, strictly speaking, undefined. |
|
424 | 424 | # For case changes on a case insensitive filesystem we may receive |
|
425 | 425 | # two entries, one with exists=True and another with exists=False. |
|
426 | 426 | # The exists=True entries in the same response should be interpreted |
|
427 | 427 | # as being happens-after the exists=False entries due to the way that |
|
428 | 428 | # Watchman tracks files. We use this property to reconcile deletes |
|
429 | 429 | # for name case changes. |
|
430 | 430 | for entry in result[b'files']: |
|
431 | 431 | fname = entry[b'name'] |
|
432 | 432 | |
|
433 | 433 | # Watchman always gives us a str. Normalize to bytes on Python 3 |
|
434 | 434 | # using Watchman's encoding, if needed. |
|
435 | 435 | if not isinstance(fname, bytes): |
|
436 | 436 | fname = fname.encode(_watchmanencoding) |
|
437 | 437 | |
|
438 | 438 | if _fixencoding: |
|
439 | 439 | fname = _watchmantofsencoding(fname) |
|
440 | 440 | |
|
441 | 441 | if switch_slashes: |
|
442 | 442 | fname = fname.replace(b'\\', b'/') |
|
443 | 443 | if normalize: |
|
444 | 444 | normed = normcase(fname) |
|
445 | 445 | fname = normalize(fname, True, True) |
|
446 | 446 | foldmap[normed] = fname |
|
447 | 447 | fmode = entry[b'mode'] |
|
448 | 448 | fexists = entry[b'exists'] |
|
449 | 449 | kind = getkind(fmode) |
|
450 | 450 | |
|
451 | 451 | if b'/.hg/' in fname or fname.endswith(b'/.hg'): |
|
452 | 452 | return bail(b'nested-repo-detected') |
|
453 | 453 | |
|
454 | 454 | if not fexists: |
|
455 | 455 | # if marked as deleted and we don't already have a change |
|
456 | 456 | # record, mark it as deleted. If we already have an entry |
|
457 | 457 | # for fname then it was either part of walkexplicit or was |
|
458 | 458 | # an earlier result that was a case change |
|
459 | 459 | if ( |
|
460 | 460 | fname not in results |
|
461 | 461 | and fname in dmap |
|
462 | 462 | and (matchalways or matchfn(fname)) |
|
463 | 463 | ): |
|
464 | 464 | results[fname] = None |
|
465 | 465 | elif kind == dirkind: |
|
466 | 466 | if fname in dmap and (matchalways or matchfn(fname)): |
|
467 | 467 | results[fname] = None |
|
468 | 468 | elif kind == regkind or kind == lnkkind: |
|
469 | 469 | if fname in dmap: |
|
470 | 470 | if matchalways or matchfn(fname): |
|
471 | 471 | results[fname] = entry |
|
472 | 472 | elif (matchalways or matchfn(fname)) and not ignore(fname): |
|
473 | 473 | results[fname] = entry |
|
474 | 474 | elif fname in dmap and (matchalways or matchfn(fname)): |
|
475 | 475 | results[fname] = None |
|
476 | 476 | |
|
477 | 477 | # step 3: query notable files we don't already know about |
|
478 | 478 | # XXX try not to iterate over the entire dmap |
|
479 | 479 | if normalize: |
|
480 | 480 | # any notable files that have changed case will already be handled |
|
481 | 481 | # above, so just check membership in the foldmap |
|
482 | 482 | notefiles = { |
|
483 | 483 | normalize(f, True, True) |
|
484 | 484 | for f in notefiles |
|
485 | 485 | if normcase(f) not in foldmap |
|
486 | 486 | } |
|
487 | 487 | visit = { |
|
488 | 488 | f |
|
489 | 489 | for f in notefiles |
|
490 | 490 | if (f not in results and matchfn(f) and (f in dmap or not ignore(f))) |
|
491 | 491 | } |
|
492 | 492 | |
|
493 | 493 | if not fresh_instance: |
|
494 | 494 | if matchalways: |
|
495 | 495 | visit.update(f for f in nonnormalset if f not in results) |
|
496 | 496 | visit.update(f for f in copymap if f not in results) |
|
497 | 497 | else: |
|
498 | 498 | visit.update( |
|
499 | 499 | f for f in nonnormalset if f not in results and matchfn(f) |
|
500 | 500 | ) |
|
501 | 501 | visit.update(f for f in copymap if f not in results and matchfn(f)) |
|
502 | 502 | else: |
|
503 | 503 | if matchalways: |
|
504 | 504 | visit.update(f for f, st in dmap.items() if f not in results) |
|
505 | 505 | visit.update(f for f in copymap if f not in results) |
|
506 | 506 | else: |
|
507 | 507 | visit.update( |
|
508 | 508 | f for f, st in dmap.items() if f not in results and matchfn(f) |
|
509 | 509 | ) |
|
510 | 510 | visit.update(f for f in copymap if f not in results and matchfn(f)) |
|
511 | 511 | |
|
512 | 512 | audit = pathutil.pathauditor(self._root, cached=True).check |
|
513 | 513 | auditpass = [f for f in visit if audit(f)] |
|
514 | 514 | auditpass.sort() |
|
515 | 515 | auditfail = visit.difference(auditpass) |
|
516 | 516 | for f in auditfail: |
|
517 | 517 | results[f] = None |
|
518 | 518 | |
|
519 | 519 | nf = iter(auditpass) |
|
520 | 520 | for st in util.statfiles([join(f) for f in auditpass]): |
|
521 | 521 | f = next(nf) |
|
522 | 522 | if st or f in dmap: |
|
523 | 523 | results[f] = st |
|
524 | 524 | |
|
525 | 525 | for s in subrepos: |
|
526 | 526 | del results[s] |
|
527 | 527 | del results[b'.hg'] |
|
528 | 528 | return results |
|
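The request assembled in step 2 above is a standard Watchman ``since`` query. For debugging what fsmonitor sees, roughly the same query can be issued directly with pywatchman; this is a sketch rather than the extension's own code, and the repository path is a placeholder:

    import pywatchman

    client = pywatchman.client(timeout=2.0)
    watch = client.query('watch-project', '/path/to/repo')['watch']
    result = client.query('query', watch, {
        'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
        'since': 'c:0:0',                  # NULL clock: report every file once
        'expression': ['not', ['anyof', ['dirname', '.hg'],
                               ['name', '.hg', 'wholename']]],
    })
    print(result['clock'], len(result['files']))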
529 | 529 | |
|
530 | 530 | |
|
531 | 531 | def overridestatus( |
|
532 | 532 | orig, |
|
533 | 533 | self, |
|
534 | 534 | node1=b'.', |
|
535 | 535 | node2=None, |
|
536 | 536 | match=None, |
|
537 | 537 | ignored=False, |
|
538 | 538 | clean=False, |
|
539 | 539 | unknown=False, |
|
540 | 540 | listsubrepos=False, |
|
541 | 541 | ): |
|
542 | 542 | listignored = ignored |
|
543 | 543 | listclean = clean |
|
544 | 544 | listunknown = unknown |
|
545 | 545 | |
|
546 | 546 | def _cmpsets(l1, l2): |
|
547 | 547 | try: |
|
548 | 548 | if b'FSMONITOR_LOG_FILE' in encoding.environ: |
|
549 | 549 | fn = encoding.environ[b'FSMONITOR_LOG_FILE'] |
|
550 | 550 | f = open(fn, b'wb') |
|
551 | 551 | else: |
|
552 | 552 | fn = b'fsmonitorfail.log' |
|
553 | 553 | f = self.vfs.open(fn, b'wb') |
|
554 | 554 | except (IOError, OSError): |
|
555 | 555 | self.ui.warn(_(b'warning: unable to write to %s\n') % fn) |
|
556 | 556 | return |
|
557 | 557 | |
|
558 | 558 | try: |
|
559 | 559 | for i, (s1, s2) in enumerate(zip(l1, l2)): |
|
560 | 560 | if set(s1) != set(s2): |
|
561 | 561 | f.write(b'sets at position %d are unequal\n' % i) |
|
562 | 562 | f.write(b'watchman returned: %r\n' % s1) |
|
563 | 563 | f.write(b'stat returned: %r\n' % s2) |
|
564 | 564 | finally: |
|
565 | 565 | f.close() |
|
566 | 566 | |
|
567 | 567 | if isinstance(node1, context.changectx): |
|
568 | 568 | ctx1 = node1 |
|
569 | 569 | else: |
|
570 | 570 | ctx1 = self[node1] |
|
571 | 571 | if isinstance(node2, context.changectx): |
|
572 | 572 | ctx2 = node2 |
|
573 | 573 | else: |
|
574 | 574 | ctx2 = self[node2] |
|
575 | 575 | |
|
576 | 576 | working = ctx2.rev() is None |
|
577 | 577 | parentworking = working and ctx1 == self[b'.'] |
|
578 | 578 | match = match or matchmod.always() |
|
579 | 579 | |
|
580 | 580 | # Maybe we can use this opportunity to update Watchman's state. |
|
581 | 581 | # Mercurial uses workingcommitctx and/or memctx to represent the part of |
|
582 | 582 | # the workingctx that is to be committed. So don't update the state in |
|
583 | 583 | # that case. |
|
584 | 584 | # HG_PENDING is set in the environment when the dirstate is being updated |
|
585 | 585 | # in the middle of a transaction; we must not update our state in that |
|
586 | 586 | # case, or we risk forgetting about changes in the working copy. |
|
587 | 587 | updatestate = ( |
|
588 | 588 | parentworking |
|
589 | 589 | and match.always() |
|
590 | 590 | and not isinstance(ctx2, (context.workingcommitctx, context.memctx)) |
|
591 | 591 | and b'HG_PENDING' not in encoding.environ |
|
592 | 592 | ) |
|
593 | 593 | |
|
594 | 594 | try: |
|
595 | 595 | if self._fsmonitorstate.walk_on_invalidate: |
|
596 | 596 | # Use a short timeout to query the current clock. If that |
|
597 | 597 | # takes too long then we assume that the service will be slow |
|
598 | 598 | # to answer our query. |
|
599 | 599 | # walk_on_invalidate indicates that we prefer to walk the |
|
600 | 600 | # tree ourselves because we can ignore portions that Watchman |
|
601 | 601 | # cannot and we tend to be faster in the warmer buffer cache |
|
602 | 602 | # cases. |
|
603 | 603 | self._watchmanclient.settimeout(0.1) |
|
604 | 604 | else: |
|
605 | 605 | # Give Watchman more time to potentially complete its walk |
|
606 | 606 | # and return the initial clock. In this mode we assume that |
|
607 | 607 | # the filesystem will be slower than parsing a potentially |
|
608 | 608 | # very large Watchman result set. |
|
609 | 609 | self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1) |
|
610 | 610 | startclock = self._watchmanclient.getcurrentclock() |
|
611 | 611 | except Exception as ex: |
|
612 | 612 | self._watchmanclient.clearconnection() |
|
613 | 613 | _handleunavailable(self.ui, self._fsmonitorstate, ex) |
|
614 | 614 | # boo, Watchman failed. bail |
|
615 | 615 | return orig( |
|
616 | 616 | node1, |
|
617 | 617 | node2, |
|
618 | 618 | match, |
|
619 | 619 | listignored, |
|
620 | 620 | listclean, |
|
621 | 621 | listunknown, |
|
622 | 622 | listsubrepos, |
|
623 | 623 | ) |
|
624 | 624 | |
|
625 | 625 | if updatestate: |
|
626 | 626 | # We need info about unknown files. This may make things slower the |
|
627 | 627 | # first time, but whatever. |
|
628 | 628 | stateunknown = True |
|
629 | 629 | else: |
|
630 | 630 | stateunknown = listunknown |
|
631 | 631 | |
|
632 | 632 | if updatestate: |
|
633 | 633 | ps = poststatus(startclock) |
|
634 | 634 | self.addpostdsstatus(ps) |
|
635 | 635 | |
|
636 | 636 | r = orig( |
|
637 | 637 | node1, node2, match, listignored, listclean, stateunknown, listsubrepos |
|
638 | 638 | ) |
|
639 | 639 | modified, added, removed, deleted, unknown, ignored, clean = r |
|
640 | 640 | |
|
641 | 641 | if not listunknown: |
|
642 | 642 | unknown = [] |
|
643 | 643 | |
|
644 | 644 | # don't do paranoid checks if we're not going to query Watchman anyway |
|
645 | 645 | full = listclean or match.traversedir is not None |
|
646 | 646 | if self._fsmonitorstate.mode == b'paranoid' and not full: |
|
647 | 647 | # run status again and fall back to the old walk this time |
|
648 | 648 | self.dirstate._fsmonitordisable = True |
|
649 | 649 | |
|
650 | 650 | # shut the UI up |
|
651 | 651 | quiet = self.ui.quiet |
|
652 | 652 | self.ui.quiet = True |
|
653 | 653 | fout, ferr = self.ui.fout, self.ui.ferr |
|
654 | 654 | self.ui.fout = self.ui.ferr = open(os.devnull, b'wb') |
|
655 | 655 | |
|
656 | 656 | try: |
|
657 | 657 | rv2 = orig( |
|
658 | 658 | node1, |
|
659 | 659 | node2, |
|
660 | 660 | match, |
|
661 | 661 | listignored, |
|
662 | 662 | listclean, |
|
663 | 663 | listunknown, |
|
664 | 664 | listsubrepos, |
|
665 | 665 | ) |
|
666 | 666 | finally: |
|
667 | 667 | self.dirstate._fsmonitordisable = False |
|
668 | 668 | self.ui.quiet = quiet |
|
669 | 669 | self.ui.fout, self.ui.ferr = fout, ferr |
|
670 | 670 | |
|
671 | 671 | # clean isn't tested since it's set to True above |
|
672 | 672 | with self.wlock(): |
|
673 | 673 | _cmpsets( |
|
674 | 674 | [modified, added, removed, deleted, unknown, ignored, clean], |
|
675 | 675 | rv2, |
|
676 | 676 | ) |
|
677 | 677 | modified, added, removed, deleted, unknown, ignored, clean = rv2 |
|
678 | 678 | |
|
679 | 679 | return scmutil.status( |
|
680 | 680 | modified, added, removed, deleted, unknown, ignored, clean |
|
681 | 681 | ) |
|
682 | 682 | |
|
683 | 683 | |
|
684 | 684 | class poststatus: |
|
685 | 685 | def __init__(self, startclock): |
|
686 | 686 | self._startclock = pycompat.sysbytes(startclock) |
|
687 | 687 | |
|
688 | 688 | def __call__(self, wctx, status): |
|
689 | 689 | clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock |
|
690 | 690 | hashignore = _hashignore(wctx.repo().dirstate._ignore) |
|
691 | 691 | notefiles = ( |
|
692 | 692 | status.modified |
|
693 | 693 | + status.added |
|
694 | 694 | + status.removed |
|
695 | 695 | + status.deleted |
|
696 | 696 | + status.unknown |
|
697 | 697 | ) |
|
698 | 698 | wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles) |
|
699 | 699 | |
|
700 | 700 | |
|
701 | 701 | def makedirstate(repo, dirstate): |
|
702 | 702 | class fsmonitordirstate(dirstate.__class__): |
|
703 | 703 | def _fsmonitorinit(self, repo): |
|
704 | 704 | # _fsmonitordisable is used in paranoid mode |
|
705 | 705 | self._fsmonitordisable = False |
|
706 | 706 | self._fsmonitorstate = repo._fsmonitorstate |
|
707 | 707 | self._watchmanclient = repo._watchmanclient |
|
708 | 708 | self._repo = weakref.proxy(repo) |
|
709 | 709 | |
|
710 | 710 | def walk(self, *args, **kwargs): |
|
711 | 711 | orig = super(fsmonitordirstate, self).walk |
|
712 | 712 | if self._fsmonitordisable: |
|
713 | 713 | return orig(*args, **kwargs) |
|
714 | 714 | return overridewalk(orig, self, *args, **kwargs) |
|
715 | 715 | |
|
716 | 716 | def rebuild(self, *args, **kwargs): |
|
717 | 717 | self._fsmonitorstate.invalidate() |
|
718 | 718 | return super(fsmonitordirstate, self).rebuild(*args, **kwargs) |
|
719 | 719 | |
|
720 | 720 | def invalidate(self, *args, **kwargs): |
|
721 | 721 | self._fsmonitorstate.invalidate() |
|
722 | 722 | return super(fsmonitordirstate, self).invalidate(*args, **kwargs) |
|
723 | 723 | |
|
724 | 724 | dirstate.__class__ = fsmonitordirstate |
|
725 | 725 | dirstate._fsmonitorinit(repo) |
|
726 | 726 | |
|
727 | 727 | |
|
728 | 728 | def wrapdirstate(orig, self): |
|
729 | 729 | ds = orig(self) |
|
730 | 730 | # only override the dirstate when Watchman is available for the repo |
|
731 | 731 | if util.safehasattr(self, b'_fsmonitorstate'): |
|
732 | 732 | makedirstate(self, ds) |
|
733 | 733 | return ds |
|
734 | 734 | |
|
735 | 735 | |
|
736 | 736 | def extsetup(ui): |
|
737 | 737 | extensions.wrapfilecache( |
|
738 | 738 | localrepo.localrepository, b'dirstate', wrapdirstate |
|
739 | 739 | ) |
|
740 | 740 | if pycompat.isdarwin: |
|
741 | 741 | # An assist for avoiding the dangling-symlink fsevents bug |
|
742 | 742 | extensions.wrapfunction(os, b'symlink', wrapsymlink) |
|
743 | 743 | |
|
744 | 744 | extensions.wrapfunction(merge, b'_update', wrapupdate) |
|
745 | 745 | |
|
746 | 746 | |
|
747 | 747 | def wrapsymlink(orig, source, link_name): |
|
748 | 748 | """if we create a dangling symlink, also touch the parent dir |
|
749 | 749 | to encourage fsevents notifications to work more correctly""" |
|
750 | 750 | try: |
|
751 | 751 | return orig(source, link_name) |
|
752 | 752 | finally: |
|
753 | 753 | try: |
|
754 | 754 | os.utime(os.path.dirname(link_name), None) |
|
755 | 755 | except OSError: |
|
756 | 756 | pass |
|
757 | 757 | |
|
758 | 758 | |
|
759 | 759 | class state_update: |
|
760 | 760 | """This context manager is responsible for dispatching the state-enter |
|
761 | 761 | and state-leave signals to the watchman service. The enter and leave |
|
762 | 762 | methods can be invoked manually (for scenarios where context manager |
|
763 | 763 | semantics are not possible). If the oldnode and newnode parameters are None, |
|
764 | 764 | they will be populated from the current working copy in enter and |
|
765 | 765 | leave, respectively. Similarly, if distance is None, it will be |
|
766 | 766 | calculated from oldnode and newnode in the leave method.""" |
|
767 | 767 | |
|
768 | 768 | def __init__( |
|
769 | 769 | self, |
|
770 | 770 | repo, |
|
771 | 771 | name, |
|
772 | 772 | oldnode=None, |
|
773 | 773 | newnode=None, |
|
774 | 774 | distance=None, |
|
775 | 775 | partial=False, |
|
776 | 776 | ): |
|
777 | 777 | self.repo = repo.unfiltered() |
|
778 | 778 | self.name = name |
|
779 | 779 | self.oldnode = oldnode |
|
780 | 780 | self.newnode = newnode |
|
781 | 781 | self.distance = distance |
|
782 | 782 | self.partial = partial |
|
783 | 783 | self._lock = None |
|
784 | 784 | self.need_leave = False |
|
785 | 785 | |
|
786 | 786 | def __enter__(self): |
|
787 | 787 | self.enter() |
|
788 | 788 | |
|
789 | 789 | def enter(self): |
|
790 | 790 | # Make sure we have a wlock prior to sending notifications to watchman. |
|
791 | 791 | # We don't want to race with other actors. In the update case, |
|
792 | 792 | # merge.update is going to take the wlock almost immediately. We are |
|
793 | 793 | # effectively extending the lock around several short sanity checks. |
|
794 | 794 | if self.oldnode is None: |
|
795 | 795 | self.oldnode = self.repo[b'.'].node() |
|
796 | 796 | |
|
797 | 797 | if self.repo.currentwlock() is None: |
|
798 | 798 | if util.safehasattr(self.repo, b'wlocknostateupdate'): |
|
799 | 799 | self._lock = self.repo.wlocknostateupdate() |
|
800 | 800 | else: |
|
801 | 801 | self._lock = self.repo.wlock() |
|
802 | 802 | self.need_leave = self._state(b'state-enter', hex(self.oldnode)) |
|
803 | 803 | return self |
|
804 | 804 | |
|
805 | 805 | def __exit__(self, type_, value, tb): |
|
806 | 806 | abort = True if type_ else False |
|
807 | 807 | self.exit(abort=abort) |
|
808 | 808 | |
|
809 | 809 | def exit(self, abort=False): |
|
810 | 810 | try: |
|
811 | 811 | if self.need_leave: |
|
812 | 812 | status = b'failed' if abort else b'ok' |
|
813 | 813 | if self.newnode is None: |
|
814 | 814 | self.newnode = self.repo[b'.'].node() |
|
815 | 815 | if self.distance is None: |
|
816 | 816 | self.distance = calcdistance( |
|
817 | 817 | self.repo, self.oldnode, self.newnode |
|
818 | 818 | ) |
|
819 | 819 | self._state(b'state-leave', hex(self.newnode), status=status) |
|
820 | 820 | finally: |
|
821 | 821 | self.need_leave = False |
|
822 | 822 | if self._lock: |
|
823 | 823 | self._lock.release() |
|
824 | 824 | |
|
825 | 825 | def _state(self, cmd, commithash, status=b'ok'): |
|
826 | 826 | if not util.safehasattr(self.repo, b'_watchmanclient'): |
|
827 | 827 | return False |
|
828 | 828 | try: |
|
829 | 829 | self.repo._watchmanclient.command( |
|
830 | 830 | cmd, |
|
831 | 831 | { |
|
832 | 832 | b'name': self.name, |
|
833 | 833 | b'metadata': { |
|
834 | 834 | # the target revision |
|
835 | 835 | b'rev': commithash, |
|
836 | 836 | # approximate number of commits between current and target |
|
837 | 837 | b'distance': self.distance if self.distance else 0, |
|
838 | 838 | # success/failure (only really meaningful for state-leave) |
|
839 | 839 | b'status': status, |
|
840 | 840 | # whether the working copy parent is changing |
|
841 | 841 | b'partial': self.partial, |
|
842 | 842 | }, |
|
843 | 843 | }, |
|
844 | 844 | ) |
|
845 | 845 | return True |
|
846 | 846 | except Exception as e: |
|
847 | 847 | # Swallow any errors; fire and forget |
|
848 | 848 | self.repo.ui.log( |
|
849 | 849 | b'watchman', b'Exception %s while running %s\n', e, cmd |
|
850 | 850 | ) |
|
851 | 851 | return False |
|
852 | 852 | |
|
853 | 853 | |
|
854 | 854 | # Estimate the distance between two nodes |
|
855 | 855 | def calcdistance(repo, oldnode, newnode): |
|
856 | 856 | anc = repo.changelog.ancestor(oldnode, newnode) |
|
857 | 857 | ancrev = repo[anc].rev() |
|
858 | 858 | distance = abs(repo[oldnode].rev() - ancrev) + abs( |
|
859 | 859 | repo[newnode].rev() - ancrev |
|
860 | 860 | ) |
|
861 | 861 | return distance |
|
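A hand-worked instance of the estimate (revision numbers invented for the example): moving from a commit at revision 12 to one at revision 20 whose common ancestor is revision 10 gives a distance of (12 - 10) + (20 - 10) = 12:

    oldrev, newrev, ancrev = 12, 20, 10      # illustrative revision numbers
    assert abs(oldrev - ancrev) + abs(newrev - ancrev) == 12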
862 | 862 | |
|
863 | 863 | |
|
864 | 864 | # Bracket working copy updates with calls to the watchman state-enter |
|
865 | 865 | # and state-leave commands. This allows clients to perform more intelligent |
|
866 | 866 | # settling during bulk file change scenarios |
|
867 | 867 | # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling |
|
868 | 868 | def wrapupdate( |
|
869 | 869 | orig, |
|
870 | 870 | repo, |
|
871 | 871 | node, |
|
872 | 872 | branchmerge, |
|
873 | 873 | force, |
|
874 | 874 | ancestor=None, |
|
875 | 875 | mergeancestor=False, |
|
876 | 876 | labels=None, |
|
877 | 877 | matcher=None, |
|
878 | 878 | **kwargs |
|
879 | 879 | ): |
|
880 | 880 | |
|
881 | 881 | distance = 0 |
|
882 | 882 | partial = True |
|
883 | 883 | oldnode = repo[b'.'].node() |
|
884 | 884 | newnode = repo[node].node() |
|
885 | 885 | if matcher is None or matcher.always(): |
|
886 | 886 | partial = False |
|
887 | 887 | distance = calcdistance(repo.unfiltered(), oldnode, newnode) |
|
888 | 888 | |
|
889 | 889 | with state_update( |
|
890 | 890 | repo, |
|
891 | 891 | name=b"hg.update", |
|
892 | 892 | oldnode=oldnode, |
|
893 | 893 | newnode=newnode, |
|
894 | 894 | distance=distance, |
|
895 | 895 | partial=partial, |
|
896 | 896 | ): |
|
897 | 897 | return orig( |
|
898 | 898 | repo, |
|
899 | 899 | node, |
|
900 | 900 | branchmerge, |
|
901 | 901 | force, |
|
902 | 902 | ancestor, |
|
903 | 903 | mergeancestor, |
|
904 | 904 | labels, |
|
905 | 905 | matcher, |
|
906 | 906 | **kwargs |
|
907 | 907 | ) |
|
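On the consuming side of this bracket, a Watchman subscriber can ask for notifications to be deferred while ``hg.update`` is in flight, which is the settling behaviour the comment above refers to. A minimal sketch using pywatchman, with a placeholder repository path and subscription name; the ``defer`` field is documented in Watchman's advanced-settling page linked above:

    import pywatchman

    client = pywatchman.client()
    watch = client.query('watch-project', '/path/to/repo')['watch']
    client.query('subscribe', watch, 'my-subscription', {
        'fields': ['name', 'exists'],
        'defer': ['hg.update'],   # hold notifications until hg leaves the hg.update state
    })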
908 | 908 | |
|
909 | 909 | |
|
910 | 910 | def repo_has_depth_one_nested_repo(repo): |
|
911 | 911 | for f in repo.wvfs.listdir(): |
|
912 | 912 | if os.path.isdir(os.path.join(repo.root, f, b'.hg')): |
|
913 | 913 | msg = b'fsmonitor: sub-repository %r detected, fsmonitor disabled\n' |
|
914 | 914 | repo.ui.debug(msg % f) |
|
915 | 915 | return True |
|
916 | 916 | return False |
|
917 | 917 | |
|
918 | 918 | |
|
919 | 919 | def reposetup(ui, repo): |
|
920 | 920 | # We don't work with largefiles or inotify |
|
921 | 921 | exts = extensions.enabled() |
|
922 | 922 | for ext in _blacklist: |
|
923 | 923 | if ext in exts: |
|
924 | 924 | ui.warn( |
|
925 | 925 | _( |
|
926 | 926 | b'The fsmonitor extension is incompatible with the %s ' |
|
927 | 927 | b'extension and has been disabled.\n' |
|
928 | 928 | ) |
|
929 | 929 | % ext |
|
930 | 930 | ) |
|
931 | 931 | return |
|
932 | 932 | |
|
933 | 933 | if repo.local(): |
|
934 | 934 | # We don't work with subrepos either. |
|
935 | 935 | # |
|
936 | 936 | # Checking repo[None].substate can cause a dirstate parse, which is too |
|
937 | 937 | # slow. Instead, look directly for the .hgsub / .hgsubstate files. |
|
938 | 938 | if repo.wvfs.exists(b'.hgsubstate') or repo.wvfs.exists(b'.hgsub'): |
|
939 | 939 | return |
|
940 | 940 | |
|
941 | 941 | if repo_has_depth_one_nested_repo(repo): |
|
942 | 942 | return |
|
943 | 943 | |
|
944 | 944 | fsmonitorstate = state.state(repo) |
|
945 | 945 | if fsmonitorstate.mode == b'off': |
|
946 | 946 | return |
|
947 | 947 | |
|
948 | 948 | try: |
|
949 | 949 | client = watchmanclient.client(repo.ui, repo.root) |
|
950 | 950 | except Exception as ex: |
|
951 | 951 | _handleunavailable(ui, fsmonitorstate, ex) |
|
952 | 952 | return |
|
953 | 953 | |
|
954 | 954 | repo._fsmonitorstate = fsmonitorstate |
|
955 | 955 | repo._watchmanclient = client |
|
956 | 956 | |
|
957 | 957 | dirstate, cached = localrepo.isfilecached(repo, b'dirstate') |
|
958 | 958 | if cached: |
|
959 | 959 | # at this point since fsmonitorstate wasn't present, |
|
960 | 960 | # repo.dirstate is not a fsmonitordirstate |
|
961 | 961 | makedirstate(repo, dirstate) |
|
962 | 962 | |
|
963 | 963 | class fsmonitorrepo(repo.__class__): |
|
964 | 964 | def status(self, *args, **kwargs): |
|
965 | 965 | orig = super(fsmonitorrepo, self).status |
|
966 | 966 | return overridestatus(orig, self, *args, **kwargs) |
|
967 | 967 | |
|
968 | 968 | def wlocknostateupdate(self, *args, **kwargs): |
|
969 | 969 | return super(fsmonitorrepo, self).wlock(*args, **kwargs) |
|
970 | 970 | |
|
971 | 971 | def wlock(self, *args, **kwargs): |
|
972 | 972 | l = super(fsmonitorrepo, self).wlock(*args, **kwargs) |
|
973 | 973 | if not ui.configbool( |
|
974 | 974 | b"experimental", b"fsmonitor.transaction_notify" |
|
975 | 975 | ): |
|
976 | 976 | return l |
|
977 | 977 | if l.held != 1: |
|
978 | 978 | return l |
|
979 | 979 | origrelease = l.releasefn |
|
980 | 980 | |
|
981 | 981 | def staterelease(): |
|
982 | 982 | if origrelease: |
|
983 | 983 | origrelease() |
|
984 | 984 | if l.stateupdate: |
|
985 | 985 | l.stateupdate.exit() |
|
986 | 986 | l.stateupdate = None |
|
987 | 987 | |
|
988 | 988 | try: |
|
989 | 989 | l.stateupdate = None |
|
990 | 990 | l.stateupdate = state_update(self, name=b"hg.transaction") |
|
991 | 991 | l.stateupdate.enter() |
|
992 | 992 | l.releasefn = staterelease |
|
993 | 993 | except Exception as e: |
|
994 | 994 | # Swallow any errors; fire and forget |
|
995 | 995 | self.ui.log( |
|
996 | 996 | b'watchman', b'Exception in state update %s\n', e |
|
997 | 997 | ) |
|
998 | 998 | return l |
|
999 | 999 | |
|
1000 | 1000 | repo.__class__ = fsmonitorrepo |
@@ -1,1306 +1,1261 b''
|
1 | 1 | /* |
|
2 | 2 | parsers.c - efficient content parsing |
|
3 | 3 | |
|
4 | 4 | Copyright 2008 Olivia Mackall <olivia@selenic.com> and others |
|
5 | 5 | |
|
6 | 6 | This software may be used and distributed according to the terms of |
|
7 | 7 | the GNU General Public License, incorporated herein by reference. |
|
8 | 8 | */ |
|
9 | 9 | |
|
10 | 10 | #define PY_SSIZE_T_CLEAN |
|
11 | 11 | #include <Python.h> |
|
12 | 12 | #include <ctype.h> |
|
13 | 13 | #include <stddef.h> |
|
14 | 14 | #include <string.h> |
|
15 | 15 | |
|
16 | 16 | #include "bitmanipulation.h" |
|
17 | 17 | #include "charencode.h" |
|
18 | 18 | #include "util.h" |
|
19 | 19 | |
|
20 | 20 | static const char *const versionerrortext = "Python minor version mismatch"; |
|
21 | 21 | |
|
22 | 22 | static const int dirstate_v1_from_p2 = -2; |
|
23 | 23 | static const int dirstate_v1_nonnormal = -1; |
|
24 | 24 | static const int ambiguous_time = -1; |
|
25 | 25 | |
|
26 | 26 | static PyObject *dict_new_presized(PyObject *self, PyObject *args) |
|
27 | 27 | { |
|
28 | 28 | Py_ssize_t expected_size; |
|
29 | 29 | |
|
30 | 30 | if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) { |
|
31 | 31 | return NULL; |
|
32 | 32 | } |
|
33 | 33 | |
|
34 | 34 | return _dict_new_presized(expected_size); |
|
35 | 35 | } |
|
36 | 36 | |
|
37 | 37 | static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args, |
|
38 | 38 | PyObject *kwds) |
|
39 | 39 | { |
|
40 | 40 | /* We do all the initialization here and not a tp_init function because |
|
41 | 41 | * dirstate_item is immutable. */ |
|
42 | 42 | dirstateItemObject *t; |
|
43 | 43 | int wc_tracked; |
|
44 | 44 | int p1_tracked; |
|
45 | 45 | int p2_info; |
|
46 | 46 | int has_meaningful_data; |
|
47 | 47 | int has_meaningful_mtime; |
|
48 | 48 | int mtime_second_ambiguous; |
|
49 | 49 | int mode; |
|
50 | 50 | int size; |
|
51 | 51 | int mtime_s; |
|
52 | 52 | int mtime_ns; |
|
53 | 53 | PyObject *parentfiledata; |
|
54 | 54 | PyObject *mtime; |
|
55 | 55 | PyObject *fallback_exec; |
|
56 | 56 | PyObject *fallback_symlink; |
|
57 | 57 | static char *keywords_name[] = { |
|
58 | 58 | "wc_tracked", "p1_tracked", "p2_info", |
|
59 | 59 | "has_meaningful_data", "has_meaningful_mtime", "parentfiledata", |
|
60 | 60 | "fallback_exec", "fallback_symlink", NULL, |
|
61 | 61 | }; |
|
62 | 62 | wc_tracked = 0; |
|
63 | 63 | p1_tracked = 0; |
|
64 | 64 | p2_info = 0; |
|
65 | 65 | has_meaningful_mtime = 1; |
|
66 | 66 | has_meaningful_data = 1; |
|
67 | 67 | mtime_second_ambiguous = 0; |
|
68 | 68 | parentfiledata = Py_None; |
|
69 | 69 | fallback_exec = Py_None; |
|
70 | 70 | fallback_symlink = Py_None; |
|
71 | 71 | if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name, |
|
72 | 72 | &wc_tracked, &p1_tracked, &p2_info, |
|
73 | 73 | &has_meaningful_data, |
|
74 | 74 | &has_meaningful_mtime, &parentfiledata, |
|
75 | 75 | &fallback_exec, &fallback_symlink)) { |
|
76 | 76 | return NULL; |
|
77 | 77 | } |
|
78 | 78 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); |
|
79 | 79 | if (!t) { |
|
80 | 80 | return NULL; |
|
81 | 81 | } |
|
82 | 82 | |
|
83 | 83 | t->flags = 0; |
|
84 | 84 | if (wc_tracked) { |
|
85 | 85 | t->flags |= dirstate_flag_wc_tracked; |
|
86 | 86 | } |
|
87 | 87 | if (p1_tracked) { |
|
88 | 88 | t->flags |= dirstate_flag_p1_tracked; |
|
89 | 89 | } |
|
90 | 90 | if (p2_info) { |
|
91 | 91 | t->flags |= dirstate_flag_p2_info; |
|
92 | 92 | } |
|
93 | 93 | |
|
94 | 94 | if (fallback_exec != Py_None) { |
|
95 | 95 | t->flags |= dirstate_flag_has_fallback_exec; |
|
96 | 96 | if (PyObject_IsTrue(fallback_exec)) { |
|
97 | 97 | t->flags |= dirstate_flag_fallback_exec; |
|
98 | 98 | } |
|
99 | 99 | } |
|
100 | 100 | if (fallback_symlink != Py_None) { |
|
101 | 101 | t->flags |= dirstate_flag_has_fallback_symlink; |
|
102 | 102 | if (PyObject_IsTrue(fallback_symlink)) { |
|
103 | 103 | t->flags |= dirstate_flag_fallback_symlink; |
|
104 | 104 | } |
|
105 | 105 | } |
|
106 | 106 | |
|
107 | 107 | if (parentfiledata != Py_None) { |
|
108 | 108 | if (!PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size, |
|
109 | 109 | &mtime)) { |
|
110 | 110 | return NULL; |
|
111 | 111 | } |
|
112 | 112 | if (mtime != Py_None) { |
|
113 | 113 | if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns, |
|
114 | 114 | &mtime_second_ambiguous)) { |
|
115 | 115 | return NULL; |
|
116 | 116 | } |
|
117 | 117 | } else { |
|
118 | 118 | has_meaningful_mtime = 0; |
|
119 | 119 | } |
|
120 | 120 | } else { |
|
121 | 121 | has_meaningful_data = 0; |
|
122 | 122 | has_meaningful_mtime = 0; |
|
123 | 123 | } |
|
124 | 124 | if (has_meaningful_data) { |
|
125 | 125 | t->flags |= dirstate_flag_has_meaningful_data; |
|
126 | 126 | t->mode = mode; |
|
127 | 127 | t->size = size; |
|
128 | 128 | if (mtime_second_ambiguous) { |
|
129 | 129 | t->flags |= dirstate_flag_mtime_second_ambiguous; |
|
130 | 130 | } |
|
131 | 131 | } else { |
|
132 | 132 | t->mode = 0; |
|
133 | 133 | t->size = 0; |
|
134 | 134 | } |
|
135 | 135 | if (has_meaningful_mtime) { |
|
136 | 136 | t->flags |= dirstate_flag_has_mtime; |
|
137 | 137 | t->mtime_s = mtime_s; |
|
138 | 138 | t->mtime_ns = mtime_ns; |
|
139 | 139 | } else { |
|
140 | 140 | t->mtime_s = 0; |
|
141 | 141 | t->mtime_ns = 0; |
|
142 | 142 | } |
|
143 | 143 | return (PyObject *)t; |
|
144 | 144 | } |
|
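The keyword names above are what the Python layer passes when constructing items. A hedged sketch of driving this constructor from Python, assuming the type is exposed as DirstateItem by the C extension module (the exact module path and exposed name depend on how Mercurial was built); every value is illustrative:

    from mercurial.cext import parsers

    # parentfiledata is (mode, size, (mtime_s, mtime_ns, mtime_second_ambiguous)),
    # matching the "iiO" / "iii" parsing above
    item = parsers.DirstateItem(
        wc_tracked=True,
        p1_tracked=True,
        parentfiledata=(0o100644, 12, (1700000000, 0, False)),
    )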
145 | 145 | |
|
146 | 146 | static void dirstate_item_dealloc(PyObject *o) |
|
147 | 147 | { |
|
148 | 148 | PyObject_Del(o); |
|
149 | 149 | } |
|
150 | 150 | |
|
151 | 151 | static inline bool dirstate_item_c_tracked(dirstateItemObject *self) |
|
152 | 152 | { |
|
153 | 153 | return (self->flags & dirstate_flag_wc_tracked); |
|
154 | 154 | } |
|
155 | 155 | |
|
156 | 156 | static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self) |
|
157 | 157 | { |
|
158 | 158 | const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | |
|
159 | 159 | dirstate_flag_p2_info; |
|
160 | 160 | return (self->flags & mask); |
|
161 | 161 | } |
|
162 | 162 | |
|
163 | 163 | static inline bool dirstate_item_c_added(dirstateItemObject *self) |
|
164 | 164 | { |
|
165 | 165 | const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | |
|
166 | 166 | dirstate_flag_p2_info); |
|
167 | 167 | const int target = dirstate_flag_wc_tracked; |
|
168 | 168 | return (self->flags & mask) == target; |
|
169 | 169 | } |
|
170 | 170 | |
|
171 | 171 | static inline bool dirstate_item_c_removed(dirstateItemObject *self) |
|
172 | 172 | { |
|
173 | 173 | if (self->flags & dirstate_flag_wc_tracked) { |
|
174 | 174 | return false; |
|
175 | 175 | } |
|
176 | 176 | return (self->flags & |
|
177 | 177 | (dirstate_flag_p1_tracked | dirstate_flag_p2_info)); |
|
178 | 178 | } |
|
179 | 179 | |
|
180 | 180 | static inline bool dirstate_item_c_merged(dirstateItemObject *self) |
|
181 | 181 | { |
|
182 | 182 | return ((self->flags & dirstate_flag_wc_tracked) && |
|
183 | 183 | (self->flags & dirstate_flag_p1_tracked) && |
|
184 | 184 | (self->flags & dirstate_flag_p2_info)); |
|
185 | 185 | } |
|
186 | 186 | |
|
187 | 187 | static inline bool dirstate_item_c_from_p2(dirstateItemObject *self) |
|
188 | 188 | { |
|
189 | 189 | return ((self->flags & dirstate_flag_wc_tracked) && |
|
190 | 190 | !(self->flags & dirstate_flag_p1_tracked) && |
|
191 | 191 | (self->flags & dirstate_flag_p2_info)); |
|
192 | 192 | } |
|
193 | 193 | |
|
194 | 194 | static inline char dirstate_item_c_v1_state(dirstateItemObject *self) |
|
195 | 195 | { |
|
196 | 196 | if (dirstate_item_c_removed(self)) { |
|
197 | 197 | return 'r'; |
|
198 | 198 | } else if (dirstate_item_c_merged(self)) { |
|
199 | 199 | return 'm'; |
|
200 | 200 | } else if (dirstate_item_c_added(self)) { |
|
201 | 201 | return 'a'; |
|
202 | 202 | } else { |
|
203 | 203 | return 'n'; |
|
204 | 204 | } |
|
205 | 205 | } |
|
206 | 206 | |
|
207 | 207 | static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self) |
|
208 | 208 | { |
|
209 | 209 | return (bool)self->flags & dirstate_flag_has_fallback_exec; |
|
210 | 210 | } |
|
211 | 211 | |
|
212 | 212 | static inline bool |
|
213 | 213 | dirstate_item_c_has_fallback_symlink(dirstateItemObject *self) |
|
214 | 214 | { |
|
215 | 215 | return (bool)self->flags & dirstate_flag_has_fallback_symlink; |
|
216 | 216 | } |
|
217 | 217 | |
|
218 | 218 | static inline int dirstate_item_c_v1_mode(dirstateItemObject *self) |
|
219 | 219 | { |
|
220 | 220 | if (self->flags & dirstate_flag_has_meaningful_data) { |
|
221 | 221 | return self->mode; |
|
222 | 222 | } else { |
|
223 | 223 | return 0; |
|
224 | 224 | } |
|
225 | 225 | } |
|
226 | 226 | |
|
227 | 227 | static inline int dirstate_item_c_v1_size(dirstateItemObject *self) |
|
228 | 228 | { |
|
229 | 229 | if (!(self->flags & dirstate_flag_wc_tracked) && |
|
230 | 230 | (self->flags & dirstate_flag_p2_info)) { |
|
231 | 231 | if (self->flags & dirstate_flag_p1_tracked) { |
|
232 | 232 | return dirstate_v1_nonnormal; |
|
233 | 233 | } else { |
|
234 | 234 | return dirstate_v1_from_p2; |
|
235 | 235 | } |
|
236 | 236 | } else if (dirstate_item_c_removed(self)) { |
|
237 | 237 | return 0; |
|
238 | 238 | } else if (self->flags & dirstate_flag_p2_info) { |
|
239 | 239 | return dirstate_v1_from_p2; |
|
240 | 240 | } else if (dirstate_item_c_added(self)) { |
|
241 | 241 | return dirstate_v1_nonnormal; |
|
242 | 242 | } else if (self->flags & dirstate_flag_has_meaningful_data) { |
|
243 | 243 | return self->size; |
|
244 | 244 | } else { |
|
245 | 245 | return dirstate_v1_nonnormal; |
|
246 | 246 | } |
|
247 | 247 | } |
|
248 | 248 | |
|
249 | 249 | static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self) |
|
250 | 250 | { |
|
251 | 251 | if (dirstate_item_c_removed(self)) { |
|
252 | 252 | return 0; |
|
253 | 253 | } else if (!(self->flags & dirstate_flag_has_mtime) || |
|
254 | 254 | !(self->flags & dirstate_flag_p1_tracked) || |
|
255 | 255 | !(self->flags & dirstate_flag_wc_tracked) || |
|
256 | 256 | (self->flags & dirstate_flag_p2_info) || |
|
257 | 257 | (self->flags & dirstate_flag_mtime_second_ambiguous)) { |
|
258 | 258 | return ambiguous_time; |
|
259 | 259 | } else { |
|
260 | 260 | return self->mtime_s; |
|
261 | 261 | } |
|
262 | 262 | } |
|
263 | 263 | |
|
264 | 264 | static PyObject *dirstate_item_v2_data(dirstateItemObject *self) |
|
265 | 265 | { |
|
266 | 266 | int flags = self->flags; |
|
267 | 267 | int mode = dirstate_item_c_v1_mode(self); |
|
268 | 268 | #ifdef S_IXUSR |
|
269 | 269 | /* This is for platforms with an exec bit */ |
|
270 | 270 | if ((mode & S_IXUSR) != 0) { |
|
271 | 271 | flags |= dirstate_flag_mode_exec_perm; |
|
272 | 272 | } else { |
|
273 | 273 | flags &= ~dirstate_flag_mode_exec_perm; |
|
274 | 274 | } |
|
275 | 275 | #else |
|
276 | 276 | flags &= ~dirstate_flag_mode_exec_perm; |
|
277 | 277 | #endif |
|
278 | 278 | #ifdef S_ISLNK |
|
279 | 279 | /* This is for platforms with support for symlinks */ |
|
280 | 280 | if (S_ISLNK(mode)) { |
|
281 | 281 | flags |= dirstate_flag_mode_is_symlink; |
|
282 | 282 | } else { |
|
283 | 283 | flags &= ~dirstate_flag_mode_is_symlink; |
|
284 | 284 | } |
|
285 | 285 | #else |
|
286 | 286 | flags &= ~dirstate_flag_mode_is_symlink; |
|
287 | 287 | #endif |
|
288 | 288 | return Py_BuildValue("iiii", flags, self->size, self->mtime_s, |
|
289 | 289 | self->mtime_ns); |
|
290 | 290 | }; |
|
291 | 291 | |
|
292 | static PyObject *dirstate_item_v1_state(dirstateItemObject *self) | |
|
293 | { | |
|
294 | char state = dirstate_item_c_v1_state(self); | |
|
295 | return PyBytes_FromStringAndSize(&state, 1); | |
|
296 | }; | |
|
297 | ||
|
298 | static PyObject *dirstate_item_v1_mode(dirstateItemObject *self) | |
|
299 | { | |
|
300 | return PyLong_FromLong(dirstate_item_c_v1_mode(self)); | |
|
301 | }; | |
|
302 | ||
|
303 | static PyObject *dirstate_item_v1_size(dirstateItemObject *self) | |
|
304 | { | |
|
305 | return PyLong_FromLong(dirstate_item_c_v1_size(self)); | |
|
306 | }; | |
|
307 | ||
|
308 | static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self) | |
|
309 | { | |
|
310 | return PyLong_FromLong(dirstate_item_c_v1_mtime(self)); | |
|
311 | }; | |
|
312 | ||
|
313 | 292 | static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self, |
|
314 | 293 | PyObject *other) |
|
315 | 294 | { |
|
316 | 295 | int other_s; |
|
317 | 296 | int other_ns; |
|
318 | 297 | int other_second_ambiguous; |
|
319 | 298 | if (!PyArg_ParseTuple(other, "iii", &other_s, &other_ns, |
|
320 | 299 | &other_second_ambiguous)) { |
|
321 | 300 | return NULL; |
|
322 | 301 | } |
|
323 | 302 | if (!(self->flags & dirstate_flag_has_mtime)) { |
|
324 | 303 | Py_RETURN_FALSE; |
|
325 | 304 | } |
|
326 | 305 | if (self->mtime_s != other_s) { |
|
327 | 306 | Py_RETURN_FALSE; |
|
328 | 307 | } |
|
329 | 308 | if (self->mtime_ns == 0 || other_ns == 0) { |
|
330 | 309 | if (self->flags & dirstate_flag_mtime_second_ambiguous) { |
|
331 | 310 | Py_RETURN_FALSE; |
|
332 | 311 | } else { |
|
333 | 312 | Py_RETURN_TRUE; |
|
334 | 313 | } |
|
335 | 314 | } |
|
336 | 315 | if (self->mtime_ns == other_ns) { |
|
337 | 316 | Py_RETURN_TRUE; |
|
338 | 317 | } else { |
|
339 | 318 | Py_RETURN_FALSE; |
|
340 | 319 | } |
|
341 | 320 | }; |
|
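Restated in Python for readability; this is a sketch of the decision logic only, and the real method additionally returns False outright when the item carries no stored mtime at all (the has_mtime flag check above):

    def mtime_likely_equal(self_s, self_ns, second_ambiguous, other_s, other_ns):
        # mirrors the C comparison above
        if self_s != other_s:
            return False
        if self_ns == 0 or other_ns == 0:
            # one side only has whole-second precision
            return not second_ambiguous
        return self_ns == other_ns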
342 | 321 | |
|
343 | 322 | /* This will never change since it's bound to V1 |
|
344 | 323 | */ |
|
345 | 324 | static inline dirstateItemObject * |
|
346 | 325 | dirstate_item_from_v1_data(char state, int mode, int size, int mtime) |
|
347 | 326 | { |
|
348 | 327 | dirstateItemObject *t = |
|
349 | 328 | PyObject_New(dirstateItemObject, &dirstateItemType); |
|
350 | 329 | if (!t) { |
|
351 | 330 | return NULL; |
|
352 | 331 | } |
|
353 | 332 | t->flags = 0; |
|
354 | 333 | t->mode = 0; |
|
355 | 334 | t->size = 0; |
|
356 | 335 | t->mtime_s = 0; |
|
357 | 336 | t->mtime_ns = 0; |
|
358 | 337 | |
|
359 | 338 | if (state == 'm') { |
|
360 | 339 | t->flags = (dirstate_flag_wc_tracked | |
|
361 | 340 | dirstate_flag_p1_tracked | dirstate_flag_p2_info); |
|
362 | 341 | } else if (state == 'a') { |
|
363 | 342 | t->flags = dirstate_flag_wc_tracked; |
|
364 | 343 | } else if (state == 'r') { |
|
365 | 344 | if (size == dirstate_v1_nonnormal) { |
|
366 | 345 | t->flags = |
|
367 | 346 | dirstate_flag_p1_tracked | dirstate_flag_p2_info; |
|
368 | 347 | } else if (size == dirstate_v1_from_p2) { |
|
369 | 348 | t->flags = dirstate_flag_p2_info; |
|
370 | 349 | } else { |
|
371 | 350 | t->flags = dirstate_flag_p1_tracked; |
|
372 | 351 | } |
|
373 | 352 | } else if (state == 'n') { |
|
374 | 353 | if (size == dirstate_v1_from_p2) { |
|
375 | 354 | t->flags = |
|
376 | 355 | dirstate_flag_wc_tracked | dirstate_flag_p2_info; |
|
377 | 356 | } else if (size == dirstate_v1_nonnormal) { |
|
378 | 357 | t->flags = |
|
379 | 358 | dirstate_flag_wc_tracked | dirstate_flag_p1_tracked; |
|
380 | 359 | } else if (mtime == ambiguous_time) { |
|
381 | 360 | t->flags = (dirstate_flag_wc_tracked | |
|
382 | 361 | dirstate_flag_p1_tracked | |
|
383 | 362 | dirstate_flag_has_meaningful_data); |
|
384 | 363 | t->mode = mode; |
|
385 | 364 | t->size = size; |
|
386 | 365 | } else { |
|
387 | 366 | t->flags = (dirstate_flag_wc_tracked | |
|
388 | 367 | dirstate_flag_p1_tracked | |
|
389 | 368 | dirstate_flag_has_meaningful_data | |
|
390 | 369 | dirstate_flag_has_mtime); |
|
391 | 370 | t->mode = mode; |
|
392 | 371 | t->size = size; |
|
393 | 372 | t->mtime_s = mtime; |
|
394 | 373 | } |
|
395 | 374 | } else { |
|
396 | 375 | PyErr_Format(PyExc_RuntimeError, |
|
397 | 376 | "unknown state: `%c` (%d, %d, %d)", state, mode, |
|
398 | 377 | size, mtime, NULL); |
|
399 | 378 | Py_DECREF(t); |
|
400 | 379 | return NULL; |
|
401 | 380 | } |
|
402 | 381 | |
|
403 | 382 | return t; |
|
404 | 383 | } |
|
405 | 384 | |
|
406 | /* This will never change since it's bound to V1, unlike `dirstate_item_new` */ | |
|
407 | static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype, | |
|
408 | PyObject *args) | |
|
409 | { | |
|
410 | /* We do all the initialization here and not a tp_init function because | |
|
411 | * dirstate_item is immutable. */ | |
|
412 | char state; | |
|
413 | int size, mode, mtime; | |
|
414 | if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { | |
|
415 | return NULL; | |
|
416 | } | |
|
417 | return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime); | |
|
418 | }; | |
|
419 | ||
|
420 | 385 | static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype, |
|
421 | 386 | PyObject *args) |
|
422 | 387 | { |
|
423 | 388 | dirstateItemObject *t = |
|
424 | 389 | PyObject_New(dirstateItemObject, &dirstateItemType); |
|
425 | 390 | if (!t) { |
|
426 | 391 | return NULL; |
|
427 | 392 | } |
|
428 | 393 | if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s, |
|
429 | 394 | &t->mtime_ns)) { |
|
430 | 395 | return NULL; |
|
431 | 396 | } |
|
432 | 397 | if (t->flags & dirstate_flag_expected_state_is_modified) { |
|
433 | 398 | t->flags &= ~(dirstate_flag_expected_state_is_modified | |
|
434 | 399 | dirstate_flag_has_meaningful_data | |
|
435 | 400 | dirstate_flag_has_mtime); |
|
436 | 401 | } |
|
437 | 402 | t->mode = 0; |
|
438 | 403 | if (t->flags & dirstate_flag_has_meaningful_data) { |
|
439 | 404 | if (t->flags & dirstate_flag_mode_exec_perm) { |
|
440 | 405 | t->mode = 0755; |
|
441 | 406 | } else { |
|
442 | 407 | t->mode = 0644; |
|
443 | 408 | } |
|
444 | 409 | if (t->flags & dirstate_flag_mode_is_symlink) { |
|
445 | 410 | t->mode |= S_IFLNK; |
|
446 | 411 | } else { |
|
447 | 412 | t->mode |= S_IFREG; |
|
448 | 413 | } |
|
449 | 414 | } |
|
450 | 415 | return (PyObject *)t; |
|
451 | 416 | }; |
|
452 | 417 | |
|
453 | 418 | /* This means the next status call will have to actually check its content |
|
454 | 419 | to make sure it is correct. */ |
|
455 | 420 | static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self) |
|
456 | 421 | { |
|
457 | 422 | self->flags &= ~dirstate_flag_has_mtime; |
|
458 | 423 | Py_RETURN_NONE; |
|
459 | 424 | } |
|
460 | 425 | |
|
461 | 426 | /* See docstring of the python implementation for details */ |
|
462 | 427 | static PyObject *dirstate_item_set_clean(dirstateItemObject *self, |
|
463 | 428 | PyObject *args) |
|
464 | 429 | { |
|
465 | 430 | int size, mode, mtime_s, mtime_ns, mtime_second_ambiguous; |
|
466 | 431 | PyObject *mtime; |
|
467 | 432 | mtime_s = 0; |
|
468 | 433 | mtime_ns = 0; |
|
469 | 434 | mtime_second_ambiguous = 0; |
|
470 | 435 | if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) { |
|
471 | 436 | return NULL; |
|
472 | 437 | } |
|
473 | 438 | if (mtime != Py_None) { |
|
474 | 439 | if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns, |
|
475 | 440 | &mtime_second_ambiguous)) { |
|
476 | 441 | return NULL; |
|
477 | 442 | } |
|
478 | 443 | } else { |
|
479 | 444 | self->flags &= ~dirstate_flag_has_mtime; |
|
480 | 445 | } |
|
481 | 446 | self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked | |
|
482 | 447 | dirstate_flag_has_meaningful_data | |
|
483 | 448 | dirstate_flag_has_mtime; |
|
484 | 449 | if (mtime_second_ambiguous) { |
|
485 | 450 | self->flags |= dirstate_flag_mtime_second_ambiguous; |
|
486 | 451 | } |
|
487 | 452 | self->mode = mode; |
|
488 | 453 | self->size = size; |
|
489 | 454 | self->mtime_s = mtime_s; |
|
490 | 455 | self->mtime_ns = mtime_ns; |
|
491 | 456 | Py_RETURN_NONE; |
|
492 | 457 | } |
|
493 | 458 | |
|
494 | 459 | static PyObject *dirstate_item_set_tracked(dirstateItemObject *self) |
|
495 | 460 | { |
|
496 | 461 | self->flags |= dirstate_flag_wc_tracked; |
|
497 | 462 | self->flags &= ~dirstate_flag_has_mtime; |
|
498 | 463 | Py_RETURN_NONE; |
|
499 | 464 | } |
|
500 | 465 | |
|
501 | 466 | static PyObject *dirstate_item_set_untracked(dirstateItemObject *self) |
|
502 | 467 | { |
|
503 | 468 | self->flags &= ~dirstate_flag_wc_tracked; |
|
504 | 469 | self->flags &= ~dirstate_flag_has_meaningful_data; |
|
505 | 470 | self->flags &= ~dirstate_flag_has_mtime; |
|
506 | 471 | self->mode = 0; |
|
507 | 472 | self->size = 0; |
|
508 | 473 | self->mtime_s = 0; |
|
509 | 474 | self->mtime_ns = 0; |
|
510 | 475 | Py_RETURN_NONE; |
|
511 | 476 | } |
|
512 | 477 | |
|
513 | 478 | static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self) |
|
514 | 479 | { |
|
515 | 480 | if (self->flags & dirstate_flag_p2_info) { |
|
516 | 481 | self->flags &= ~(dirstate_flag_p2_info | |
|
517 | 482 | dirstate_flag_has_meaningful_data | |
|
518 | 483 | dirstate_flag_has_mtime); |
|
519 | 484 | self->mode = 0; |
|
520 | 485 | self->size = 0; |
|
521 | 486 | self->mtime_s = 0; |
|
522 | 487 | self->mtime_ns = 0; |
|
523 | 488 | } |
|
524 | 489 | Py_RETURN_NONE; |
|
525 | 490 | } |
|
526 | 491 | static PyMethodDef dirstate_item_methods[] = { |
|
527 | 492 | {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS, |
|
528 | 493 | "return data suitable for v2 serialization"}, |
|
529 | {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS, | |
|
530 | "return a \"state\" suitable for v1 serialization"}, | |
|
531 | {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS, | |
|
532 | "return a \"mode\" suitable for v1 serialization"}, | |
|
533 | {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS, | |
|
534 | "return a \"size\" suitable for v1 serialization"}, | |
|
535 | {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS, | |
|
536 | "return a \"mtime\" suitable for v1 serialization"}, | |
|
537 | 494 | {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to, |
|
538 | 495 | METH_O, "True if the stored mtime is likely equal to the given mtime"}, |
|
539 | {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, | |
|
540 | METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"}, | |
|
541 | 496 | {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth, |
|
542 | 497 | METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"}, |
|
543 | 498 | {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty, |
|
544 | 499 | METH_NOARGS, "mark a file as \"possibly dirty\""}, |
|
545 | 500 | {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS, |
|
546 | 501 | "mark a file as \"clean\""}, |
|
547 | 502 | {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS, |
|
548 | 503 | "mark a file as \"tracked\""}, |
|
549 | 504 | {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS, |
|
550 | 505 | "mark a file as \"untracked\""}, |
|
551 | 506 | {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS, |
|
552 | 507 | "remove all \"merge-only\" from a DirstateItem"}, |
|
553 | 508 | {NULL} /* Sentinel */ |
|
554 | 509 | }; |
|
555 | 510 | |
|
556 | 511 | static PyObject *dirstate_item_get_mode(dirstateItemObject *self) |
|
557 | 512 | { |
|
558 | 513 | return PyLong_FromLong(dirstate_item_c_v1_mode(self)); |
|
559 | 514 | }; |
|
560 | 515 | |
|
561 | 516 | static PyObject *dirstate_item_get_size(dirstateItemObject *self) |
|
562 | 517 | { |
|
563 | 518 | return PyLong_FromLong(dirstate_item_c_v1_size(self)); |
|
564 | 519 | }; |
|
565 | 520 | |
|
566 | 521 | static PyObject *dirstate_item_get_mtime(dirstateItemObject *self) |
|
567 | 522 | { |
|
568 | 523 | return PyLong_FromLong(dirstate_item_c_v1_mtime(self)); |
|
569 | 524 | }; |
|
570 | 525 | |
|
571 | 526 | static PyObject *dirstate_item_get_state(dirstateItemObject *self) |
|
572 | 527 | { |
|
573 | 528 | char state = dirstate_item_c_v1_state(self); |
|
574 | 529 | return PyBytes_FromStringAndSize(&state, 1); |
|
575 | 530 | }; |
|
576 | 531 | |
|
577 | 532 | static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self) |
|
578 | 533 | { |
|
579 | 534 | if (dirstate_item_c_has_fallback_exec(self)) { |
|
580 | 535 | Py_RETURN_TRUE; |
|
581 | 536 | } else { |
|
582 | 537 | Py_RETURN_FALSE; |
|
583 | 538 | } |
|
584 | 539 | }; |
|
585 | 540 | |
|
586 | 541 | static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self) |
|
587 | 542 | { |
|
588 | 543 | if (dirstate_item_c_has_fallback_exec(self)) { |
|
589 | 544 | if (self->flags & dirstate_flag_fallback_exec) { |
|
590 | 545 | Py_RETURN_TRUE; |
|
591 | 546 | } else { |
|
592 | 547 | Py_RETURN_FALSE; |
|
593 | 548 | } |
|
594 | 549 | } else { |
|
595 | 550 | Py_RETURN_NONE; |
|
596 | 551 | } |
|
597 | 552 | }; |
|
598 | 553 | |
|
599 | 554 | static int dirstate_item_set_fallback_exec(dirstateItemObject *self, |
|
600 | 555 | PyObject *value) |
|
601 | 556 | { |
|
602 | 557 | if ((value == Py_None) || (value == NULL)) { |
|
603 | 558 | self->flags &= ~dirstate_flag_has_fallback_exec; |
|
604 | 559 | } else { |
|
605 | 560 | self->flags |= dirstate_flag_has_fallback_exec; |
|
606 | 561 | if (PyObject_IsTrue(value)) { |
|
607 | 562 | self->flags |= dirstate_flag_fallback_exec; |
|
608 | 563 | } else { |
|
609 | 564 | self->flags &= ~dirstate_flag_fallback_exec; |
|
610 | 565 | } |
|
611 | 566 | } |
|
612 | 567 | return 0; |
|
613 | 568 | }; |
|
614 | 569 | |
|
615 | 570 | static PyObject * |
|
616 | 571 | dirstate_item_get_has_fallback_symlink(dirstateItemObject *self) |
|
617 | 572 | { |
|
618 | 573 | if (dirstate_item_c_has_fallback_symlink(self)) { |
|
619 | 574 | Py_RETURN_TRUE; |
|
620 | 575 | } else { |
|
621 | 576 | Py_RETURN_FALSE; |
|
622 | 577 | } |
|
623 | 578 | }; |
|
624 | 579 | |
|
625 | 580 | static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self) |
|
626 | 581 | { |
|
627 | 582 | if (dirstate_item_c_has_fallback_symlink(self)) { |
|
628 | 583 | if (self->flags & dirstate_flag_fallback_symlink) { |
|
629 | 584 | Py_RETURN_TRUE; |
|
630 | 585 | } else { |
|
631 | 586 | Py_RETURN_FALSE; |
|
632 | 587 | } |
|
633 | 588 | } else { |
|
634 | 589 | Py_RETURN_NONE; |
|
635 | 590 | } |
|
636 | 591 | }; |
|
637 | 592 | |
|
638 | 593 | static int dirstate_item_set_fallback_symlink(dirstateItemObject *self, |
|
639 | 594 | PyObject *value) |
|
640 | 595 | { |
|
641 | 596 | if ((value == Py_None) || (value == NULL)) { |
|
642 | 597 | self->flags &= ~dirstate_flag_has_fallback_symlink; |
|
643 | 598 | } else { |
|
644 | 599 | self->flags |= dirstate_flag_has_fallback_symlink; |
|
645 | 600 | if (PyObject_IsTrue(value)) { |
|
646 | 601 | self->flags |= dirstate_flag_fallback_symlink; |
|
647 | 602 | } else { |
|
648 | 603 | self->flags &= ~dirstate_flag_fallback_symlink; |
|
649 | 604 | } |
|
650 | 605 | } |
|
651 | 606 | return 0; |
|
652 | 607 | }; |
|
653 | 608 | |
|
654 | 609 | static PyObject *dirstate_item_get_tracked(dirstateItemObject *self) |
|
655 | 610 | { |
|
656 | 611 | if (dirstate_item_c_tracked(self)) { |
|
657 | 612 | Py_RETURN_TRUE; |
|
658 | 613 | } else { |
|
659 | 614 | Py_RETURN_FALSE; |
|
660 | 615 | } |
|
661 | 616 | }; |
|
662 | 617 | static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self) |
|
663 | 618 | { |
|
664 | 619 | if (self->flags & dirstate_flag_p1_tracked) { |
|
665 | 620 | Py_RETURN_TRUE; |
|
666 | 621 | } else { |
|
667 | 622 | Py_RETURN_FALSE; |
|
668 | 623 | } |
|
669 | 624 | }; |
|
670 | 625 | |
|
671 | 626 | static PyObject *dirstate_item_get_added(dirstateItemObject *self) |
|
672 | 627 | { |
|
673 | 628 | if (dirstate_item_c_added(self)) { |
|
674 | 629 | Py_RETURN_TRUE; |
|
675 | 630 | } else { |
|
676 | 631 | Py_RETURN_FALSE; |
|
677 | 632 | } |
|
678 | 633 | }; |
|
679 | 634 | |
|
680 | 635 | static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self) |
|
681 | 636 | { |
|
682 | 637 | if (self->flags & dirstate_flag_wc_tracked && |
|
683 | 638 | self->flags & dirstate_flag_p2_info) { |
|
684 | 639 | Py_RETURN_TRUE; |
|
685 | 640 | } else { |
|
686 | 641 | Py_RETURN_FALSE; |
|
687 | 642 | } |
|
688 | 643 | }; |
|
689 | 644 | |
|
690 | 645 | static PyObject *dirstate_item_get_merged(dirstateItemObject *self) |
|
691 | 646 | { |
|
692 | 647 | if (dirstate_item_c_merged(self)) { |
|
693 | 648 | Py_RETURN_TRUE; |
|
694 | 649 | } else { |
|
695 | 650 | Py_RETURN_FALSE; |
|
696 | 651 | } |
|
697 | 652 | }; |
|
698 | 653 | |
|
699 | 654 | static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self) |
|
700 | 655 | { |
|
701 | 656 | if (dirstate_item_c_from_p2(self)) { |
|
702 | 657 | Py_RETURN_TRUE; |
|
703 | 658 | } else { |
|
704 | 659 | Py_RETURN_FALSE; |
|
705 | 660 | } |
|
706 | 661 | }; |
|
707 | 662 | |
|
708 | 663 | static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self) |
|
709 | 664 | { |
|
710 | 665 | if (!(self->flags & dirstate_flag_wc_tracked)) { |
|
711 | 666 | Py_RETURN_FALSE; |
|
712 | 667 | } else if (!(self->flags & dirstate_flag_p1_tracked)) { |
|
713 | 668 | Py_RETURN_FALSE; |
|
714 | 669 | } else if (self->flags & dirstate_flag_p2_info) { |
|
715 | 670 | Py_RETURN_FALSE; |
|
716 | 671 | } else { |
|
717 | 672 | Py_RETURN_TRUE; |
|
718 | 673 | } |
|
719 | 674 | }; |
|
720 | 675 | |
|
721 | 676 | static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self) |
|
722 | 677 | { |
|
723 | 678 | if (dirstate_item_c_any_tracked(self)) { |
|
724 | 679 | Py_RETURN_TRUE; |
|
725 | 680 | } else { |
|
726 | 681 | Py_RETURN_FALSE; |
|
727 | 682 | } |
|
728 | 683 | }; |
|
729 | 684 | |
|
730 | 685 | static PyObject *dirstate_item_get_removed(dirstateItemObject *self) |
|
731 | 686 | { |
|
732 | 687 | if (dirstate_item_c_removed(self)) { |
|
733 | 688 | Py_RETURN_TRUE; |
|
734 | 689 | } else { |
|
735 | 690 | Py_RETURN_FALSE; |
|
736 | 691 | } |
|
737 | 692 | }; |
|
738 | 693 | |
|
739 | 694 | static PyGetSetDef dirstate_item_getset[] = { |
|
740 | 695 | {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL}, |
|
741 | 696 | {"size", (getter)dirstate_item_get_size, NULL, "size", NULL}, |
|
742 | 697 | {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL}, |
|
743 | 698 | {"state", (getter)dirstate_item_get_state, NULL, "state", NULL}, |
|
744 | 699 | {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL, |
|
745 | 700 | "has_fallback_exec", NULL}, |
|
746 | 701 | {"fallback_exec", (getter)dirstate_item_get_fallback_exec, |
|
747 | 702 | (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL}, |
|
748 | 703 | {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink, |
|
749 | 704 | NULL, "has_fallback_symlink", NULL}, |
|
750 | 705 | {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink, |
|
751 | 706 | (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL}, |
|
752 | 707 | {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL}, |
|
753 | 708 | {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked", |
|
754 | 709 | NULL}, |
|
755 | 710 | {"added", (getter)dirstate_item_get_added, NULL, "added", NULL}, |
|
756 | 711 | {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL}, |
|
757 | 712 | {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL}, |
|
758 | 713 | {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL}, |
|
759 | 714 | {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean", |
|
760 | 715 | NULL}, |
|
761 | 716 | {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked", |
|
762 | 717 | NULL}, |
|
763 | 718 | {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL}, |
|
764 | 719 | {NULL} /* Sentinel */ |
|
765 | 720 | }; |
|
766 | 721 | |
|
767 | 722 | PyTypeObject dirstateItemType = { |
|
768 | 723 | PyVarObject_HEAD_INIT(NULL, 0) /* header */ |
|
769 | 724 | "dirstate_tuple", /* tp_name */ |
|
770 | 725 | sizeof(dirstateItemObject), /* tp_basicsize */ |
|
771 | 726 | 0, /* tp_itemsize */ |
|
772 | 727 | (destructor)dirstate_item_dealloc, /* tp_dealloc */ |
|
773 | 728 | 0, /* tp_print */ |
|
774 | 729 | 0, /* tp_getattr */ |
|
775 | 730 | 0, /* tp_setattr */ |
|
776 | 731 | 0, /* tp_compare */ |
|
777 | 732 | 0, /* tp_repr */ |
|
778 | 733 | 0, /* tp_as_number */ |
|
779 | 734 | 0, /* tp_as_sequence */ |
|
780 | 735 | 0, /* tp_as_mapping */ |
|
781 | 736 | 0, /* tp_hash */ |
|
782 | 737 | 0, /* tp_call */ |
|
783 | 738 | 0, /* tp_str */ |
|
784 | 739 | 0, /* tp_getattro */ |
|
785 | 740 | 0, /* tp_setattro */ |
|
786 | 741 | 0, /* tp_as_buffer */ |
|
787 | 742 | Py_TPFLAGS_DEFAULT, /* tp_flags */ |
|
788 | 743 | "dirstate tuple", /* tp_doc */ |
|
789 | 744 | 0, /* tp_traverse */ |
|
790 | 745 | 0, /* tp_clear */ |
|
791 | 746 | 0, /* tp_richcompare */ |
|
792 | 747 | 0, /* tp_weaklistoffset */ |
|
793 | 748 | 0, /* tp_iter */ |
|
794 | 749 | 0, /* tp_iternext */ |
|
795 | 750 | dirstate_item_methods, /* tp_methods */ |
|
796 | 751 | 0, /* tp_members */ |
|
797 | 752 | dirstate_item_getset, /* tp_getset */ |
|
798 | 753 | 0, /* tp_base */ |
|
799 | 754 | 0, /* tp_dict */ |
|
800 | 755 | 0, /* tp_descr_get */ |
|
801 | 756 | 0, /* tp_descr_set */ |
|
802 | 757 | 0, /* tp_dictoffset */ |
|
803 | 758 | 0, /* tp_init */ |
|
804 | 759 | 0, /* tp_alloc */ |
|
805 | 760 | dirstate_item_new, /* tp_new */ |
|
806 | 761 | }; |
|
807 | 762 | |
|
808 | 763 | static PyObject *parse_dirstate(PyObject *self, PyObject *args) |
|
809 | 764 | { |
|
810 | 765 | PyObject *dmap, *cmap, *parents = NULL, *ret = NULL; |
|
811 | 766 | PyObject *fname = NULL, *cname = NULL, *entry = NULL; |
|
812 | 767 | char state, *cur, *str, *cpos; |
|
813 | 768 | int mode, size, mtime; |
|
814 | 769 | unsigned int flen, pos = 40; |
|
815 | 770 | Py_ssize_t len = 40; |
|
816 | 771 | Py_ssize_t readlen; |
|
817 | 772 | |
|
818 | 773 | if (!PyArg_ParseTuple(args, "O!O!y#:parse_dirstate", &PyDict_Type, |
|
819 | 774 | &dmap, &PyDict_Type, &cmap, &str, &readlen)) { |
|
820 | 775 | goto quit; |
|
821 | 776 | } |
|
822 | 777 | |
|
823 | 778 | len = readlen; |
|
824 | 779 | |
|
825 | 780 | /* read parents */ |
|
826 | 781 | if (len < 40) { |
|
827 | 782 | PyErr_SetString(PyExc_ValueError, |
|
828 | 783 | "too little data for parents"); |
|
829 | 784 | goto quit; |
|
830 | 785 | } |
|
831 | 786 | |
|
832 | 787 | parents = Py_BuildValue("y#y#", str, (Py_ssize_t)20, str + 20, |
|
833 | 788 | (Py_ssize_t)20); |
|
834 | 789 | if (!parents) { |
|
835 | 790 | goto quit; |
|
836 | 791 | } |
|
837 | 792 | |
|
838 | 793 | /* read filenames */ |
|
839 | 794 | while (pos >= 40 && pos < len) { |
|
840 | 795 | if (pos + 17 > len) { |
|
841 | 796 | PyErr_SetString(PyExc_ValueError, |
|
842 | 797 | "overflow in dirstate"); |
|
843 | 798 | goto quit; |
|
844 | 799 | } |
|
845 | 800 | cur = str + pos; |
|
846 | 801 | /* unpack header */ |
|
847 | 802 | state = *cur; |
|
848 | 803 | mode = getbe32(cur + 1); |
|
849 | 804 | size = getbe32(cur + 5); |
|
850 | 805 | mtime = getbe32(cur + 9); |
|
851 | 806 | flen = getbe32(cur + 13); |
|
852 | 807 | pos += 17; |
|
853 | 808 | cur += 17; |
|
854 | 809 | if (flen > len - pos) { |
|
855 | 810 | PyErr_SetString(PyExc_ValueError, |
|
856 | 811 | "overflow in dirstate"); |
|
857 | 812 | goto quit; |
|
858 | 813 | } |
|
859 | 814 | |
|
860 | 815 | entry = (PyObject *)dirstate_item_from_v1_data(state, mode, |
|
861 | 816 | size, mtime); |
|
862 | 817 | if (!entry) |
|
863 | 818 | goto quit; |
|
864 | 819 | cpos = memchr(cur, 0, flen); |
|
865 | 820 | if (cpos) { |
|
866 | 821 | fname = PyBytes_FromStringAndSize(cur, cpos - cur); |
|
867 | 822 | cname = PyBytes_FromStringAndSize( |
|
868 | 823 | cpos + 1, flen - (cpos - cur) - 1); |
|
869 | 824 | if (!fname || !cname || |
|
870 | 825 | PyDict_SetItem(cmap, fname, cname) == -1 || |
|
871 | 826 | PyDict_SetItem(dmap, fname, entry) == -1) { |
|
872 | 827 | goto quit; |
|
873 | 828 | } |
|
874 | 829 | Py_DECREF(cname); |
|
875 | 830 | } else { |
|
876 | 831 | fname = PyBytes_FromStringAndSize(cur, flen); |
|
877 | 832 | if (!fname || |
|
878 | 833 | PyDict_SetItem(dmap, fname, entry) == -1) { |
|
879 | 834 | goto quit; |
|
880 | 835 | } |
|
881 | 836 | } |
|
882 | 837 | Py_DECREF(fname); |
|
883 | 838 | Py_DECREF(entry); |
|
884 | 839 | fname = cname = entry = NULL; |
|
885 | 840 | pos += flen; |
|
886 | 841 | } |
|
887 | 842 | |
|
888 | 843 | ret = parents; |
|
889 | 844 | Py_INCREF(ret); |
|
890 | 845 | quit: |
|
891 | 846 | Py_XDECREF(fname); |
|
892 | 847 | Py_XDECREF(cname); |
|
893 | 848 | Py_XDECREF(entry); |
|
894 | 849 | Py_XDECREF(parents); |
|
895 | 850 | return ret; |
|
896 | 851 | } |
|
897 | 852 | |
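For readers following the C above: after the 40-byte parents header, every dirstate-v1 record is a 17-byte fixed header (one state byte, then big-endian 32-bit mode, size, mtime and filename length) followed by the filename, optionally joined to a copy source with a NUL byte. A minimal standalone sketch of that layout in Python; `parse_v1_records` is a hypothetical helper written for illustration, not part of this module::

    import struct

    def parse_v1_records(data):
        """Recover the information parse_dirstate extracts, from raw dirstate-v1 bytes."""
        parents = (data[:20], data[20:40])  # two 20-byte parent hashes
        pos, entries = 40, []
        while pos < len(data):
            state = data[pos:pos + 1]
            # signed mode/size/mtime (size may hold the -1/-2 sentinels), unsigned length
            mode, size, mtime, flen = struct.unpack(">iiiI", data[pos + 1:pos + 17])
            pos += 17
            name = data[pos:pos + flen]
            pos += flen
            filename, _, copysource = name.partition(b"\0")
            entries.append((state, mode, size, mtime, filename, copysource or None))
        return parents, entries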
|
898 | 853 | /* |
|
899 | 854 | * Efficiently pack a dirstate object into its on-disk format. |
|
900 | 855 | */ |
|
901 | 856 | static PyObject *pack_dirstate(PyObject *self, PyObject *args) |
|
902 | 857 | { |
|
903 | 858 | PyObject *packobj = NULL; |
|
904 | 859 | PyObject *map, *copymap, *pl, *mtime_unset = NULL; |
|
905 | 860 | Py_ssize_t nbytes, pos, l; |
|
906 | 861 | PyObject *k, *v = NULL, *pn; |
|
907 | 862 | char *p, *s; |
|
908 | 863 | |
|
909 | 864 | if (!PyArg_ParseTuple(args, "O!O!O!:pack_dirstate", &PyDict_Type, &map, |
|
910 | 865 | &PyDict_Type, ©map, &PyTuple_Type, &pl)) { |
|
911 | 866 | return NULL; |
|
912 | 867 | } |
|
913 | 868 | |
|
914 | 869 | if (PyTuple_Size(pl) != 2) { |
|
915 | 870 | PyErr_SetString(PyExc_TypeError, "expected 2-element tuple"); |
|
916 | 871 | return NULL; |
|
917 | 872 | } |
|
918 | 873 | |
|
919 | 874 | /* Figure out how much we need to allocate. */ |
|
920 | 875 | for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) { |
|
921 | 876 | PyObject *c; |
|
922 | 877 | if (!PyBytes_Check(k)) { |
|
923 | 878 | PyErr_SetString(PyExc_TypeError, "expected string key"); |
|
924 | 879 | goto bail; |
|
925 | 880 | } |
|
926 | 881 | nbytes += PyBytes_GET_SIZE(k) + 17; |
|
927 | 882 | c = PyDict_GetItem(copymap, k); |
|
928 | 883 | if (c) { |
|
929 | 884 | if (!PyBytes_Check(c)) { |
|
930 | 885 | PyErr_SetString(PyExc_TypeError, |
|
931 | 886 | "expected string key"); |
|
932 | 887 | goto bail; |
|
933 | 888 | } |
|
934 | 889 | nbytes += PyBytes_GET_SIZE(c) + 1; |
|
935 | 890 | } |
|
936 | 891 | } |
|
937 | 892 | |
|
938 | 893 | packobj = PyBytes_FromStringAndSize(NULL, nbytes); |
|
939 | 894 | if (packobj == NULL) { |
|
940 | 895 | goto bail; |
|
941 | 896 | } |
|
942 | 897 | |
|
943 | 898 | p = PyBytes_AS_STRING(packobj); |
|
944 | 899 | |
|
945 | 900 | pn = PyTuple_GET_ITEM(pl, 0); |
|
946 | 901 | if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) { |
|
947 | 902 | PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash"); |
|
948 | 903 | goto bail; |
|
949 | 904 | } |
|
950 | 905 | memcpy(p, s, l); |
|
951 | 906 | p += 20; |
|
952 | 907 | pn = PyTuple_GET_ITEM(pl, 1); |
|
953 | 908 | if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) { |
|
954 | 909 | PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash"); |
|
955 | 910 | goto bail; |
|
956 | 911 | } |
|
957 | 912 | memcpy(p, s, l); |
|
958 | 913 | p += 20; |
|
959 | 914 | |
|
960 | 915 | for (pos = 0; PyDict_Next(map, &pos, &k, &v);) { |
|
961 | 916 | dirstateItemObject *tuple; |
|
962 | 917 | char state; |
|
963 | 918 | int mode, size, mtime; |
|
964 | 919 | Py_ssize_t len, l; |
|
965 | 920 | PyObject *o; |
|
966 | 921 | char *t; |
|
967 | 922 | |
|
968 | 923 | if (!dirstate_tuple_check(v)) { |
|
969 | 924 | PyErr_SetString(PyExc_TypeError, |
|
970 | 925 | "expected a dirstate tuple"); |
|
971 | 926 | goto bail; |
|
972 | 927 | } |
|
973 | 928 | tuple = (dirstateItemObject *)v; |
|
974 | 929 | |
|
975 | 930 | state = dirstate_item_c_v1_state(tuple); |
|
976 | 931 | mode = dirstate_item_c_v1_mode(tuple); |
|
977 | 932 | size = dirstate_item_c_v1_size(tuple); |
|
978 | 933 | mtime = dirstate_item_c_v1_mtime(tuple); |
|
979 | 934 | *p++ = state; |
|
980 | 935 | putbe32((uint32_t)mode, p); |
|
981 | 936 | putbe32((uint32_t)size, p + 4); |
|
982 | 937 | putbe32((uint32_t)mtime, p + 8); |
|
983 | 938 | t = p + 12; |
|
984 | 939 | p += 16; |
|
985 | 940 | len = PyBytes_GET_SIZE(k); |
|
986 | 941 | memcpy(p, PyBytes_AS_STRING(k), len); |
|
987 | 942 | p += len; |
|
988 | 943 | o = PyDict_GetItem(copymap, k); |
|
989 | 944 | if (o) { |
|
990 | 945 | *p++ = '\0'; |
|
991 | 946 | l = PyBytes_GET_SIZE(o); |
|
992 | 947 | memcpy(p, PyBytes_AS_STRING(o), l); |
|
993 | 948 | p += l; |
|
994 | 949 | len += l + 1; |
|
995 | 950 | } |
|
996 | 951 | putbe32((uint32_t)len, t); |
|
997 | 952 | } |
|
998 | 953 | |
|
999 | 954 | pos = p - PyBytes_AS_STRING(packobj); |
|
1000 | 955 | if (pos != nbytes) { |
|
1001 | 956 | PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld", |
|
1002 | 957 | (long)pos, (long)nbytes); |
|
1003 | 958 | goto bail; |
|
1004 | 959 | } |
|
1005 | 960 | |
|
1006 | 961 | return packobj; |
|
1007 | 962 | bail: |
|
1008 | 963 | Py_XDECREF(mtime_unset); |
|
1009 | 964 | Py_XDECREF(packobj); |
|
1010 | 965 | Py_XDECREF(v); |
|
1011 | 966 | return NULL; |
|
1012 | 967 | } |
|
1013 | 968 | |
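`pack_dirstate` writes the same layout back out: the two parent hashes, then for each entry the 17-byte header whose length field covers the filename plus any NUL-joined copy source. A hedged counterpart to the parsing sketch above (again a standalone illustration, not the extension's API)::

    import struct

    def pack_v1_records(parents, entries):
        p1, p2 = parents
        assert len(p1) == 20 and len(p2) == 20
        out = [p1, p2]
        for state, mode, size, mtime, filename, copysource in entries:
            # a copy source, when present, is appended after a NUL byte and
            # counted in the record's length field
            name = filename if copysource is None else filename + b"\0" + copysource
            out.append(state)
            out.append(struct.pack(">iiiI", mode, size, mtime, len(name)))
            out.append(name)
        return b"".join(out)

Feeding the result back through the parsing sketch should reproduce the input, which mirrors the size bookkeeping the C code verifies before returning `packobj`.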
|
1014 | 969 | #define BUMPED_FIX 1 |
|
1015 | 970 | #define USING_SHA_256 2 |
|
1016 | 971 | #define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1) |
|
1017 | 972 | |
|
1018 | 973 | static PyObject *readshas(const char *source, unsigned char num, |
|
1019 | 974 | Py_ssize_t hashwidth) |
|
1020 | 975 | { |
|
1021 | 976 | int i; |
|
1022 | 977 | PyObject *list = PyTuple_New(num); |
|
1023 | 978 | if (list == NULL) { |
|
1024 | 979 | return NULL; |
|
1025 | 980 | } |
|
1026 | 981 | for (i = 0; i < num; i++) { |
|
1027 | 982 | PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth); |
|
1028 | 983 | if (hash == NULL) { |
|
1029 | 984 | Py_DECREF(list); |
|
1030 | 985 | return NULL; |
|
1031 | 986 | } |
|
1032 | 987 | PyTuple_SET_ITEM(list, i, hash); |
|
1033 | 988 | source += hashwidth; |
|
1034 | 989 | } |
|
1035 | 990 | return list; |
|
1036 | 991 | } |
|
1037 | 992 | |
|
1038 | 993 | static PyObject *fm1readmarker(const char *databegin, const char *dataend, |
|
1039 | 994 | uint32_t *msize) |
|
1040 | 995 | { |
|
1041 | 996 | const char *data = databegin; |
|
1042 | 997 | const char *meta; |
|
1043 | 998 | |
|
1044 | 999 | double mtime; |
|
1045 | 1000 | int16_t tz; |
|
1046 | 1001 | uint16_t flags; |
|
1047 | 1002 | unsigned char nsuccs, nparents, nmetadata; |
|
1048 | 1003 | Py_ssize_t hashwidth = 20; |
|
1049 | 1004 | |
|
1050 | 1005 | PyObject *prec = NULL, *parents = NULL, *succs = NULL; |
|
1051 | 1006 | PyObject *metadata = NULL, *ret = NULL; |
|
1052 | 1007 | int i; |
|
1053 | 1008 | |
|
1054 | 1009 | if (data + FM1_HEADER_SIZE > dataend) { |
|
1055 | 1010 | goto overflow; |
|
1056 | 1011 | } |
|
1057 | 1012 | |
|
1058 | 1013 | *msize = getbe32(data); |
|
1059 | 1014 | data += 4; |
|
1060 | 1015 | mtime = getbefloat64(data); |
|
1061 | 1016 | data += 8; |
|
1062 | 1017 | tz = getbeint16(data); |
|
1063 | 1018 | data += 2; |
|
1064 | 1019 | flags = getbeuint16(data); |
|
1065 | 1020 | data += 2; |
|
1066 | 1021 | |
|
1067 | 1022 | if (flags & USING_SHA_256) { |
|
1068 | 1023 | hashwidth = 32; |
|
1069 | 1024 | } |
|
1070 | 1025 | |
|
1071 | 1026 | nsuccs = (unsigned char)(*data++); |
|
1072 | 1027 | nparents = (unsigned char)(*data++); |
|
1073 | 1028 | nmetadata = (unsigned char)(*data++); |
|
1074 | 1029 | |
|
1075 | 1030 | if (databegin + *msize > dataend) { |
|
1076 | 1031 | goto overflow; |
|
1077 | 1032 | } |
|
1078 | 1033 | dataend = databegin + *msize; /* narrow down to marker size */ |
|
1079 | 1034 | |
|
1080 | 1035 | if (data + hashwidth > dataend) { |
|
1081 | 1036 | goto overflow; |
|
1082 | 1037 | } |
|
1083 | 1038 | prec = PyBytes_FromStringAndSize(data, hashwidth); |
|
1084 | 1039 | data += hashwidth; |
|
1085 | 1040 | if (prec == NULL) { |
|
1086 | 1041 | goto bail; |
|
1087 | 1042 | } |
|
1088 | 1043 | |
|
1089 | 1044 | if (data + nsuccs * hashwidth > dataend) { |
|
1090 | 1045 | goto overflow; |
|
1091 | 1046 | } |
|
1092 | 1047 | succs = readshas(data, nsuccs, hashwidth); |
|
1093 | 1048 | if (succs == NULL) { |
|
1094 | 1049 | goto bail; |
|
1095 | 1050 | } |
|
1096 | 1051 | data += nsuccs * hashwidth; |
|
1097 | 1052 | |
|
1098 | 1053 | if (nparents == 1 || nparents == 2) { |
|
1099 | 1054 | if (data + nparents * hashwidth > dataend) { |
|
1100 | 1055 | goto overflow; |
|
1101 | 1056 | } |
|
1102 | 1057 | parents = readshas(data, nparents, hashwidth); |
|
1103 | 1058 | if (parents == NULL) { |
|
1104 | 1059 | goto bail; |
|
1105 | 1060 | } |
|
1106 | 1061 | data += nparents * hashwidth; |
|
1107 | 1062 | } else { |
|
1108 | 1063 | parents = Py_None; |
|
1109 | 1064 | Py_INCREF(parents); |
|
1110 | 1065 | } |
|
1111 | 1066 | |
|
1112 | 1067 | if (data + 2 * nmetadata > dataend) { |
|
1113 | 1068 | goto overflow; |
|
1114 | 1069 | } |
|
1115 | 1070 | meta = data + (2 * nmetadata); |
|
1116 | 1071 | metadata = PyTuple_New(nmetadata); |
|
1117 | 1072 | if (metadata == NULL) { |
|
1118 | 1073 | goto bail; |
|
1119 | 1074 | } |
|
1120 | 1075 | for (i = 0; i < nmetadata; i++) { |
|
1121 | 1076 | PyObject *tmp, *left = NULL, *right = NULL; |
|
1122 | 1077 | Py_ssize_t leftsize = (unsigned char)(*data++); |
|
1123 | 1078 | Py_ssize_t rightsize = (unsigned char)(*data++); |
|
1124 | 1079 | if (meta + leftsize + rightsize > dataend) { |
|
1125 | 1080 | goto overflow; |
|
1126 | 1081 | } |
|
1127 | 1082 | left = PyBytes_FromStringAndSize(meta, leftsize); |
|
1128 | 1083 | meta += leftsize; |
|
1129 | 1084 | right = PyBytes_FromStringAndSize(meta, rightsize); |
|
1130 | 1085 | meta += rightsize; |
|
1131 | 1086 | tmp = PyTuple_New(2); |
|
1132 | 1087 | if (!left || !right || !tmp) { |
|
1133 | 1088 | Py_XDECREF(left); |
|
1134 | 1089 | Py_XDECREF(right); |
|
1135 | 1090 | Py_XDECREF(tmp); |
|
1136 | 1091 | goto bail; |
|
1137 | 1092 | } |
|
1138 | 1093 | PyTuple_SET_ITEM(tmp, 0, left); |
|
1139 | 1094 | PyTuple_SET_ITEM(tmp, 1, right); |
|
1140 | 1095 | PyTuple_SET_ITEM(metadata, i, tmp); |
|
1141 | 1096 | } |
|
1142 | 1097 | ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime, |
|
1143 | 1098 | (int)tz * 60, parents); |
|
1144 | 1099 | goto bail; /* return successfully */ |
|
1145 | 1100 | |
|
1146 | 1101 | overflow: |
|
1147 | 1102 | PyErr_SetString(PyExc_ValueError, "overflow in obsstore"); |
|
1148 | 1103 | bail: |
|
1149 | 1104 | Py_XDECREF(prec); |
|
1150 | 1105 | Py_XDECREF(succs); |
|
1151 | 1106 | Py_XDECREF(metadata); |
|
1152 | 1107 | Py_XDECREF(parents); |
|
1153 | 1108 | return ret; |
|
1154 | 1109 | } |
|
1155 | 1110 | |
|
1156 | 1111 | static PyObject *fm1readmarkers(PyObject *self, PyObject *args) |
|
1157 | 1112 | { |
|
1158 | 1113 | const char *data, *dataend; |
|
1159 | 1114 | Py_ssize_t datalen, offset, stop; |
|
1160 | 1115 | PyObject *markers = NULL; |
|
1161 | 1116 | |
|
1162 | 1117 | if (!PyArg_ParseTuple(args, "y#nn", &data, &datalen, &offset, &stop)) { |
|
1163 | 1118 | return NULL; |
|
1164 | 1119 | } |
|
1165 | 1120 | if (offset < 0) { |
|
1166 | 1121 | PyErr_SetString(PyExc_ValueError, |
|
1167 | 1122 | "invalid negative offset in fm1readmarkers"); |
|
1168 | 1123 | return NULL; |
|
1169 | 1124 | } |
|
1170 | 1125 | if (stop > datalen) { |
|
1171 | 1126 | PyErr_SetString( |
|
1172 | 1127 | PyExc_ValueError, |
|
1173 | 1128 | "stop longer than data length in fm1readmarkers"); |
|
1174 | 1129 | return NULL; |
|
1175 | 1130 | } |
|
1176 | 1131 | dataend = data + datalen; |
|
1177 | 1132 | data += offset; |
|
1178 | 1133 | markers = PyList_New(0); |
|
1179 | 1134 | if (!markers) { |
|
1180 | 1135 | return NULL; |
|
1181 | 1136 | } |
|
1182 | 1137 | while (offset < stop) { |
|
1183 | 1138 | uint32_t msize; |
|
1184 | 1139 | int error; |
|
1185 | 1140 | PyObject *record = fm1readmarker(data, dataend, &msize); |
|
1186 | 1141 | if (!record) { |
|
1187 | 1142 | goto bail; |
|
1188 | 1143 | } |
|
1189 | 1144 | error = PyList_Append(markers, record); |
|
1190 | 1145 | Py_DECREF(record); |
|
1191 | 1146 | if (error) { |
|
1192 | 1147 | goto bail; |
|
1193 | 1148 | } |
|
1194 | 1149 | data += msize; |
|
1195 | 1150 | offset += msize; |
|
1196 | 1151 | } |
|
1197 | 1152 | return markers; |
|
1198 | 1153 | bail: |
|
1199 | 1154 | Py_DECREF(markers); |
|
1200 | 1155 | return NULL; |
|
1201 | 1156 | } |
|
1202 | 1157 | |
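The FM1_HEADER_SIZE definition above spells out the fixed part of a version-1 obsolescence marker: a 32-bit total size, a 64-bit float date, a signed 16-bit timezone offset, 16 bits of flags, then one byte each for the successor, parent and metadata counts; the hashes that follow are 20 bytes, or 32 when USING_SHA_256 is set. A standalone sketch of decoding just that header (`read_fm1_header` is illustrative, not part of this module)::

    import struct

    USING_SHA_256 = 2
    FM1_HEADER = struct.Struct(">IdhH3B")  # size, date, tz, flags, nsuccs, nparents, nmetadata
    assert FM1_HEADER.size == 4 + 8 + 2 + 2 + 1 + 1 + 1

    def read_fm1_header(data, offset=0):
        size, date, tz, flags, nsuccs, nparents, nmeta = FM1_HEADER.unpack_from(data, offset)
        hashwidth = 32 if flags & USING_SHA_256 else 20
        return size, date, tz, flags, nsuccs, nparents, nmeta, hashwidth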
|
1203 | 1158 | static char parsers_doc[] = "Efficient content parsing."; |
|
1204 | 1159 | |
|
1205 | 1160 | PyObject *encodedir(PyObject *self, PyObject *args); |
|
1206 | 1161 | PyObject *pathencode(PyObject *self, PyObject *args); |
|
1207 | 1162 | PyObject *lowerencode(PyObject *self, PyObject *args); |
|
1208 | 1163 | PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs); |
|
1209 | 1164 | |
|
1210 | 1165 | static PyMethodDef methods[] = { |
|
1211 | 1166 | {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"}, |
|
1212 | 1167 | {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"}, |
|
1213 | 1168 | {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS, |
|
1214 | 1169 | "parse a revlog index\n"}, |
|
1215 | 1170 | {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"}, |
|
1216 | 1171 | {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"}, |
|
1217 | 1172 | {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"}, |
|
1218 | 1173 | {"dict_new_presized", dict_new_presized, METH_VARARGS, |
|
1219 | 1174 | "construct a dict with an expected size\n"}, |
|
1220 | 1175 | {"make_file_foldmap", make_file_foldmap, METH_VARARGS, |
|
1221 | 1176 | "make file foldmap\n"}, |
|
1222 | 1177 | {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS, |
|
1223 | 1178 | "escape a UTF-8 byte string to JSON (fast path)\n"}, |
|
1224 | 1179 | {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"}, |
|
1225 | 1180 | {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"}, |
|
1226 | 1181 | {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"}, |
|
1227 | 1182 | {"fm1readmarkers", fm1readmarkers, METH_VARARGS, |
|
1228 | 1183 | "parse v1 obsolete markers\n"}, |
|
1229 | 1184 | {NULL, NULL}}; |
|
1230 | 1185 | |
|
1231 | 1186 | void dirs_module_init(PyObject *mod); |
|
1232 | 1187 | void manifest_module_init(PyObject *mod); |
|
1233 | 1188 | void revlog_module_init(PyObject *mod); |
|
1234 | 1189 | |
|
1235 | 1190 | static const int version = 20; |
|
1236 | 1191 | |
|
1237 | 1192 | static void module_init(PyObject *mod) |
|
1238 | 1193 | { |
|
1239 | 1194 | PyModule_AddIntConstant(mod, "version", version); |
|
1240 | 1195 | |
|
1241 | 1196 | /* This module constant has two purposes. First, it lets us unit test |
|
1242 | 1197 | * the ImportError raised without hard-coding any error text. This |
|
1243 | 1198 | * means we can change the text in the future without breaking tests, |
|
1244 | 1199 | * even across changesets without a recompile. Second, its presence |
|
1245 | 1200 | * can be used to determine whether the version-checking logic is |
|
1246 | 1201 | * present, which also helps in testing across changesets without a |
|
1247 | 1202 | * recompile. Note that this means the pure-Python version of parsers |
|
1248 | 1203 | * should not have this module constant. */ |
|
1249 | 1204 | PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext); |
|
1250 | 1205 | |
|
1251 | 1206 | dirs_module_init(mod); |
|
1252 | 1207 | manifest_module_init(mod); |
|
1253 | 1208 | revlog_module_init(mod); |
|
1254 | 1209 | |
|
1255 | 1210 | if (PyType_Ready(&dirstateItemType) < 0) { |
|
1256 | 1211 | return; |
|
1257 | 1212 | } |
|
1258 | 1213 | Py_INCREF(&dirstateItemType); |
|
1259 | 1214 | PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType); |
|
1260 | 1215 | } |
|
1261 | 1216 | |
|
1262 | 1217 | static int check_python_version(void) |
|
1263 | 1218 | { |
|
1264 | 1219 | PyObject *sys = PyImport_ImportModule("sys"), *ver; |
|
1265 | 1220 | long hexversion; |
|
1266 | 1221 | if (!sys) { |
|
1267 | 1222 | return -1; |
|
1268 | 1223 | } |
|
1269 | 1224 | ver = PyObject_GetAttrString(sys, "hexversion"); |
|
1270 | 1225 | Py_DECREF(sys); |
|
1271 | 1226 | if (!ver) { |
|
1272 | 1227 | return -1; |
|
1273 | 1228 | } |
|
1274 | 1229 | hexversion = PyLong_AsLong(ver); |
|
1275 | 1230 | Py_DECREF(ver); |
|
1276 | 1231 | /* sys.hexversion is a 32-bit number by default, so the -1 case |
|
1277 | 1232 | * should only occur in unusual circumstances (e.g. if sys.hexversion |
|
1278 | 1233 | * is manually set to an invalid value). */ |
|
1279 | 1234 | if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) { |
|
1280 | 1235 | PyErr_Format(PyExc_ImportError, |
|
1281 | 1236 | "%s: The Mercurial extension " |
|
1282 | 1237 | "modules were compiled with Python " PY_VERSION |
|
1283 | 1238 | ", but " |
|
1284 | 1239 | "Mercurial is currently using Python with " |
|
1285 | 1240 | "sys.hexversion=%ld: " |
|
1286 | 1241 | "Python %s\n at: %s", |
|
1287 | 1242 | versionerrortext, hexversion, Py_GetVersion(), |
|
1288 | 1243 | Py_GetProgramFullPath()); |
|
1289 | 1244 | return -1; |
|
1290 | 1245 | } |
|
1291 | 1246 | return 0; |
|
1292 | 1247 | } |
|
1293 | 1248 | |
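The comparison above keeps only the top 16 bits of `sys.hexversion`, i.e. the major and minor version the extension was compiled for, and ignores the micro and release-level fields. Roughly equivalent Python, for illustration only::

    import sys

    def same_major_minor(compiled_hexversion):
        # sys.hexversion packs major/minor/micro/releaselevel+serial into one
        # integer; shifting by 16 discards everything below major.minor
        return (sys.hexversion >> 16) == (compiled_hexversion >> 16)

    # e.g. a module compiled under 3.11.4 (hexversion 0x030B04F0) still
    # passes the check on 3.11.9, but not on 3.12.x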
|
1294 | 1249 | static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers", |
|
1295 | 1250 | parsers_doc, -1, methods}; |
|
1296 | 1251 | |
|
1297 | 1252 | PyMODINIT_FUNC PyInit_parsers(void) |
|
1298 | 1253 | { |
|
1299 | 1254 | PyObject *mod; |
|
1300 | 1255 | |
|
1301 | 1256 | if (check_python_version() == -1) |
|
1302 | 1257 | return NULL; |
|
1303 | 1258 | mod = PyModule_Create(&parsers_module); |
|
1304 | 1259 | module_init(mod); |
|
1305 | 1260 | return mod; |
|
1306 | 1261 | } |
@@ -1,974 +1,974 b'' | |||
|
1 | 1 | # parsers.py - Python implementation of parsers.c |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | |
|
9 | 9 | import io |
|
10 | 10 | import stat |
|
11 | 11 | import struct |
|
12 | 12 | import zlib |
|
13 | 13 | |
|
14 | 14 | from ..node import ( |
|
15 | 15 | nullrev, |
|
16 | 16 | sha1nodeconstants, |
|
17 | 17 | ) |
|
18 | 18 | from ..thirdparty import attr |
|
19 | 19 | from .. import ( |
|
20 | 20 | error, |
|
21 | 21 | revlogutils, |
|
22 | 22 | util, |
|
23 | 23 | ) |
|
24 | 24 | |
|
25 | 25 | from ..revlogutils import nodemap as nodemaputil |
|
26 | 26 | from ..revlogutils import constants as revlog_constants |
|
27 | 27 | |
|
28 | 28 | stringio = io.BytesIO |
|
29 | 29 | |
|
30 | 30 | |
|
31 | 31 | _pack = struct.pack |
|
32 | 32 | _unpack = struct.unpack |
|
33 | 33 | _compress = zlib.compress |
|
34 | 34 | _decompress = zlib.decompress |
|
35 | 35 | |
|
36 | 36 | |
|
37 | 37 | # a special value used internally for `size` if the file comes from the other parent
|
38 | 38 | FROM_P2 = -2 |
|
39 | 39 | |
|
40 | 40 | # a special value used internally for `size` if the file is modified/merged/added |
|
41 | 41 | NONNORMAL = -1 |
|
42 | 42 | |
|
43 | 43 | # a special value used internally for `time` if the time is ambiguous
|
44 | 44 | AMBIGUOUS_TIME = -1 |
|
45 | 45 | |
|
46 | 46 | # Bits of the `flags` byte inside a node in the file format |
|
47 | 47 | DIRSTATE_V2_WDIR_TRACKED = 1 << 0 |
|
48 | 48 | DIRSTATE_V2_P1_TRACKED = 1 << 1 |
|
49 | 49 | DIRSTATE_V2_P2_INFO = 1 << 2 |
|
50 | 50 | DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3 |
|
51 | 51 | DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4 |
|
52 | 52 | DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5 |
|
53 | 53 | DIRSTATE_V2_FALLBACK_EXEC = 1 << 6 |
|
54 | 54 | DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7 |
|
55 | 55 | DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8 |
|
56 | 56 | DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9 |
|
57 | 57 | DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10 |
|
58 | 58 | DIRSTATE_V2_HAS_MTIME = 1 << 11 |
|
59 | 59 | DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12 |
|
60 | 60 | DIRSTATE_V2_DIRECTORY = 1 << 13 |
|
61 | 61 | DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14 |
|
62 | 62 | DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15 |
|
63 | 63 | |
|
64 | 64 | |
|
65 | 65 | @attr.s(slots=True, init=False) |
|
66 | 66 | class DirstateItem: |
|
67 | 67 | """represent a dirstate entry |
|
68 | 68 | |
|
69 | 69 | It holds multiple attributes
|
70 | 70 | |
|
71 | 71 | # about file tracking |
|
72 | 72 | - wc_tracked: is the file tracked by the working copy |
|
73 | 73 | - p1_tracked: is the file tracked in the working copy's first parent
|
74 | 74 | - p2_info: the file has been involved in some merge operation. Either |
|
75 | 75 | because it was actually merged, or because the p2 version was |
|
76 | 76 | ahead, or because some rename moved it there. In any of these cases
|
77 | 77 | `hg status` will want it displayed as modified. |
|
78 | 78 | |
|
79 | 79 | # about the file state expected from p1 manifest: |
|
80 | 80 | - mode: the file mode in p1 |
|
81 | 81 | - size: the file size in p1 |
|
82 | 82 | |
|
83 | 83 | These values can be set to None, which means we don't have a meaningful value
|
84 | 84 | to compare with, either because we don't really care about them (the
|
85 | 85 | `status` is known without having to look at the disk) or because we don't
|
86 | 86 | know them right now and a full comparison will be needed to find out if
|
87 | 87 | the file is clean. |
|
88 | 88 | |
|
89 | 89 | # about the file state on disk last time we saw it: |
|
90 | 90 | - mtime: the last known clean mtime for the file. |
|
91 | 91 | |
|
92 | 92 | This value can be set to None if no cacheable state exists, either because we
|
93 | 93 | do not care (see previous section) or because we could not cache something |
|
94 | 94 | yet. |
|
95 | 95 | """ |
|
96 | 96 | |
|
97 | 97 | _wc_tracked = attr.ib() |
|
98 | 98 | _p1_tracked = attr.ib() |
|
99 | 99 | _p2_info = attr.ib() |
|
100 | 100 | _mode = attr.ib() |
|
101 | 101 | _size = attr.ib() |
|
102 | 102 | _mtime_s = attr.ib() |
|
103 | 103 | _mtime_ns = attr.ib() |
|
104 | 104 | _fallback_exec = attr.ib() |
|
105 | 105 | _fallback_symlink = attr.ib() |
|
106 | 106 | _mtime_second_ambiguous = attr.ib() |
|
107 | 107 | |
|
108 | 108 | def __init__( |
|
109 | 109 | self, |
|
110 | 110 | wc_tracked=False, |
|
111 | 111 | p1_tracked=False, |
|
112 | 112 | p2_info=False, |
|
113 | 113 | has_meaningful_data=True, |
|
114 | 114 | has_meaningful_mtime=True, |
|
115 | 115 | parentfiledata=None, |
|
116 | 116 | fallback_exec=None, |
|
117 | 117 | fallback_symlink=None, |
|
118 | 118 | ): |
|
119 | 119 | self._wc_tracked = wc_tracked |
|
120 | 120 | self._p1_tracked = p1_tracked |
|
121 | 121 | self._p2_info = p2_info |
|
122 | 122 | |
|
123 | 123 | self._fallback_exec = fallback_exec |
|
124 | 124 | self._fallback_symlink = fallback_symlink |
|
125 | 125 | |
|
126 | 126 | self._mode = None |
|
127 | 127 | self._size = None |
|
128 | 128 | self._mtime_s = None |
|
129 | 129 | self._mtime_ns = None |
|
130 | 130 | self._mtime_second_ambiguous = False |
|
131 | 131 | if parentfiledata is None: |
|
132 | 132 | has_meaningful_mtime = False |
|
133 | 133 | has_meaningful_data = False |
|
134 | 134 | elif parentfiledata[2] is None: |
|
135 | 135 | has_meaningful_mtime = False |
|
136 | 136 | if has_meaningful_data: |
|
137 | 137 | self._mode = parentfiledata[0] |
|
138 | 138 | self._size = parentfiledata[1] |
|
139 | 139 | if has_meaningful_mtime: |
|
140 | 140 | ( |
|
141 | 141 | self._mtime_s, |
|
142 | 142 | self._mtime_ns, |
|
143 | 143 | self._mtime_second_ambiguous, |
|
144 | 144 | ) = parentfiledata[2] |
|
145 | 145 | |
|
146 | 146 | @classmethod |
|
147 | 147 | def from_v2_data(cls, flags, size, mtime_s, mtime_ns): |
|
148 | 148 | """Build a new DirstateItem object from V2 data""" |
|
149 | 149 | has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE) |
|
150 | 150 | has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME) |
|
151 | 151 | mode = None |
|
152 | 152 | |
|
153 | 153 | if flags & +DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED: |
|
154 | 154 | # we do not have support for this flag in the code yet, |
|
155 | 155 | # force a lookup for this file. |
|
156 | 156 | has_mode_size = False |
|
157 | 157 | has_meaningful_mtime = False |
|
158 | 158 | |
|
159 | 159 | fallback_exec = None |
|
160 | 160 | if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC: |
|
161 | 161 | fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC |
|
162 | 162 | |
|
163 | 163 | fallback_symlink = None |
|
164 | 164 | if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK: |
|
165 | 165 | fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK |
|
166 | 166 | |
|
167 | 167 | if has_mode_size: |
|
168 | 168 | assert stat.S_IXUSR == 0o100 |
|
169 | 169 | if flags & DIRSTATE_V2_MODE_EXEC_PERM: |
|
170 | 170 | mode = 0o755 |
|
171 | 171 | else: |
|
172 | 172 | mode = 0o644 |
|
173 | 173 | if flags & DIRSTATE_V2_MODE_IS_SYMLINK: |
|
174 | 174 | mode |= stat.S_IFLNK |
|
175 | 175 | else: |
|
176 | 176 | mode |= stat.S_IFREG |
|
177 | 177 | |
|
178 | 178 | second_ambiguous = flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS |
|
179 | 179 | return cls( |
|
180 | 180 | wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED), |
|
181 | 181 | p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED), |
|
182 | 182 | p2_info=bool(flags & DIRSTATE_V2_P2_INFO), |
|
183 | 183 | has_meaningful_data=has_mode_size, |
|
184 | 184 | has_meaningful_mtime=has_meaningful_mtime, |
|
185 | 185 | parentfiledata=(mode, size, (mtime_s, mtime_ns, second_ambiguous)), |
|
186 | 186 | fallback_exec=fallback_exec, |
|
187 | 187 | fallback_symlink=fallback_symlink, |
|
188 | 188 | ) |
|
189 | 189 | |
|
190 | 190 | @classmethod |
|
191 | 191 | def from_v1_data(cls, state, mode, size, mtime): |
|
192 | 192 | """Build a new DirstateItem object from V1 data |
|
193 | 193 | |
|
194 | 194 | Since the dirstate-v1 format is frozen, the signature of this function |
|
195 | 195 | is not expected to change, unlike the __init__ one. |
|
196 | 196 | """ |
|
197 | 197 | if state == b'm': |
|
198 | 198 | return cls(wc_tracked=True, p1_tracked=True, p2_info=True) |
|
199 | 199 | elif state == b'a': |
|
200 | 200 | return cls(wc_tracked=True) |
|
201 | 201 | elif state == b'r': |
|
202 | 202 | if size == NONNORMAL: |
|
203 | 203 | p1_tracked = True |
|
204 | 204 | p2_info = True |
|
205 | 205 | elif size == FROM_P2: |
|
206 | 206 | p1_tracked = False |
|
207 | 207 | p2_info = True |
|
208 | 208 | else: |
|
209 | 209 | p1_tracked = True |
|
210 | 210 | p2_info = False |
|
211 | 211 | return cls(p1_tracked=p1_tracked, p2_info=p2_info) |
|
212 | 212 | elif state == b'n': |
|
213 | 213 | if size == FROM_P2: |
|
214 | 214 | return cls(wc_tracked=True, p2_info=True) |
|
215 | 215 | elif size == NONNORMAL: |
|
216 | 216 | return cls(wc_tracked=True, p1_tracked=True) |
|
217 | 217 | elif mtime == AMBIGUOUS_TIME: |
|
218 | 218 | return cls( |
|
219 | 219 | wc_tracked=True, |
|
220 | 220 | p1_tracked=True, |
|
221 | 221 | has_meaningful_mtime=False, |
|
222 | 222 | parentfiledata=(mode, size, (42, 0, False)), |
|
223 | 223 | ) |
|
224 | 224 | else: |
|
225 | 225 | return cls( |
|
226 | 226 | wc_tracked=True, |
|
227 | 227 | p1_tracked=True, |
|
228 | 228 | parentfiledata=(mode, size, (mtime, 0, False)), |
|
229 | 229 | ) |
|
230 | 230 | else: |
|
231 | 231 | raise RuntimeError(b'unknown state: %s' % state) |
|
232 | 232 | |
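Because dirstate-v1 squeezes everything into (state, mode, size, mtime) plus the FROM_P2/NONNORMAL/AMBIGUOUS_TIME sentinels, the classmethod above is essentially a decision table. A few illustrative probes, assuming the class and the sentinel constants defined earlier in this file are in scope::

    # 'a': added in the working copy only
    assert DirstateItem.from_v1_data(b'a', 0, 0, 0).added

    # 'r' with size == NONNORMAL: removed, but both parents knew about it
    removed = DirstateItem.from_v1_data(b'r', 0, NONNORMAL, 0)
    assert removed.removed and removed.p1_tracked

    # 'n' with a real mtime: clean-looking, with cached stat data
    normal = DirstateItem.from_v1_data(b'n', 0o100644, 12, 1700000000)
    assert normal.maybe_clean and normal.mode == 0o100644

    # 'n' with mtime == AMBIGUOUS_TIME: the next status must re-check content
    lookup = DirstateItem.from_v1_data(b'n', 0o100644, 12, AMBIGUOUS_TIME)
    assert lookup.maybe_clean
    assert not lookup.mtime_likely_equal_to((1700000000, 0, False))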
|
233 | 233 | def set_possibly_dirty(self): |
|
234 | 234 | """Mark a file as "possibly dirty" |
|
235 | 235 | |
|
236 | 236 | This means the next status call will have to actually check its content |
|
237 | 237 | to make sure it is correct. |
|
238 | 238 | """ |
|
239 | 239 | self._mtime_s = None |
|
240 | 240 | self._mtime_ns = None |
|
241 | 241 | |
|
242 | 242 | def set_clean(self, mode, size, mtime): |
|
243 | 243 | """mark a file as "clean" cancelling potential "possibly dirty call" |
|
244 | 244 | |
|
245 | 245 | Note: this function is a descendant of `dirstate.normal` and is |
|
246 | 246 | currently expected to be call on "normal" entry only. There are not |
|
247 | 247 | reason for this to not change in the future as long as the ccode is |
|
248 | 248 | updated to preserve the proper state of the non-normal files. |
|
249 | 249 | """ |
|
250 | 250 | self._wc_tracked = True |
|
251 | 251 | self._p1_tracked = True |
|
252 | 252 | self._mode = mode |
|
253 | 253 | self._size = size |
|
254 | 254 | self._mtime_s, self._mtime_ns, self._mtime_second_ambiguous = mtime |
|
255 | 255 | |
|
256 | 256 | def set_tracked(self): |
|
257 | 257 | """mark a file as tracked in the working copy |
|
258 | 258 | |
|
259 | 259 | This will ultimately be called by command like `hg add`. |
|
260 | 260 | """ |
|
261 | 261 | self._wc_tracked = True |
|
262 | 262 | # `set_tracked` is replacing various `normallookup` calls. So we mark
|
263 | 263 | # the file as needing a lookup
|
264 | 264 | # |
|
265 | 265 | # Consider dropping this in the future in favor of something less broad. |
|
266 | 266 | self._mtime_s = None |
|
267 | 267 | self._mtime_ns = None |
|
268 | 268 | |
|
269 | 269 | def set_untracked(self): |
|
270 | 270 | """mark a file as untracked in the working copy |
|
271 | 271 | |
|
272 | 272 | This will ultimately be called by command like `hg remove`. |
|
273 | 273 | """ |
|
274 | 274 | self._wc_tracked = False |
|
275 | 275 | self._mode = None |
|
276 | 276 | self._size = None |
|
277 | 277 | self._mtime_s = None |
|
278 | 278 | self._mtime_ns = None |
|
279 | 279 | |
|
280 | 280 | def drop_merge_data(self): |
|
281 | 281 | """remove all "merge-only" from a DirstateItem |
|
282 | 282 | |
|
283 | 283 | This is to be call by the dirstatemap code when the second parent is dropped |
|
284 | 284 | """ |
|
285 | 285 | if self._p2_info: |
|
286 | 286 | self._p2_info = False |
|
287 | 287 | self._mode = None |
|
288 | 288 | self._size = None |
|
289 | 289 | self._mtime_s = None |
|
290 | 290 | self._mtime_ns = None |
|
291 | 291 | |
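The mutators above form the lifecycle the dirstate map drives: `set_tracked`/`set_untracked` back commands like `hg add` and `hg remove`, `set_possibly_dirty` throws away the cached mtime so the next status re-checks content, and `set_clean` records fresh stat data. A short hedged walk-through (assumes `DirstateItem` is in scope)::

    item = DirstateItem.from_v1_data(b'n', 0o100644, 12, 1700000000)

    item.set_possibly_dirty()  # only the cached mtime is invalidated
    assert item.tracked
    assert not item.mtime_likely_equal_to((1700000000, 0, False))

    item.set_clean(0o100644, 12, (1700000005, 0, False))  # fresh stat data
    assert item.mtime_likely_equal_to((1700000005, 0, False))

    item.set_untracked()  # `hg remove`-style: p1 still knows the file
    assert item.removed and item.p1_tracked

    item.set_tracked()  # `hg add` it back; the mtime needs a lookup again
    assert item.tracked
    assert not item.mtime_likely_equal_to((1700000005, 0, False))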
|
292 | 292 | @property |
|
293 | 293 | def mode(self): |
|
294 | return self.v1_mode() | |
|
294 | return self._v1_mode() | |
|
295 | 295 | |
|
296 | 296 | @property |
|
297 | 297 | def size(self): |
|
298 | return self.v1_size() | |
|
298 | return self._v1_size() | |
|
299 | 299 | |
|
300 | 300 | @property |
|
301 | 301 | def mtime(self): |
|
302 | return self.v1_mtime() | |
|
302 | return self._v1_mtime() | |
|
303 | 303 | |
|
304 | 304 | def mtime_likely_equal_to(self, other_mtime): |
|
305 | 305 | self_sec = self._mtime_s |
|
306 | 306 | if self_sec is None: |
|
307 | 307 | return False |
|
308 | 308 | self_ns = self._mtime_ns |
|
309 | 309 | other_sec, other_ns, second_ambiguous = other_mtime |
|
310 | 310 | if self_sec != other_sec: |
|
311 | 311 | # seconds are different, these mtimes are definitely not equal
|
312 | 312 | return False |
|
313 | 313 | elif other_ns == 0 or self_ns == 0: |
|
314 | 314 | # at least one side has no nanosecond information
|
315 | 315 | |
|
316 | 316 | if self._mtime_second_ambiguous: |
|
317 | 317 | # We cannot trust the mtime in this case |
|
318 | 318 | return False |
|
319 | 319 | else: |
|
320 | 320 | # the "seconds" value was reliable on its own. We are good to go. |
|
321 | 321 | return True |
|
322 | 322 | else: |
|
323 | 323 | # We have nanosecond information, let us use it!
|
324 | 324 | return self_ns == other_ns |
|
325 | 325 | |
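The comments above encode the comparison rules: seconds must match, nanoseconds are compared only when both sides have them, and a seconds-only match is rejected when the stored second was flagged ambiguous. A hedged sketch of those cases (assumes `DirstateItem` is in scope)::

    item = DirstateItem(
        wc_tracked=True,
        p1_tracked=True,
        parentfiledata=(0o100644, 12, (1700000000, 500, False)),
    )
    assert not item.mtime_likely_equal_to((1700000001, 500, False))  # seconds differ
    assert item.mtime_likely_equal_to((1700000000, 0, False))        # no ns on one side: trust seconds
    assert not item.mtime_likely_equal_to((1700000000, 499, False))  # both have ns, and they differ

    fuzzy = DirstateItem(
        wc_tracked=True,
        p1_tracked=True,
        parentfiledata=(0o100644, 12, (1700000000, 0, True)),  # second flagged ambiguous
    )
    assert not fuzzy.mtime_likely_equal_to((1700000000, 0, False))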
|
326 | 326 | @property |
|
327 | 327 | def state(self): |
|
328 | 328 | """ |
|
329 | 329 | States are: |
|
330 | 330 | n normal |
|
331 | 331 | m needs merging |
|
332 | 332 | r marked for removal |
|
333 | 333 | a marked for addition |
|
334 | 334 | |
|
335 | 335 | XXX This "state" is a bit obscure and mostly a direct expression of the |
|
336 | 336 | dirstatev1 format. It would make sense to ultimately deprecate it in |
|
337 | 337 | favor of the more "semantic" attributes. |
|
338 | 338 | """ |
|
339 | 339 | if not self.any_tracked: |
|
340 | 340 | return b'?' |
|
341 | return self.v1_state() | |
|
341 | return self._v1_state() | |
|
342 | 342 | |
|
343 | 343 | @property |
|
344 | 344 | def has_fallback_exec(self): |
|
345 | 345 | """True if "fallback" information are available for the "exec" bit |
|
346 | 346 | |
|
347 | 347 | Fallback information can be stored in the dirstate to keep track of |
|
348 | 348 | filesystem attribute tracked by Mercurial when the underlying file |
|
349 | 349 | system or operating system does not support that property, (e.g. |
|
350 | 350 | Windows). |
|
351 | 351 | |
|
352 | 352 | Not all version of the dirstate on-disk storage support preserving this |
|
353 | 353 | information. |
|
354 | 354 | """ |
|
355 | 355 | return self._fallback_exec is not None |
|
356 | 356 | |
|
357 | 357 | @property |
|
358 | 358 | def fallback_exec(self): |
|
359 | 359 | """ "fallback" information for the executable bit |
|
360 | 360 | |
|
361 | 361 | True if the file should be considered executable when we cannot get |
|
362 | 362 | this information from the file system. False if it should be
|
363 | 363 | considered non-executable. |
|
364 | 364 | |
|
365 | 365 | See has_fallback_exec for details.""" |
|
366 | 366 | return self._fallback_exec |
|
367 | 367 | |
|
368 | 368 | @fallback_exec.setter |
|
369 | 369 | def set_fallback_exec(self, value): |
|
370 | 370 | """control "fallback" executable bit |
|
371 | 371 | |
|
372 | 372 | Set to: |
|
373 | 373 | - True if the file should be considered executable, |
|
374 | 374 | - False if the file should be considered non-executable, |
|
375 | 375 | - None if we do not have valid fallback data. |
|
376 | 376 | |
|
377 | 377 | See has_fallback_exec for details.""" |
|
378 | 378 | if value is None: |
|
379 | 379 | self._fallback_exec = None |
|
380 | 380 | else: |
|
381 | 381 | self._fallback_exec = bool(value) |
|
382 | 382 | |
|
383 | 383 | @property |
|
384 | 384 | def has_fallback_symlink(self): |
|
385 | 385 | """True if "fallback" information are available for symlink status |
|
386 | 386 | |
|
387 | 387 | Fallback information can be stored in the dirstate to keep track of |
|
388 | 388 | filesystem attribute tracked by Mercurial when the underlying file |
|
389 | 389 | system or operating system does not support that property, (e.g. |
|
390 | 390 | Windows). |
|
391 | 391 | |
|
392 | 392 | Not all version of the dirstate on-disk storage support preserving this |
|
393 | 393 | information.""" |
|
394 | 394 | return self._fallback_symlink is not None |
|
395 | 395 | |
|
396 | 396 | @property |
|
397 | 397 | def fallback_symlink(self): |
|
398 | 398 | """ "fallback" information for symlink status |
|
399 | 399 | |
|
400 | 400 | True if the file should be considered a symlink when we cannot get
|
401 | 401 | this information from the file system. False if it should be
|
402 | 402 | considered not a symlink.
|
403 | 403 |
|
404 | 404 | See has_fallback_symlink for details."""
|
405 | 405 | return self._fallback_symlink |
|
406 | 406 | |
|
407 | 407 | @fallback_symlink.setter |
|
408 | 408 | def set_fallback_symlink(self, value): |
|
409 | 409 | """control "fallback" symlink status |
|
410 | 410 | |
|
411 | 411 | Set to: |
|
412 | 412 | - True if the file should be considered a symlink, |
|
413 | 413 | - False if the file should be considered not a symlink, |
|
414 | 414 | - None if we do not have valid fallback data. |
|
415 | 415 | |
|
416 | 416 | See has_fallback_symlink for details.""" |
|
417 | 417 | if value is None: |
|
418 | 418 | self._fallback_symlink = None |
|
419 | 419 | else: |
|
420 | 420 | self._fallback_symlink = bool(value) |
|
421 | 421 | |
|
422 | 422 | @property |
|
423 | 423 | def tracked(self): |
|
424 | 424 | """True is the file is tracked in the working copy""" |
|
425 | 425 | return self._wc_tracked |
|
426 | 426 | |
|
427 | 427 | @property |
|
428 | 428 | def any_tracked(self): |
|
429 | 429 | """True is the file is tracked anywhere (wc or parents)""" |
|
430 | 430 | return self._wc_tracked or self._p1_tracked or self._p2_info |
|
431 | 431 | |
|
432 | 432 | @property |
|
433 | 433 | def added(self): |
|
434 | 434 | """True if the file has been added""" |
|
435 | 435 | return self._wc_tracked and not (self._p1_tracked or self._p2_info) |
|
436 | 436 | |
|
437 | 437 | @property |
|
438 | 438 | def maybe_clean(self): |
|
439 | 439 | """True if the file has a chance to be in the "clean" state""" |
|
440 | 440 | if not self._wc_tracked: |
|
441 | 441 | return False |
|
442 | 442 | elif not self._p1_tracked: |
|
443 | 443 | return False |
|
444 | 444 | elif self._p2_info: |
|
445 | 445 | return False |
|
446 | 446 | return True |
|
447 | 447 | |
|
448 | 448 | @property |
|
449 | 449 | def p1_tracked(self): |
|
450 | 450 | """True if the file is tracked in the first parent manifest""" |
|
451 | 451 | return self._p1_tracked |
|
452 | 452 | |
|
453 | 453 | @property |
|
454 | 454 | def p2_info(self): |
|
455 | 455 | """True if the file needed to merge or apply any input from p2 |
|
456 | 456 | |
|
457 | 457 | See the class documentation for details. |
|
458 | 458 | """ |
|
459 | 459 | return self._wc_tracked and self._p2_info |
|
460 | 460 | |
|
461 | 461 | @property |
|
462 | 462 | def removed(self): |
|
463 | 463 | """True if the file has been removed""" |
|
464 | 464 | return not self._wc_tracked and (self._p1_tracked or self._p2_info) |
|
465 | 465 | |
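
The properties above all derive from the same three booleans. A small standalone truth-table sketch (illustrative only, mirroring the logic of `added`, `removed` and `maybe_clean`) makes the mapping explicit:

    def classify(wc_tracked, p1_tracked, p2_info):
        # mirrors the property logic above; "other" covers merge/p2 cases
        if not (wc_tracked or p1_tracked or p2_info):
            return 'untracked'
        if wc_tracked and not (p1_tracked or p2_info):
            return 'added'
        if not wc_tracked:
            return 'removed'
        if p1_tracked and not p2_info:
            return 'maybe clean'
        return 'other'

    for wc in (False, True):
        for p1 in (False, True):
            for p2 in (False, True):
                print(wc, p1, p2, '->', classify(wc, p1, p2))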
|
466 | 466 | def v2_data(self): |
|
467 | 467 | """Returns (flags, mode, size, mtime) for v2 serialization""" |
|
468 | 468 | flags = 0 |
|
469 | 469 | if self._wc_tracked: |
|
470 | 470 | flags |= DIRSTATE_V2_WDIR_TRACKED |
|
471 | 471 | if self._p1_tracked: |
|
472 | 472 | flags |= DIRSTATE_V2_P1_TRACKED |
|
473 | 473 | if self._p2_info: |
|
474 | 474 | flags |= DIRSTATE_V2_P2_INFO |
|
475 | 475 | if self._mode is not None and self._size is not None: |
|
476 | 476 | flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE |
|
477 | 477 | if self.mode & stat.S_IXUSR: |
|
478 | 478 | flags |= DIRSTATE_V2_MODE_EXEC_PERM |
|
479 | 479 | if stat.S_ISLNK(self.mode): |
|
480 | 480 | flags |= DIRSTATE_V2_MODE_IS_SYMLINK |
|
481 | 481 | if self._mtime_s is not None: |
|
482 | 482 | flags |= DIRSTATE_V2_HAS_MTIME |
|
483 | 483 | if self._mtime_second_ambiguous: |
|
484 | 484 | flags |= DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS |
|
485 | 485 | |
|
486 | 486 | if self._fallback_exec is not None: |
|
487 | 487 | flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC |
|
488 | 488 | if self._fallback_exec: |
|
489 | 489 | flags |= DIRSTATE_V2_FALLBACK_EXEC |
|
490 | 490 | |
|
491 | 491 | if self._fallback_symlink is not None: |
|
492 | 492 | flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK |
|
493 | 493 | if self._fallback_symlink: |
|
494 | 494 | flags |= DIRSTATE_V2_FALLBACK_SYMLINK |
|
495 | 495 | |
|
496 | 496 | # Note: we do not need to do anything regarding |
|
497 | 497 | # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED |
|
498 | 498 | # since we never set DIRSTATE_V2_HAS_DIRECTORY_MTIME
|
499 | 499 | return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0) |
|
500 | 500 | |
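
v2_data() encodes the per-file booleans as independent bits in a single flags word. A standalone sketch of that pattern follows; the bit positions below are made up for the example, the real values come from the dirstate-v2 format constants:

    # hypothetical bit positions, for illustration only
    WDIR_TRACKED = 1 << 0
    P1_TRACKED = 1 << 1
    P2_INFO = 1 << 2
    HAS_MODE_AND_SIZE = 1 << 3

    def make_flags(wc_tracked, p1_tracked, p2_info, has_mode_and_size):
        flags = 0
        if wc_tracked:
            flags |= WDIR_TRACKED
        if p1_tracked:
            flags |= P1_TRACKED
        if p2_info:
            flags |= P2_INFO
        if has_mode_and_size:
            flags |= HAS_MODE_AND_SIZE
        return flags

    assert make_flags(True, True, False, True) == 0b1011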
|
501 | def v1_state(self): | |
|
501 | def _v1_state(self): | |
|
502 | 502 | """return a "state" suitable for v1 serialization""" |
|
503 | 503 | if not self.any_tracked: |
|
504 | 504 | # the object has no state to record, this is -currently- |
|
505 | 505 | # unsupported |
|
506 | 506 | raise RuntimeError('untracked item') |
|
507 | 507 | elif self.removed: |
|
508 | 508 | return b'r' |
|
509 | 509 | elif self._p1_tracked and self._p2_info: |
|
510 | 510 | return b'm' |
|
511 | 511 | elif self.added: |
|
512 | 512 | return b'a' |
|
513 | 513 | else: |
|
514 | 514 | return b'n' |
|
515 | 515 | |
|
516 | def v1_mode(self): | |
|
516 | def _v1_mode(self): | |
|
517 | 517 | """return a "mode" suitable for v1 serialization""" |
|
518 | 518 | return self._mode if self._mode is not None else 0 |
|
519 | 519 | |
|
520 | def v1_size(self): | |
|
520 | def _v1_size(self): | |
|
521 | 521 | """return a "size" suitable for v1 serialization""" |
|
522 | 522 | if not self.any_tracked: |
|
523 | 523 | # the object has no state to record, this is -currently- |
|
524 | 524 | # unsupported |
|
525 | 525 | raise RuntimeError('untracked item') |
|
526 | 526 | elif self.removed and self._p1_tracked and self._p2_info: |
|
527 | 527 | return NONNORMAL |
|
528 | 528 | elif self._p2_info: |
|
529 | 529 | return FROM_P2 |
|
530 | 530 | elif self.removed: |
|
531 | 531 | return 0 |
|
532 | 532 | elif self.added: |
|
533 | 533 | return NONNORMAL |
|
534 | 534 | elif self._size is None: |
|
535 | 535 | return NONNORMAL |
|
536 | 536 | else: |
|
537 | 537 | return self._size |
|
538 | 538 | |
|
539 | def v1_mtime(self): | |
|
539 | def _v1_mtime(self): | |
|
540 | 540 | """return a "mtime" suitable for v1 serialization""" |
|
541 | 541 | if not self.any_tracked: |
|
542 | 542 | # the object has no state to record, this is -currently- |
|
543 | 543 | # unsupported |
|
544 | 544 | raise RuntimeError('untracked item') |
|
545 | 545 | elif self.removed: |
|
546 | 546 | return 0 |
|
547 | 547 | elif self._mtime_s is None: |
|
548 | 548 | return AMBIGUOUS_TIME |
|
549 | 549 | elif self._p2_info: |
|
550 | 550 | return AMBIGUOUS_TIME |
|
551 | 551 | elif not self._p1_tracked: |
|
552 | 552 | return AMBIGUOUS_TIME |
|
553 | 553 | elif self._mtime_second_ambiguous: |
|
554 | 554 | return AMBIGUOUS_TIME |
|
555 | 555 | else: |
|
556 | 556 | return self._mtime_s |
|
557 | 557 | |
|
558 | 558 | |
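
The four _v1_* helpers project the richer item back onto the legacy single-letter states. A standalone sketch of the state mapping alone, assuming the same meaning for the three booleans as above:

    def v1_state(wc_tracked, p1_tracked, p2_info):
        if not (wc_tracked or p1_tracked or p2_info):
            raise RuntimeError('untracked item')
        if not wc_tracked:
            return b'r'  # removed
        if p1_tracked and p2_info:
            return b'm'  # merged
        if not (p1_tracked or p2_info):
            return b'a'  # added
        return b'n'      # normal

    assert v1_state(True, False, False) == b'a'
    assert v1_state(False, True, False) == b'r'
    assert v1_state(True, True, True) == b'm'
    assert v1_state(True, True, False) == b'n'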
|
559 | 559 | def gettype(q): |
|
560 | 560 | return int(q & 0xFFFF) |
|
561 | 561 | |
|
562 | 562 | |
|
563 | 563 | class BaseIndexObject: |
|
564 | 564 | # Can I be passed to an algorithm implemented in Rust?
|
565 | 565 | rust_ext_compat = 0 |
|
566 | 566 | # Format of an index entry according to Python's `struct` language |
|
567 | 567 | index_format = revlog_constants.INDEX_ENTRY_V1 |
|
568 | 568 | # Size of a C unsigned long long int, platform independent |
|
569 | 569 | big_int_size = struct.calcsize(b'>Q') |
|
570 | 570 | # Size of a C long int, platform independent |
|
571 | 571 | int_size = struct.calcsize(b'>i') |
|
572 | 572 | # An empty index entry, used as a default value to be overridden, or nullrev |
|
573 | 573 | null_item = ( |
|
574 | 574 | 0, |
|
575 | 575 | 0, |
|
576 | 576 | 0, |
|
577 | 577 | -1, |
|
578 | 578 | -1, |
|
579 | 579 | -1, |
|
580 | 580 | -1, |
|
581 | 581 | sha1nodeconstants.nullid, |
|
582 | 582 | 0, |
|
583 | 583 | 0, |
|
584 | 584 | revlog_constants.COMP_MODE_INLINE, |
|
585 | 585 | revlog_constants.COMP_MODE_INLINE, |
|
586 | 586 | revlog_constants.RANK_UNKNOWN, |
|
587 | 587 | ) |
|
588 | 588 | |
|
589 | 589 | @util.propertycache |
|
590 | 590 | def entry_size(self): |
|
591 | 591 | return self.index_format.size |
|
592 | 592 | |
|
593 | 593 | @util.propertycache |
|
594 | 594 | def _nodemap(self): |
|
595 | 595 | nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev}) |
|
596 | 596 | for r in range(0, len(self)): |
|
597 | 597 | n = self[r][7] |
|
598 | 598 | nodemap[n] = r |
|
599 | 599 | return nodemap |
|
600 | 600 | |
|
601 | 601 | def has_node(self, node): |
|
602 | 602 | """return True if the node exist in the index""" |
|
603 | 603 | return node in self._nodemap |
|
604 | 604 | |
|
605 | 605 | def rev(self, node): |
|
606 | 606 | """return a revision for a node |
|
607 | 607 | |
|
608 | 608 | If the node is unknown, raise a RevlogError""" |
|
609 | 609 | return self._nodemap[node] |
|
610 | 610 | |
|
611 | 611 | def get_rev(self, node): |
|
612 | 612 | """return a revision for a node |
|
613 | 613 | |
|
614 | 614 | If the node is unknown, return None""" |
|
615 | 615 | return self._nodemap.get(node) |
|
616 | 616 | |
|
617 | 617 | def _stripnodes(self, start): |
|
618 | 618 | if '_nodemap' in vars(self): |
|
619 | 619 | for r in range(start, len(self)): |
|
620 | 620 | n = self[r][7] |
|
621 | 621 | del self._nodemap[n] |
|
622 | 622 | |
|
623 | 623 | def clearcaches(self): |
|
624 | 624 | self.__dict__.pop('_nodemap', None) |
|
625 | 625 | |
|
626 | 626 | def __len__(self): |
|
627 | 627 | return self._lgt + len(self._extra) |
|
628 | 628 | |
|
629 | 629 | def append(self, tup): |
|
630 | 630 | if '_nodemap' in vars(self): |
|
631 | 631 | self._nodemap[tup[7]] = len(self) |
|
632 | 632 | data = self._pack_entry(len(self), tup) |
|
633 | 633 | self._extra.append(data) |
|
634 | 634 | |
|
635 | 635 | def _pack_entry(self, rev, entry): |
|
636 | 636 | assert entry[8] == 0 |
|
637 | 637 | assert entry[9] == 0 |
|
638 | 638 | return self.index_format.pack(*entry[:8]) |
|
639 | 639 | |
|
640 | 640 | def _check_index(self, i): |
|
641 | 641 | if not isinstance(i, int): |
|
642 | 642 | raise TypeError(b"expecting int indexes") |
|
643 | 643 | if i < 0 or i >= len(self): |
|
644 | 644 | raise IndexError(i) |
|
645 | 645 | |
|
646 | 646 | def __getitem__(self, i): |
|
647 | 647 | if i == -1: |
|
648 | 648 | return self.null_item |
|
649 | 649 | self._check_index(i) |
|
650 | 650 | if i >= self._lgt: |
|
651 | 651 | data = self._extra[i - self._lgt] |
|
652 | 652 | else: |
|
653 | 653 | index = self._calculate_index(i) |
|
654 | 654 | data = self._data[index : index + self.entry_size] |
|
655 | 655 | r = self._unpack_entry(i, data) |
|
656 | 656 | if self._lgt and i == 0: |
|
657 | 657 | offset = revlogutils.offset_type(0, gettype(r[0])) |
|
658 | 658 | r = (offset,) + r[1:] |
|
659 | 659 | return r |
|
660 | 660 | |
|
661 | 661 | def _unpack_entry(self, rev, data): |
|
662 | 662 | r = self.index_format.unpack(data) |
|
663 | 663 | r = r + ( |
|
664 | 664 | 0, |
|
665 | 665 | 0, |
|
666 | 666 | revlog_constants.COMP_MODE_INLINE, |
|
667 | 667 | revlog_constants.COMP_MODE_INLINE, |
|
668 | 668 | revlog_constants.RANK_UNKNOWN, |
|
669 | 669 | ) |
|
670 | 670 | return r |
|
671 | 671 | |
|
672 | 672 | def pack_header(self, header): |
|
673 | 673 | """pack header information as binary""" |
|
674 | 674 | v_fmt = revlog_constants.INDEX_HEADER |
|
675 | 675 | return v_fmt.pack(header) |
|
676 | 676 | |
|
677 | 677 | def entry_binary(self, rev): |
|
678 | 678 | """return the raw binary string representing a revision""" |
|
679 | 679 | entry = self[rev] |
|
680 | 680 | p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8]) |
|
681 | 681 | if rev == 0: |
|
682 | 682 | p = p[revlog_constants.INDEX_HEADER.size :] |
|
683 | 683 | return p |
|
684 | 684 | |
|
685 | 685 | |
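
BaseIndexObject builds its node-to-revision mapping lazily on first use and drops it again in clearcaches(). A small standalone model of that caching behaviour (not the real index API):

    class TinyIndex:
        def __init__(self, nodes):
            self._nodes = list(nodes)
            self._nodemap = None  # built on first use

        def _map(self):
            if self._nodemap is None:
                self._nodemap = {n: r for r, n in enumerate(self._nodes)}
            return self._nodemap

        def rev(self, node):
            return self._map()[node]        # raises KeyError if unknown

        def get_rev(self, node):
            return self._map().get(node)    # None if unknown

        def clearcaches(self):
            self._nodemap = None

    idx = TinyIndex([b'aaa', b'bbb'])
    assert idx.rev(b'bbb') == 1
    assert idx.get_rev(b'zzz') is None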
|
686 | 686 | class IndexObject(BaseIndexObject): |
|
687 | 687 | def __init__(self, data): |
|
688 | 688 | assert len(data) % self.entry_size == 0, ( |
|
689 | 689 | len(data), |
|
690 | 690 | self.entry_size, |
|
691 | 691 | len(data) % self.entry_size, |
|
692 | 692 | ) |
|
693 | 693 | self._data = data |
|
694 | 694 | self._lgt = len(data) // self.entry_size |
|
695 | 695 | self._extra = [] |
|
696 | 696 | |
|
697 | 697 | def _calculate_index(self, i): |
|
698 | 698 | return i * self.entry_size |
|
699 | 699 | |
|
700 | 700 | def __delitem__(self, i): |
|
701 | 701 | if not isinstance(i, slice) or not i.stop == -1 or i.step is not None: |
|
702 | 702 | raise ValueError(b"deleting slices only supports a:-1 with step 1") |
|
703 | 703 | i = i.start |
|
704 | 704 | self._check_index(i) |
|
705 | 705 | self._stripnodes(i) |
|
706 | 706 | if i < self._lgt: |
|
707 | 707 | self._data = self._data[: i * self.entry_size] |
|
708 | 708 | self._lgt = i |
|
709 | 709 | self._extra = [] |
|
710 | 710 | else: |
|
711 | 711 | self._extra = self._extra[: i - self._lgt] |
|
712 | 712 | |
|
713 | 713 | |
|
714 | 714 | class PersistentNodeMapIndexObject(IndexObject): |
|
715 | 715 | """a Debug oriented class to test persistent nodemap |
|
716 | 716 | |
|
717 | 717 | We need a simple python object to test API and higher level behavior. See |
|
718 | 718 | the Rust implementation for more serious usage. This should be used only |
|
719 | 719 | through the dedicated `devel.persistent-nodemap` config. |
|
720 | 720 | """ |
|
721 | 721 | |
|
722 | 722 | def nodemap_data_all(self): |
|
723 | 723 | """Return bytes containing a full serialization of a nodemap |
|
724 | 724 | |
|
725 | 725 | The nodemap should be valid for the full set of revisions in the |
|
726 | 726 | index.""" |
|
727 | 727 | return nodemaputil.persistent_data(self) |
|
728 | 728 | |
|
729 | 729 | def nodemap_data_incremental(self): |
|
730 | 730 | """Return bytes containing a incremental update to persistent nodemap |
|
731 | 731 | |
|
732 | 732 | This contains the data for an append-only update of the data provided
|
733 | 733 | in the last call to `update_nodemap_data`. |
|
734 | 734 | """ |
|
735 | 735 | if self._nm_root is None: |
|
736 | 736 | return None |
|
737 | 737 | docket = self._nm_docket |
|
738 | 738 | changed, data = nodemaputil.update_persistent_data( |
|
739 | 739 | self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev |
|
740 | 740 | ) |
|
741 | 741 | |
|
742 | 742 | self._nm_root = self._nm_max_idx = self._nm_docket = None |
|
743 | 743 | return docket, changed, data |
|
744 | 744 | |
|
745 | 745 | def update_nodemap_data(self, docket, nm_data): |
|
746 | 746 | """provide full block of persisted binary data for a nodemap |
|
747 | 747 | |
|
748 | 748 | The data are expected to come from disk. See `nodemap_data_all` for a |
|
749 | 749 | producer of such data."""
|
750 | 750 | if nm_data is not None: |
|
751 | 751 | self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data) |
|
752 | 752 | if self._nm_root: |
|
753 | 753 | self._nm_docket = docket |
|
754 | 754 | else: |
|
755 | 755 | self._nm_root = self._nm_max_idx = self._nm_docket = None |
|
756 | 756 | |
|
757 | 757 | |
|
758 | 758 | class InlinedIndexObject(BaseIndexObject): |
|
759 | 759 | def __init__(self, data, inline=0): |
|
760 | 760 | self._data = data |
|
761 | 761 | self._lgt = self._inline_scan(None) |
|
762 | 762 | self._inline_scan(self._lgt) |
|
763 | 763 | self._extra = [] |
|
764 | 764 | |
|
765 | 765 | def _inline_scan(self, lgt): |
|
766 | 766 | off = 0 |
|
767 | 767 | if lgt is not None: |
|
768 | 768 | self._offsets = [0] * lgt |
|
769 | 769 | count = 0 |
|
770 | 770 | while off <= len(self._data) - self.entry_size: |
|
771 | 771 | start = off + self.big_int_size |
|
772 | 772 | (s,) = struct.unpack( |
|
773 | 773 | b'>i', |
|
774 | 774 | self._data[start : start + self.int_size], |
|
775 | 775 | ) |
|
776 | 776 | if lgt is not None: |
|
777 | 777 | self._offsets[count] = off |
|
778 | 778 | count += 1 |
|
779 | 779 | off += self.entry_size + s |
|
780 | 780 | if off != len(self._data): |
|
781 | 781 | raise ValueError(b"corrupted data") |
|
782 | 782 | return count |
|
783 | 783 | |
|
784 | 784 | def __delitem__(self, i): |
|
785 | 785 | if not isinstance(i, slice) or not i.stop == -1 or i.step is not None: |
|
786 | 786 | raise ValueError(b"deleting slices only supports a:-1 with step 1") |
|
787 | 787 | i = i.start |
|
788 | 788 | self._check_index(i) |
|
789 | 789 | self._stripnodes(i) |
|
790 | 790 | if i < self._lgt: |
|
791 | 791 | self._offsets = self._offsets[:i] |
|
792 | 792 | self._lgt = i |
|
793 | 793 | self._extra = [] |
|
794 | 794 | else: |
|
795 | 795 | self._extra = self._extra[: i - self._lgt] |
|
796 | 796 | |
|
797 | 797 | def _calculate_index(self, i): |
|
798 | 798 | return self._offsets[i] |
|
799 | 799 | |
|
800 | 800 | |
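
_inline_scan() walks fixed-size entry headers interleaved with variable-length payloads and records the offset of each entry. A standalone sketch of the same scanning pattern, with a made-up header layout:

    import struct

    HEADER = struct.Struct('>Qi')  # hypothetical: 8 opaque bytes + payload size

    def scan_offsets(data):
        offsets, off = [], 0
        while off + HEADER.size <= len(data):
            _, size = HEADER.unpack_from(data, off)
            offsets.append(off)
            off += HEADER.size + size
        if off != len(data):
            raise ValueError('corrupted data')
        return offsets

    blob = HEADER.pack(0, 3) + b'abc' + HEADER.pack(0, 1) + b'x'
    assert scan_offsets(blob) == [0, HEADER.size + 3]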
|
801 | 801 | def parse_index2(data, inline, format=revlog_constants.REVLOGV1): |
|
802 | 802 | if format == revlog_constants.CHANGELOGV2: |
|
803 | 803 | return parse_index_cl_v2(data) |
|
804 | 804 | if not inline: |
|
805 | 805 | if format == revlog_constants.REVLOGV2: |
|
806 | 806 | cls = IndexObject2 |
|
807 | 807 | else: |
|
808 | 808 | cls = IndexObject |
|
809 | 809 | return cls(data), None |
|
810 | 810 | cls = InlinedIndexObject |
|
811 | 811 | return cls(data, inline), (0, data) |
|
812 | 812 | |
|
813 | 813 | |
|
814 | 814 | def parse_index_cl_v2(data): |
|
815 | 815 | return IndexChangelogV2(data), None |
|
816 | 816 | |
|
817 | 817 | |
|
818 | 818 | class IndexObject2(IndexObject): |
|
819 | 819 | index_format = revlog_constants.INDEX_ENTRY_V2 |
|
820 | 820 | |
|
821 | 821 | def replace_sidedata_info( |
|
822 | 822 | self, |
|
823 | 823 | rev, |
|
824 | 824 | sidedata_offset, |
|
825 | 825 | sidedata_length, |
|
826 | 826 | offset_flags, |
|
827 | 827 | compression_mode, |
|
828 | 828 | ): |
|
829 | 829 | """ |
|
830 | 830 | Replace an existing index entry's sidedata offset and length with new |
|
831 | 831 | ones. |
|
832 | 832 | This cannot be used outside of the context of sidedata rewriting, |
|
833 | 833 | inside the transaction that creates the revision `rev`. |
|
834 | 834 | """ |
|
835 | 835 | if rev < 0: |
|
836 | 836 | raise KeyError |
|
837 | 837 | self._check_index(rev) |
|
838 | 838 | if rev < self._lgt: |
|
839 | 839 | msg = b"cannot rewrite entries outside of this transaction" |
|
840 | 840 | raise KeyError(msg) |
|
841 | 841 | else: |
|
842 | 842 | entry = list(self[rev]) |
|
843 | 843 | entry[0] = offset_flags |
|
844 | 844 | entry[8] = sidedata_offset |
|
845 | 845 | entry[9] = sidedata_length |
|
846 | 846 | entry[11] = compression_mode |
|
847 | 847 | entry = tuple(entry) |
|
848 | 848 | new = self._pack_entry(rev, entry) |
|
849 | 849 | self._extra[rev - self._lgt] = new |
|
850 | 850 | |
|
851 | 851 | def _unpack_entry(self, rev, data): |
|
852 | 852 | data = self.index_format.unpack(data) |
|
853 | 853 | entry = data[:10] |
|
854 | 854 | data_comp = data[10] & 3 |
|
855 | 855 | sidedata_comp = (data[10] & (3 << 2)) >> 2 |
|
856 | 856 | return entry + (data_comp, sidedata_comp, revlog_constants.RANK_UNKNOWN) |
|
857 | 857 | |
|
858 | 858 | def _pack_entry(self, rev, entry): |
|
859 | 859 | data = entry[:10] |
|
860 | 860 | data_comp = entry[10] & 3 |
|
861 | 861 | sidedata_comp = (entry[11] & 3) << 2 |
|
862 | 862 | data += (data_comp | sidedata_comp,) |
|
863 | 863 | |
|
864 | 864 | return self.index_format.pack(*data) |
|
865 | 865 | |
|
866 | 866 | def entry_binary(self, rev): |
|
867 | 867 | """return the raw binary string representing a revision""" |
|
868 | 868 | entry = self[rev] |
|
869 | 869 | return self._pack_entry(rev, entry) |
|
870 | 870 | |
|
871 | 871 | def pack_header(self, header): |
|
872 | 872 | """pack header information as binary""" |
|
873 | 873 | msg = 'version header should go in the docket, not the index: %d' |
|
874 | 874 | msg %= header |
|
875 | 875 | raise error.ProgrammingError(msg) |
|
876 | 876 | |
|
877 | 877 | |
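
IndexObject2 stores two independent 2-bit compression modes (data and sidedata) in a single small field. The packing and unpacking amount to the following standalone sketch:

    def pack_modes(data_comp, sidedata_comp):
        assert 0 <= data_comp < 4 and 0 <= sidedata_comp < 4
        return (data_comp & 3) | ((sidedata_comp & 3) << 2)

    def unpack_modes(value):
        return value & 3, (value >> 2) & 3

    assert unpack_modes(pack_modes(2, 1)) == (2, 1)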
|
878 | 878 | class IndexChangelogV2(IndexObject2): |
|
879 | 879 | index_format = revlog_constants.INDEX_ENTRY_CL_V2 |
|
880 | 880 | |
|
881 | 881 | null_item = ( |
|
882 | 882 | IndexObject2.null_item[: revlog_constants.ENTRY_RANK] |
|
883 | 883 | + (0,) # rank of null is 0 |
|
884 | 884 | + IndexObject2.null_item[revlog_constants.ENTRY_RANK :] |
|
885 | 885 | ) |
|
886 | 886 | |
|
887 | 887 | def _unpack_entry(self, rev, data, r=True): |
|
888 | 888 | items = self.index_format.unpack(data) |
|
889 | 889 | return ( |
|
890 | 890 | items[revlog_constants.INDEX_ENTRY_V2_IDX_OFFSET], |
|
891 | 891 | items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSED_LENGTH], |
|
892 | 892 | items[revlog_constants.INDEX_ENTRY_V2_IDX_UNCOMPRESSED_LENGTH], |
|
893 | 893 | rev, |
|
894 | 894 | rev, |
|
895 | 895 | items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_1], |
|
896 | 896 | items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_2], |
|
897 | 897 | items[revlog_constants.INDEX_ENTRY_V2_IDX_NODEID], |
|
898 | 898 | items[revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_OFFSET], |
|
899 | 899 | items[ |
|
900 | 900 | revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_COMPRESSED_LENGTH |
|
901 | 901 | ], |
|
902 | 902 | items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] & 3, |
|
903 | 903 | (items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] >> 2) |
|
904 | 904 | & 3, |
|
905 | 905 | items[revlog_constants.INDEX_ENTRY_V2_IDX_RANK], |
|
906 | 906 | ) |
|
907 | 907 | |
|
908 | 908 | def _pack_entry(self, rev, entry): |
|
909 | 909 | |
|
910 | 910 | base = entry[revlog_constants.ENTRY_DELTA_BASE] |
|
911 | 911 | link_rev = entry[revlog_constants.ENTRY_LINK_REV] |
|
912 | 912 | assert base == rev, (base, rev) |
|
913 | 913 | assert link_rev == rev, (link_rev, rev) |
|
914 | 914 | data = ( |
|
915 | 915 | entry[revlog_constants.ENTRY_DATA_OFFSET], |
|
916 | 916 | entry[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH], |
|
917 | 917 | entry[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH], |
|
918 | 918 | entry[revlog_constants.ENTRY_PARENT_1], |
|
919 | 919 | entry[revlog_constants.ENTRY_PARENT_2], |
|
920 | 920 | entry[revlog_constants.ENTRY_NODE_ID], |
|
921 | 921 | entry[revlog_constants.ENTRY_SIDEDATA_OFFSET], |
|
922 | 922 | entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSED_LENGTH], |
|
923 | 923 | entry[revlog_constants.ENTRY_DATA_COMPRESSION_MODE] & 3 |
|
924 | 924 | | (entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSION_MODE] & 3) |
|
925 | 925 | << 2, |
|
926 | 926 | entry[revlog_constants.ENTRY_RANK], |
|
927 | 927 | ) |
|
928 | 928 | return self.index_format.pack(*data) |
|
929 | 929 | |
|
930 | 930 | |
|
931 | 931 | def parse_index_devel_nodemap(data, inline): |
|
932 | 932 | """like parse_index2, but alway return a PersistentNodeMapIndexObject""" |
|
933 | 933 | return PersistentNodeMapIndexObject(data), None |
|
934 | 934 | |
|
935 | 935 | |
|
936 | 936 | def parse_dirstate(dmap, copymap, st): |
|
937 | 937 | parents = [st[:20], st[20:40]] |
|
938 | 938 | # dereference fields so they will be local in the loop
|
939 | 939 | format = b">cllll" |
|
940 | 940 | e_size = struct.calcsize(format) |
|
941 | 941 | pos1 = 40 |
|
942 | 942 | l = len(st) |
|
943 | 943 | |
|
944 | 944 | # the inner loop |
|
945 | 945 | while pos1 < l: |
|
946 | 946 | pos2 = pos1 + e_size |
|
947 | 947 | e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster |
|
948 | 948 | pos1 = pos2 + e[4] |
|
949 | 949 | f = st[pos2:pos1] |
|
950 | 950 | if b'\0' in f: |
|
951 | 951 | f, c = f.split(b'\0') |
|
952 | 952 | copymap[f] = c |
|
953 | 953 | dmap[f] = DirstateItem.from_v1_data(*e[:4]) |
|
954 | 954 | return parents |
|
955 | 955 | |
|
956 | 956 | |
|
957 | 957 | def pack_dirstate(dmap, copymap, pl): |
|
958 | 958 | cs = stringio() |
|
959 | 959 | write = cs.write |
|
960 | 960 | write(b"".join(pl)) |
|
961 | 961 | for f, e in dmap.items(): |
|
962 | 962 | if f in copymap: |
|
963 | 963 | f = b"%s\0%s" % (f, copymap[f]) |
|
964 | 964 | e = _pack( |
|
965 | 965 | b">cllll", |
|
966 | e.v1_state(), | |
|
967 | e.v1_mode(), | |
|
968 | e.v1_size(), | |
|
969 | e.v1_mtime(), | |
|
966 | e._v1_state(), | |
|
967 | e._v1_mode(), | |
|
968 | e._v1_size(), | |
|
969 | e._v1_mtime(), | |
|
970 | 970 | len(f), |
|
971 | 971 | ) |
|
972 | 972 | write(e) |
|
973 | 973 | write(f) |
|
974 | 974 | return cs.getvalue() |
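
Each dirstate-v1 record written by pack_dirstate() is a one-byte state, four big-endian 32-bit integers (mode, size, mtime, filename length) and then the filename, with an optional NUL-separated copy source appended to the name. A standalone round-trip sketch of that layout:

    import struct

    V1_RECORD = struct.Struct(b'>cllll')

    def pack_record(state, mode, size, mtime, filename):
        return V1_RECORD.pack(state, mode, size, mtime, len(filename)) + filename

    def unpack_record(buf, pos):
        state, mode, size, mtime, flen = V1_RECORD.unpack_from(buf, pos)
        pos += V1_RECORD.size
        return (state, mode, size, mtime, buf[pos:pos + flen]), pos + flen

    rec = pack_record(b'n', 0o100644, 12, 1700000000, b'dir/file.txt')
    entry, end = unpack_record(rec, 0)
    assert entry[0] == b'n' and entry[4] == b'dir/file.txt' and end == len(rec)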
@@ -1,285 +1,246 b'' | |||
|
1 | 1 | use cpython::exc; |
|
2 | 2 | use cpython::ObjectProtocol; |
|
3 | 3 | use cpython::PyBytes; |
|
4 | 4 | use cpython::PyErr; |
|
5 | 5 | use cpython::PyNone; |
|
6 | 6 | use cpython::PyObject; |
|
7 | 7 | use cpython::PyResult; |
|
8 | 8 | use cpython::Python; |
|
9 | 9 | use cpython::PythonObject; |
|
10 | 10 | use hg::dirstate::DirstateEntry; |
|
11 | use hg::dirstate::EntryState; | |
|
12 | 11 | use hg::dirstate::TruncatedTimestamp; |
|
13 | 12 | use std::cell::Cell; |
|
14 | use std::convert::TryFrom; | |
|
15 | 13 | |
|
16 | 14 | py_class!(pub class DirstateItem |py| { |
|
17 | 15 | data entry: Cell<DirstateEntry>; |
|
18 | 16 | |
|
19 | 17 | def __new__( |
|
20 | 18 | _cls, |
|
21 | 19 | wc_tracked: bool = false, |
|
22 | 20 | p1_tracked: bool = false, |
|
23 | 21 | p2_info: bool = false, |
|
24 | 22 | has_meaningful_data: bool = true, |
|
25 | 23 | has_meaningful_mtime: bool = true, |
|
26 | 24 | parentfiledata: Option<(u32, u32, Option<(u32, u32, bool)>)> = None, |
|
27 | 25 | fallback_exec: Option<bool> = None, |
|
28 | 26 | fallback_symlink: Option<bool> = None, |
|
29 | 27 | |
|
30 | 28 | ) -> PyResult<DirstateItem> { |
|
31 | 29 | let mut mode_size_opt = None; |
|
32 | 30 | let mut mtime_opt = None; |
|
33 | 31 | if let Some((mode, size, mtime)) = parentfiledata { |
|
34 | 32 | if has_meaningful_data { |
|
35 | 33 | mode_size_opt = Some((mode, size)) |
|
36 | 34 | } |
|
37 | 35 | if has_meaningful_mtime { |
|
38 | 36 | if let Some(m) = mtime { |
|
39 | 37 | mtime_opt = Some(timestamp(py, m)?); |
|
40 | 38 | } |
|
41 | 39 | } |
|
42 | 40 | } |
|
43 | 41 | let entry = DirstateEntry::from_v2_data( |
|
44 | 42 | wc_tracked, |
|
45 | 43 | p1_tracked, |
|
46 | 44 | p2_info, |
|
47 | 45 | mode_size_opt, |
|
48 | 46 | mtime_opt, |
|
49 | 47 | fallback_exec, |
|
50 | 48 | fallback_symlink, |
|
51 | 49 | ); |
|
52 | 50 | DirstateItem::create_instance(py, Cell::new(entry)) |
|
53 | 51 | } |
|
54 | 52 | |
|
55 | 53 | @property |
|
56 | 54 | def state(&self) -> PyResult<PyBytes> { |
|
57 | 55 | let state_byte: u8 = self.entry(py).get().state().into(); |
|
58 | 56 | Ok(PyBytes::new(py, &[state_byte])) |
|
59 | 57 | } |
|
60 | 58 | |
|
61 | 59 | @property |
|
62 | 60 | def mode(&self) -> PyResult<i32> { |
|
63 | 61 | Ok(self.entry(py).get().mode()) |
|
64 | 62 | } |
|
65 | 63 | |
|
66 | 64 | @property |
|
67 | 65 | def size(&self) -> PyResult<i32> { |
|
68 | 66 | Ok(self.entry(py).get().size()) |
|
69 | 67 | } |
|
70 | 68 | |
|
71 | 69 | @property |
|
72 | 70 | def mtime(&self) -> PyResult<i32> { |
|
73 | 71 | Ok(self.entry(py).get().mtime()) |
|
74 | 72 | } |
|
75 | 73 | |
|
76 | 74 | @property |
|
77 | 75 | def has_fallback_exec(&self) -> PyResult<bool> { |
|
78 | 76 | match self.entry(py).get().get_fallback_exec() { |
|
79 | 77 | Some(_) => Ok(true), |
|
80 | 78 | None => Ok(false), |
|
81 | 79 | } |
|
82 | 80 | } |
|
83 | 81 | |
|
84 | 82 | @property |
|
85 | 83 | def fallback_exec(&self) -> PyResult<Option<bool>> { |
|
86 | 84 | match self.entry(py).get().get_fallback_exec() { |
|
87 | 85 | Some(exec) => Ok(Some(exec)), |
|
88 | 86 | None => Ok(None), |
|
89 | 87 | } |
|
90 | 88 | } |
|
91 | 89 | |
|
92 | 90 | @fallback_exec.setter |
|
93 | 91 | def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> { |
|
94 | 92 | match value { |
|
95 | 93 | None => {self.entry(py).get().set_fallback_exec(None);}, |
|
96 | 94 | Some(value) => { |
|
97 | 95 | if value.is_none(py) { |
|
98 | 96 | self.entry(py).get().set_fallback_exec(None); |
|
99 | 97 | } else { |
|
100 | 98 | self.entry(py).get().set_fallback_exec( |
|
101 | 99 | Some(value.is_true(py)?) |
|
102 | 100 | ); |
|
103 | 101 | }}, |
|
104 | 102 | } |
|
105 | 103 | Ok(()) |
|
106 | 104 | } |
|
107 | 105 | |
|
108 | 106 | @property |
|
109 | 107 | def has_fallback_symlink(&self) -> PyResult<bool> { |
|
110 | 108 | match self.entry(py).get().get_fallback_symlink() { |
|
111 | 109 | Some(_) => Ok(true), |
|
112 | 110 | None => Ok(false), |
|
113 | 111 | } |
|
114 | 112 | } |
|
115 | 113 | |
|
116 | 114 | @property |
|
117 | 115 | def fallback_symlink(&self) -> PyResult<Option<bool>> { |
|
118 | 116 | match self.entry(py).get().get_fallback_symlink() { |
|
119 | 117 | Some(symlink) => Ok(Some(symlink)), |
|
120 | 118 | None => Ok(None), |
|
121 | 119 | } |
|
122 | 120 | } |
|
123 | 121 | |
|
124 | 122 | @fallback_symlink.setter |
|
125 | 123 | def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> { |
|
126 | 124 | match value { |
|
127 | 125 | None => {self.entry(py).get().set_fallback_symlink(None);}, |
|
128 | 126 | Some(value) => { |
|
129 | 127 | if value.is_none(py) { |
|
130 | 128 | self.entry(py).get().set_fallback_symlink(None); |
|
131 | 129 | } else { |
|
132 | 130 | self.entry(py).get().set_fallback_symlink( |
|
133 | 131 | Some(value.is_true(py)?) |
|
134 | 132 | ); |
|
135 | 133 | }}, |
|
136 | 134 | } |
|
137 | 135 | Ok(()) |
|
138 | 136 | } |
|
139 | 137 | |
|
140 | 138 | @property |
|
141 | 139 | def tracked(&self) -> PyResult<bool> { |
|
142 | 140 | Ok(self.entry(py).get().tracked()) |
|
143 | 141 | } |
|
144 | 142 | |
|
145 | 143 | @property |
|
146 | 144 | def p1_tracked(&self) -> PyResult<bool> { |
|
147 | 145 | Ok(self.entry(py).get().p1_tracked()) |
|
148 | 146 | } |
|
149 | 147 | |
|
150 | 148 | @property |
|
151 | 149 | def added(&self) -> PyResult<bool> { |
|
152 | 150 | Ok(self.entry(py).get().added()) |
|
153 | 151 | } |
|
154 | 152 | |
|
155 | 153 | |
|
156 | 154 | @property |
|
157 | 155 | def p2_info(&self) -> PyResult<bool> { |
|
158 | 156 | Ok(self.entry(py).get().p2_info()) |
|
159 | 157 | } |
|
160 | 158 | |
|
161 | 159 | @property |
|
162 | 160 | def removed(&self) -> PyResult<bool> { |
|
163 | 161 | Ok(self.entry(py).get().removed()) |
|
164 | 162 | } |
|
165 | 163 | |
|
166 | 164 | @property |
|
167 | 165 | def maybe_clean(&self) -> PyResult<bool> { |
|
168 | 166 | Ok(self.entry(py).get().maybe_clean()) |
|
169 | 167 | } |
|
170 | 168 | |
|
171 | 169 | @property |
|
172 | 170 | def any_tracked(&self) -> PyResult<bool> { |
|
173 | 171 | Ok(self.entry(py).get().any_tracked()) |
|
174 | 172 | } |
|
175 | 173 | |
|
176 | def v1_state(&self) -> PyResult<PyBytes> { | |
|
177 | let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data(); | |
|
178 | let state_byte: u8 = state.into(); | |
|
179 | Ok(PyBytes::new(py, &[state_byte])) | |
|
180 | } | |
|
181 | ||
|
182 | def v1_mode(&self) -> PyResult<i32> { | |
|
183 | let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data(); | |
|
184 | Ok(mode) | |
|
185 | } | |
|
186 | ||
|
187 | def v1_size(&self) -> PyResult<i32> { | |
|
188 | let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data(); | |
|
189 | Ok(size) | |
|
190 | } | |
|
191 | ||
|
192 | def v1_mtime(&self) -> PyResult<i32> { | |
|
193 | let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data(); | |
|
194 | Ok(mtime) | |
|
195 | } | |
|
196 | ||
|
197 | 174 | def mtime_likely_equal_to(&self, other: (u32, u32, bool)) |
|
198 | 175 | -> PyResult<bool> { |
|
199 | 176 | if let Some(mtime) = self.entry(py).get().truncated_mtime() { |
|
200 | 177 | Ok(mtime.likely_equal(timestamp(py, other)?)) |
|
201 | 178 | } else { |
|
202 | 179 | Ok(false) |
|
203 | 180 | } |
|
204 | 181 | } |
|
205 | 182 | |
|
206 | @classmethod | |
|
207 | def from_v1_data( | |
|
208 | _cls, | |
|
209 | state: PyBytes, | |
|
210 | mode: i32, | |
|
211 | size: i32, | |
|
212 | mtime: i32, | |
|
213 | ) -> PyResult<Self> { | |
|
214 | let state = <[u8; 1]>::try_from(state.data(py)) | |
|
215 | .ok() | |
|
216 | .and_then(|state| EntryState::try_from(state[0]).ok()) | |
|
217 | .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?; | |
|
218 | let entry = DirstateEntry::from_v1_data(state, mode, size, mtime); | |
|
219 | DirstateItem::create_instance(py, Cell::new(entry)) | |
|
220 | } | |
|
221 | ||
|
222 | 183 | def drop_merge_data(&self) -> PyResult<PyNone> { |
|
223 | 184 | self.update(py, |entry| entry.drop_merge_data()); |
|
224 | 185 | Ok(PyNone) |
|
225 | 186 | } |
|
226 | 187 | |
|
227 | 188 | def set_clean( |
|
228 | 189 | &self, |
|
229 | 190 | mode: u32, |
|
230 | 191 | size: u32, |
|
231 | 192 | mtime: (u32, u32, bool), |
|
232 | 193 | ) -> PyResult<PyNone> { |
|
233 | 194 | let mtime = timestamp(py, mtime)?; |
|
234 | 195 | self.update(py, |entry| entry.set_clean(mode, size, mtime)); |
|
235 | 196 | Ok(PyNone) |
|
236 | 197 | } |
|
237 | 198 | |
|
238 | 199 | def set_possibly_dirty(&self) -> PyResult<PyNone> { |
|
239 | 200 | self.update(py, |entry| entry.set_possibly_dirty()); |
|
240 | 201 | Ok(PyNone) |
|
241 | 202 | } |
|
242 | 203 | |
|
243 | 204 | def set_tracked(&self) -> PyResult<PyNone> { |
|
244 | 205 | self.update(py, |entry| entry.set_tracked()); |
|
245 | 206 | Ok(PyNone) |
|
246 | 207 | } |
|
247 | 208 | |
|
248 | 209 | def set_untracked(&self) -> PyResult<PyNone> { |
|
249 | 210 | self.update(py, |entry| entry.set_untracked()); |
|
250 | 211 | Ok(PyNone) |
|
251 | 212 | } |
|
252 | 213 | }); |
|
253 | 214 | |
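
Several of the methods above (mtime_likely_equal_to, set_clean, and the timestamp() helper further down) take mtimes as a (seconds, nanoseconds, second_ambiguous) tuple with the seconds value truncated to 31 bits. A sketch of how such a tuple could be built on the Python side; this is illustrative only, not the code Mercurial itself uses:

    import os

    def truncated_mtime(path, second_ambiguous=False):
        st = os.stat(path)
        seconds = int(st.st_mtime) & 0x7FFF_FFFF        # keep the low 31 bits
        nanoseconds = st.st_mtime_ns % 1_000_000_000    # sub-second part only
        return (seconds, nanoseconds, second_ambiguous)

    print(truncated_mtime('.'))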
|
254 | 215 | impl DirstateItem { |
|
255 | 216 | pub fn new_as_pyobject( |
|
256 | 217 | py: Python<'_>, |
|
257 | 218 | entry: DirstateEntry, |
|
258 | 219 | ) -> PyResult<PyObject> { |
|
259 | 220 | Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object()) |
|
260 | 221 | } |
|
261 | 222 | |
|
262 | 223 | pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry { |
|
263 | 224 | self.entry(py).get() |
|
264 | 225 | } |
|
265 | 226 | |
|
266 | 227 | // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable |
|
267 | 228 | pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) { |
|
268 | 229 | let mut entry = self.entry(py).get(); |
|
269 | 230 | f(&mut entry); |
|
270 | 231 | self.entry(py).set(entry) |
|
271 | 232 | } |
|
272 | 233 | } |
|
273 | 234 | |
|
274 | 235 | pub(crate) fn timestamp( |
|
275 | 236 | py: Python<'_>, |
|
276 | 237 | (s, ns, second_ambiguous): (u32, u32, bool), |
|
277 | 238 | ) -> PyResult<TruncatedTimestamp> { |
|
278 | 239 | TruncatedTimestamp::from_already_truncated(s, ns, second_ambiguous) |
|
279 | 240 | .map_err(|_| { |
|
280 | 241 | PyErr::new::<exc::ValueError, _>( |
|
281 | 242 | py, |
|
282 | 243 | "expected mtime truncated to 31 bits", |
|
283 | 244 | ) |
|
284 | 245 | }) |
|
285 | 246 | } |