dirstate: remove v1_* methods from Python/C/Rust shared API...
Raphaël Gomès
r49920:be9bf75a default
@@ -1,1000 +1,1000 b''
1 # __init__.py - fsmonitor initialization and overrides
1 # __init__.py - fsmonitor initialization and overrides
2 #
2 #
3 # Copyright 2013-2016 Facebook, Inc.
3 # Copyright 2013-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
9
9
10 Integrates the file-watching program Watchman with Mercurial to produce faster
10 Integrates the file-watching program Watchman with Mercurial to produce faster
11 status results.
11 status results.
12
12
13 On a particular Linux system, for a real-world repository with over 400,000
13 On a particular Linux system, for a real-world repository with over 400,000
14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
15 system, with fsmonitor it takes about 0.3 seconds.
15 system, with fsmonitor it takes about 0.3 seconds.
16
16
17 fsmonitor requires no configuration -- it will tell Watchman about your
17 fsmonitor requires no configuration -- it will tell Watchman about your
18 repository as necessary. You'll need to install Watchman from
18 repository as necessary. You'll need to install Watchman from
19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
20
20
21 fsmonitor is incompatible with the largefiles and eol extensions, and
21 fsmonitor is incompatible with the largefiles and eol extensions, and
22 will disable itself if any of those are active.
22 will disable itself if any of those are active.
23
23
24 The following configuration options exist:
24 The following configuration options exist:
25
25
26 ::
26 ::
27
27
28 [fsmonitor]
28 [fsmonitor]
29 mode = {off, on, paranoid}
29 mode = {off, on, paranoid}
30
30
31 When `mode = off`, fsmonitor will disable itself (similar to not loading the
31 When `mode = off`, fsmonitor will disable itself (similar to not loading the
32 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
32 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
33 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
33 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
34 and ensure that the results are consistent.
34 and ensure that the results are consistent.
35
35
36 ::
36 ::
37
37
38 [fsmonitor]
38 [fsmonitor]
39 timeout = (float)
39 timeout = (float)
40
40
41 A value, in seconds, that determines how long fsmonitor will wait for Watchman
41 A value, in seconds, that determines how long fsmonitor will wait for Watchman
42 to return results. Defaults to `2.0`.
42 to return results. Defaults to `2.0`.
43
43
44 ::
44 ::
45
45
46 [fsmonitor]
46 [fsmonitor]
47 blacklistusers = (list of userids)
47 blacklistusers = (list of userids)
48
48
49 A list of usernames for which fsmonitor will disable itself altogether.
49 A list of usernames for which fsmonitor will disable itself altogether.
50
50
51 ::
51 ::
52
52
53 [fsmonitor]
53 [fsmonitor]
54 walk_on_invalidate = (boolean)
54 walk_on_invalidate = (boolean)
55
55
56 Whether or not to walk the whole repo ourselves when our cached state has been
56 Whether or not to walk the whole repo ourselves when our cached state has been
57 invalidated, for example when Watchman has been restarted or .hgignore rules
57 invalidated, for example when Watchman has been restarted or .hgignore rules
58 have been changed. Walking the repo in that case can result in competing for
58 have been changed. Walking the repo in that case can result in competing for
59 I/O with Watchman. For large repos it is recommended to set this value to
59 I/O with Watchman. For large repos it is recommended to set this value to
60 false. You may wish to set this to true if you have a very fast filesystem
60 false. You may wish to set this to true if you have a very fast filesystem
61 that can outpace the IPC overhead of getting the result data for the full repo
61 that can outpace the IPC overhead of getting the result data for the full repo
62 from Watchman. Defaults to false.
62 from Watchman. Defaults to false.
63
63
64 ::
64 ::
65
65
66 [fsmonitor]
66 [fsmonitor]
67 warn_when_unused = (boolean)
67 warn_when_unused = (boolean)
68
68
69 Whether to print a warning during certain operations when fsmonitor would be
69 Whether to print a warning during certain operations when fsmonitor would be
70 beneficial to performance but isn't enabled.
70 beneficial to performance but isn't enabled.
71
71
72 ::
72 ::
73
73
74 [fsmonitor]
74 [fsmonitor]
75 warn_update_file_count = (integer)
75 warn_update_file_count = (integer)
76 # or when mercurial is built with rust support
76 # or when mercurial is built with rust support
77 warn_update_file_count_rust = (integer)
77 warn_update_file_count_rust = (integer)
78
78
79 If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will
79 If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will
80 be printed during working directory updates if this many files will be
80 be printed during working directory updates if this many files will be
81 created.
81 created.
82 '''
82 '''
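# Putting the options documented above together, a minimal combined
# configuration could look like the following (keys and defaults are taken
# from the docstring; the warn_update_file_count value is purely
# illustrative):
#
#   [fsmonitor]
#   mode = on
#   timeout = 2.0
#   walk_on_invalidate = false
#   warn_when_unused = true
#   warn_update_file_count = 50000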
83
83
84 # Platforms Supported
84 # Platforms Supported
85 # ===================
85 # ===================
86 #
86 #
87 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
87 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
88 # even under severe loads.
88 # even under severe loads.
89 #
89 #
90 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
90 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
91 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
91 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
92 # user testing under normal loads.
92 # user testing under normal loads.
93 #
93 #
94 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
94 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
95 # very little testing has been done.
95 # very little testing has been done.
96 #
96 #
97 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
97 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
98 #
98 #
99 # Known Issues
99 # Known Issues
100 # ============
100 # ============
101 #
101 #
102 # * fsmonitor will disable itself if any of the following extensions are
102 # * fsmonitor will disable itself if any of the following extensions are
103 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
103 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
104 # * fsmonitor will produce incorrect results if nested repos that are not
104 # * fsmonitor will produce incorrect results if nested repos that are not
105 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
105 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
106 #
106 #
107 # The issues related to nested repos and subrepos are probably not fundamental
107 # The issues related to nested repos and subrepos are probably not fundamental
108 # ones. Patches to fix them are welcome.
108 # ones. Patches to fix them are welcome.
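# As a concrete instance of the nested-repo workaround above (the path is
# hypothetical): a non-subrepo checkout living at vendor/otherrepo can be
# hidden from fsmonitor and `hg status` with an `.hgignore` entry such as
#
#   syntax: regexp
#   ^vendor/otherrepo/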
109
109
110
110
111 import codecs
111 import codecs
112 import os
112 import os
113 import stat
113 import stat
114 import sys
114 import sys
115 import tempfile
115 import tempfile
116 import weakref
116 import weakref
117
117
118 from mercurial.i18n import _
118 from mercurial.i18n import _
119 from mercurial.node import hex
119 from mercurial.node import hex
120 from mercurial.pycompat import open
120 from mercurial.pycompat import open
121 from mercurial import (
121 from mercurial import (
122 context,
122 context,
123 encoding,
123 encoding,
124 error,
124 error,
125 extensions,
125 extensions,
126 localrepo,
126 localrepo,
127 merge,
127 merge,
128 pathutil,
128 pathutil,
129 pycompat,
129 pycompat,
130 registrar,
130 registrar,
131 scmutil,
131 scmutil,
132 util,
132 util,
133 )
133 )
134 from mercurial import match as matchmod
134 from mercurial import match as matchmod
135 from mercurial.utils import (
135 from mercurial.utils import (
136 hashutil,
136 hashutil,
137 stringutil,
137 stringutil,
138 )
138 )
139
139
140 from . import (
140 from . import (
141 pywatchman,
141 pywatchman,
142 state,
142 state,
143 watchmanclient,
143 watchmanclient,
144 )
144 )
145
145
146 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
146 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
147 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
147 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
148 # be specifying the version(s) of Mercurial they are tested with, or
148 # be specifying the version(s) of Mercurial they are tested with, or
149 # leave the attribute unspecified.
149 # leave the attribute unspecified.
150 testedwith = b'ships-with-hg-core'
150 testedwith = b'ships-with-hg-core'
151
151
152 configtable = {}
152 configtable = {}
153 configitem = registrar.configitem(configtable)
153 configitem = registrar.configitem(configtable)
154
154
155 configitem(
155 configitem(
156 b'fsmonitor',
156 b'fsmonitor',
157 b'mode',
157 b'mode',
158 default=b'on',
158 default=b'on',
159 )
159 )
160 configitem(
160 configitem(
161 b'fsmonitor',
161 b'fsmonitor',
162 b'walk_on_invalidate',
162 b'walk_on_invalidate',
163 default=False,
163 default=False,
164 )
164 )
165 configitem(
165 configitem(
166 b'fsmonitor',
166 b'fsmonitor',
167 b'timeout',
167 b'timeout',
168 default=b'2',
168 default=b'2',
169 )
169 )
170 configitem(
170 configitem(
171 b'fsmonitor',
171 b'fsmonitor',
172 b'blacklistusers',
172 b'blacklistusers',
173 default=list,
173 default=list,
174 )
174 )
175 configitem(
175 configitem(
176 b'fsmonitor',
176 b'fsmonitor',
177 b'watchman_exe',
177 b'watchman_exe',
178 default=b'watchman',
178 default=b'watchman',
179 )
179 )
180 configitem(
180 configitem(
181 b'fsmonitor',
181 b'fsmonitor',
182 b'verbose',
182 b'verbose',
183 default=True,
183 default=True,
184 experimental=True,
184 experimental=True,
185 )
185 )
186 configitem(
186 configitem(
187 b'experimental',
187 b'experimental',
188 b'fsmonitor.transaction_notify',
188 b'fsmonitor.transaction_notify',
189 default=False,
189 default=False,
190 )
190 )
191
191
192 # This extension is incompatible with the following blacklisted extensions
192 # This extension is incompatible with the following blacklisted extensions
193 # and will disable itself when encountering one of these:
193 # and will disable itself when encountering one of these:
194 _blacklist = [b'largefiles', b'eol']
194 _blacklist = [b'largefiles', b'eol']
195
195
196
196
197 def debuginstall(ui, fm):
197 def debuginstall(ui, fm):
198 fm.write(
198 fm.write(
199 b"fsmonitor-watchman",
199 b"fsmonitor-watchman",
200 _(b"fsmonitor checking for watchman binary... (%s)\n"),
200 _(b"fsmonitor checking for watchman binary... (%s)\n"),
201 ui.configpath(b"fsmonitor", b"watchman_exe"),
201 ui.configpath(b"fsmonitor", b"watchman_exe"),
202 )
202 )
203 root = tempfile.mkdtemp()
203 root = tempfile.mkdtemp()
204 c = watchmanclient.client(ui, root)
204 c = watchmanclient.client(ui, root)
205 err = None
205 err = None
206 try:
206 try:
207 v = c.command(b"version")
207 v = c.command(b"version")
208 fm.write(
208 fm.write(
209 b"fsmonitor-watchman-version",
209 b"fsmonitor-watchman-version",
210 _(b" watchman binary version %s\n"),
210 _(b" watchman binary version %s\n"),
211 pycompat.bytestr(v["version"]),
211 pycompat.bytestr(v["version"]),
212 )
212 )
213 except watchmanclient.Unavailable as e:
213 except watchmanclient.Unavailable as e:
214 err = stringutil.forcebytestr(e)
214 err = stringutil.forcebytestr(e)
215 fm.condwrite(
215 fm.condwrite(
216 err,
216 err,
217 b"fsmonitor-watchman-error",
217 b"fsmonitor-watchman-error",
218 _(b" watchman binary missing or broken: %s\n"),
218 _(b" watchman binary missing or broken: %s\n"),
219 err,
219 err,
220 )
220 )
221 return 1 if err else 0
221 return 1 if err else 0
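# For reference, the formatter lines emitted by the hook above when watchman
# is found look roughly like this (version string illustrative):
#
#   fsmonitor checking for watchman binary... (watchman)
#    watchman binary version 2022.12.05.00
#
# When the binary is missing or broken, the second line is replaced by the
# "watchman binary missing or broken: ..." message and the hook returns 1.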
222
222
223
223
224 def _handleunavailable(ui, state, ex):
224 def _handleunavailable(ui, state, ex):
225 """Exception handler for Watchman interaction exceptions"""
225 """Exception handler for Watchman interaction exceptions"""
226 if isinstance(ex, watchmanclient.Unavailable):
226 if isinstance(ex, watchmanclient.Unavailable):
227 # experimental config: fsmonitor.verbose
227 # experimental config: fsmonitor.verbose
228 if ex.warn and ui.configbool(b'fsmonitor', b'verbose'):
228 if ex.warn and ui.configbool(b'fsmonitor', b'verbose'):
229 if b'illegal_fstypes' not in stringutil.forcebytestr(ex):
229 if b'illegal_fstypes' not in stringutil.forcebytestr(ex):
230 ui.warn(stringutil.forcebytestr(ex) + b'\n')
230 ui.warn(stringutil.forcebytestr(ex) + b'\n')
231 if ex.invalidate:
231 if ex.invalidate:
232 state.invalidate()
232 state.invalidate()
233 # experimental config: fsmonitor.verbose
233 # experimental config: fsmonitor.verbose
234 if ui.configbool(b'fsmonitor', b'verbose'):
234 if ui.configbool(b'fsmonitor', b'verbose'):
235 ui.log(
235 ui.log(
236 b'fsmonitor',
236 b'fsmonitor',
237 b'Watchman unavailable: %s\n',
237 b'Watchman unavailable: %s\n',
238 stringutil.forcebytestr(ex.msg),
238 stringutil.forcebytestr(ex.msg),
239 )
239 )
240 else:
240 else:
241 ui.log(
241 ui.log(
242 b'fsmonitor',
242 b'fsmonitor',
243 b'Watchman exception: %s\n',
243 b'Watchman exception: %s\n',
244 stringutil.forcebytestr(ex),
244 stringutil.forcebytestr(ex),
245 )
245 )
246
246
247
247
248 def _hashignore(ignore):
248 def _hashignore(ignore):
249 """Calculate hash for ignore patterns and filenames
249 """Calculate hash for ignore patterns and filenames
250
250
251 If this information changes between Mercurial invocations, we can't
251 If this information changes between Mercurial invocations, we can't
252 rely on Watchman information anymore and have to re-scan the working
252 rely on Watchman information anymore and have to re-scan the working
253 copy.
253 copy.
254
254
255 """
255 """
256 sha1 = hashutil.sha1()
256 sha1 = hashutil.sha1()
257 sha1.update(pycompat.byterepr(ignore))
257 sha1.update(pycompat.byterepr(ignore))
258 return pycompat.sysbytes(sha1.hexdigest())
258 return pycompat.sysbytes(sha1.hexdigest())
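# The digest returned above is the `ignorehash` fingerprint that poststatus
# persists via state.set() and that overridewalk() compares against the value
# from state.get(); a mismatch means the ignore rules changed, so the saved
# Watchman clock can no longer be trusted.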
259
259
260
260
261 _watchmanencoding = pywatchman.encoding.get_local_encoding()
261 _watchmanencoding = pywatchman.encoding.get_local_encoding()
262 _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
262 _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
263 _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
263 _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
264
264
265
265
266 def _watchmantofsencoding(path):
266 def _watchmantofsencoding(path):
267 """Fix path to match watchman and local filesystem encoding
267 """Fix path to match watchman and local filesystem encoding
268
268
269 watchman's paths encoding can differ from filesystem encoding. For example,
269 watchman's paths encoding can differ from filesystem encoding. For example,
270 on Windows, it's always utf-8.
270 on Windows, it's always utf-8.
271 """
271 """
272 try:
272 try:
273 decoded = path.decode(_watchmanencoding)
273 decoded = path.decode(_watchmanencoding)
274 except UnicodeDecodeError as e:
274 except UnicodeDecodeError as e:
275 raise error.Abort(
275 raise error.Abort(
276 stringutil.forcebytestr(e), hint=b'watchman encoding error'
276 stringutil.forcebytestr(e), hint=b'watchman encoding error'
277 )
277 )
278
278
279 try:
279 try:
280 encoded = decoded.encode(_fsencoding, 'strict')
280 encoded = decoded.encode(_fsencoding, 'strict')
281 except UnicodeEncodeError as e:
281 except UnicodeEncodeError as e:
282 raise error.Abort(stringutil.forcebytestr(e))
282 raise error.Abort(stringutil.forcebytestr(e))
283
283
284 return encoded
284 return encoded
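# A minimal sketch of the same round trip, assuming Watchman hands back UTF-8
# while the local filesystem encoding is (hypothetically) latin-1:
#
#     raw = b'r\xc3\xa9sum\xc3\xa9.txt'              # bytes as received from Watchman
#     fixed = raw.decode('utf-8').encode('latin-1')  # b'r\xe9sum\xe9.txt'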
285
285
286
286
287 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
287 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
288 """Replacement for dirstate.walk, hooking into Watchman.
288 """Replacement for dirstate.walk, hooking into Watchman.
289
289
290 Whenever full is False, ignored is False, and the Watchman client is
290 Whenever full is False, ignored is False, and the Watchman client is
291 available, use Watchman combined with saved state to possibly return only a
291 available, use Watchman combined with saved state to possibly return only a
292 subset of files."""
292 subset of files."""
293
293
294 def bail(reason):
294 def bail(reason):
295 self._ui.debug(b'fsmonitor: fallback to core status, %s\n' % reason)
295 self._ui.debug(b'fsmonitor: fallback to core status, %s\n' % reason)
296 return orig(match, subrepos, unknown, ignored, full=True)
296 return orig(match, subrepos, unknown, ignored, full=True)
297
297
298 if full:
298 if full:
299 return bail(b'full rewalk requested')
299 return bail(b'full rewalk requested')
300 if ignored:
300 if ignored:
301 return bail(b'listing ignored files')
301 return bail(b'listing ignored files')
302 if not self._watchmanclient.available():
302 if not self._watchmanclient.available():
303 return bail(b'client unavailable')
303 return bail(b'client unavailable')
304 state = self._fsmonitorstate
304 state = self._fsmonitorstate
305 clock, ignorehash, notefiles = state.get()
305 clock, ignorehash, notefiles = state.get()
306 if not clock:
306 if not clock:
307 if state.walk_on_invalidate:
307 if state.walk_on_invalidate:
308 return bail(b'no clock')
308 return bail(b'no clock')
309 # Initial NULL clock value, see
309 # Initial NULL clock value, see
310 # https://facebook.github.io/watchman/docs/clockspec.html
310 # https://facebook.github.io/watchman/docs/clockspec.html
311 clock = b'c:0:0'
311 clock = b'c:0:0'
312 notefiles = []
312 notefiles = []
313
313
314 ignore = self._ignore
314 ignore = self._ignore
315 dirignore = self._dirignore
315 dirignore = self._dirignore
316 if unknown:
316 if unknown:
317 if _hashignore(ignore) != ignorehash and clock != b'c:0:0':
317 if _hashignore(ignore) != ignorehash and clock != b'c:0:0':
318 # ignore list changed -- can't rely on Watchman state any more
318 # ignore list changed -- can't rely on Watchman state any more
319 if state.walk_on_invalidate:
319 if state.walk_on_invalidate:
320 return bail(b'ignore rules changed')
320 return bail(b'ignore rules changed')
321 notefiles = []
321 notefiles = []
322 clock = b'c:0:0'
322 clock = b'c:0:0'
323 else:
323 else:
324 # always ignore
324 # always ignore
325 ignore = util.always
325 ignore = util.always
326 dirignore = util.always
326 dirignore = util.always
327
327
328 matchfn = match.matchfn
328 matchfn = match.matchfn
329 matchalways = match.always()
329 matchalways = match.always()
330 dmap = self._map
330 dmap = self._map
331 if util.safehasattr(dmap, b'_map'):
331 if util.safehasattr(dmap, b'_map'):
332 # for better performance, directly access the inner dirstate map if the
332 # for better performance, directly access the inner dirstate map if the
333 # standard dirstate implementation is in use.
333 # standard dirstate implementation is in use.
334 dmap = dmap._map
334 dmap = dmap._map
335 nonnormalset = {
335 nonnormalset = {
336 f
336 f
337 for f, e in self._map.items()
337 for f, e in self._map.items()
338 - if e.v1_state() != b"n" or e.v1_mtime() == -1
338 + if e._v1_state() != b"n" or e._v1_mtime() == -1
339 }
339 }
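# Here "nonnormal" means dirstate entries whose v1 state is anything other
# than b"n" (normal), or whose recorded mtime is -1 (size/mtime cannot be
# trusted); such files must be re-examined even if Watchman reports no change.
# This changeset only swaps the accessors from the removed shared-API
# v1_state()/v1_mtime() to the Python-only _v1_state()/_v1_mtime().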
340
340
341 copymap = self._map.copymap
341 copymap = self._map.copymap
342 getkind = stat.S_IFMT
342 getkind = stat.S_IFMT
343 dirkind = stat.S_IFDIR
343 dirkind = stat.S_IFDIR
344 regkind = stat.S_IFREG
344 regkind = stat.S_IFREG
345 lnkkind = stat.S_IFLNK
345 lnkkind = stat.S_IFLNK
346 join = self._join
346 join = self._join
347 normcase = util.normcase
347 normcase = util.normcase
348 fresh_instance = False
348 fresh_instance = False
349
349
350 exact = skipstep3 = False
350 exact = skipstep3 = False
351 if match.isexact(): # match.exact
351 if match.isexact(): # match.exact
352 exact = True
352 exact = True
353 dirignore = util.always # skip step 2
353 dirignore = util.always # skip step 2
354 elif match.prefix(): # match.match, no patterns
354 elif match.prefix(): # match.match, no patterns
355 skipstep3 = True
355 skipstep3 = True
356
356
357 if not exact and self._checkcase:
357 if not exact and self._checkcase:
358 # note that even though we could receive directory entries, we're only
358 # note that even though we could receive directory entries, we're only
359 # interested in checking if a file with the same name exists. So only
359 # interested in checking if a file with the same name exists. So only
360 # normalize files if possible.
360 # normalize files if possible.
361 normalize = self._normalizefile
361 normalize = self._normalizefile
362 skipstep3 = False
362 skipstep3 = False
363 else:
363 else:
364 normalize = None
364 normalize = None
365
365
366 # step 1: find all explicit files
366 # step 1: find all explicit files
367 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
367 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
368
368
369 skipstep3 = skipstep3 and not (work or dirsnotfound)
369 skipstep3 = skipstep3 and not (work or dirsnotfound)
370 work = [d for d in work if not dirignore(d[0])]
370 work = [d for d in work if not dirignore(d[0])]
371
371
372 if not work and (exact or skipstep3):
372 if not work and (exact or skipstep3):
373 for s in subrepos:
373 for s in subrepos:
374 del results[s]
374 del results[s]
375 del results[b'.hg']
375 del results[b'.hg']
376 return results
376 return results
377
377
378 # step 2: query Watchman
378 # step 2: query Watchman
379 try:
379 try:
380 # Use the user-configured timeout for the query.
380 # Use the user-configured timeout for the query.
381 # Add a little slack over the top of the user query to allow for
381 # Add a little slack over the top of the user query to allow for
382 # overheads while transferring the data
382 # overheads while transferring the data
383 self._watchmanclient.settimeout(state.timeout + 0.1)
383 self._watchmanclient.settimeout(state.timeout + 0.1)
384 result = self._watchmanclient.command(
384 result = self._watchmanclient.command(
385 b'query',
385 b'query',
386 {
386 {
387 b'fields': [b'mode', b'mtime', b'size', b'exists', b'name'],
387 b'fields': [b'mode', b'mtime', b'size', b'exists', b'name'],
388 b'since': clock,
388 b'since': clock,
389 b'expression': [
389 b'expression': [
390 b'not',
390 b'not',
391 [
391 [
392 b'anyof',
392 b'anyof',
393 [b'dirname', b'.hg'],
393 [b'dirname', b'.hg'],
394 [b'name', b'.hg', b'wholename'],
394 [b'name', b'.hg', b'wholename'],
395 ],
395 ],
396 ],
396 ],
397 b'sync_timeout': int(state.timeout * 1000),
397 b'sync_timeout': int(state.timeout * 1000),
398 b'empty_on_fresh_instance': state.walk_on_invalidate,
398 b'empty_on_fresh_instance': state.walk_on_invalidate,
399 },
399 },
400 )
400 )
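# For debugging, roughly the same request can be reproduced with the watchman
# CLI; watchmanclient supplies the watched root, so the JSON command that ends
# up on the wire is essentially (root path hypothetical, clock either the last
# saved value or the initial "c:0:0", sync_timeout derived from the configured
# fsmonitor.timeout):
#
#   ["query", "/path/to/repo",
#    {"fields": ["mode", "mtime", "size", "exists", "name"],
#     "since": "c:0:0",
#     "expression": ["not", ["anyof", ["dirname", ".hg"],
#                                     ["name", ".hg", "wholename"]]],
#     "sync_timeout": 2000,
#     "empty_on_fresh_instance": false}]
#
# e.g. fed to `watchman -j` on stdin.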
401 except Exception as ex:
401 except Exception as ex:
402 _handleunavailable(self._ui, state, ex)
402 _handleunavailable(self._ui, state, ex)
403 self._watchmanclient.clearconnection()
403 self._watchmanclient.clearconnection()
404 return bail(b'exception during run')
404 return bail(b'exception during run')
405 else:
405 else:
406 # We need to propagate the last observed clock up so that we
406 # We need to propagate the last observed clock up so that we
407 # can use it for our next query
407 # can use it for our next query
408 state.setlastclock(pycompat.sysbytes(result[b'clock']))
408 state.setlastclock(pycompat.sysbytes(result[b'clock']))
409 if result[b'is_fresh_instance']:
409 if result[b'is_fresh_instance']:
410 if state.walk_on_invalidate:
410 if state.walk_on_invalidate:
411 state.invalidate()
411 state.invalidate()
412 return bail(b'fresh instance')
412 return bail(b'fresh instance')
413 fresh_instance = True
413 fresh_instance = True
414 # Ignore any prior notable files from the state info
414 # Ignore any prior notable files from the state info
415 notefiles = []
415 notefiles = []
416
416
417 # for file paths which require normalization and we encounter a case
417 # for file paths which require normalization and we encounter a case
418 # collision, we store our own foldmap
418 # collision, we store our own foldmap
419 if normalize:
419 if normalize:
420 foldmap = {normcase(k): k for k in results}
420 foldmap = {normcase(k): k for k in results}
421
421
422 switch_slashes = pycompat.ossep == b'\\'
422 switch_slashes = pycompat.ossep == b'\\'
423 # The order of the results is, strictly speaking, undefined.
423 # The order of the results is, strictly speaking, undefined.
424 # For case changes on a case insensitive filesystem we may receive
424 # For case changes on a case insensitive filesystem we may receive
425 # two entries, one with exists=True and another with exists=False.
425 # two entries, one with exists=True and another with exists=False.
426 # The exists=True entries in the same response should be interpreted
426 # The exists=True entries in the same response should be interpreted
427 # as being happens-after the exists=False entries due to the way that
427 # as being happens-after the exists=False entries due to the way that
428 # Watchman tracks files. We use this property to reconcile deletes
428 # Watchman tracks files. We use this property to reconcile deletes
429 # for name case changes.
429 # for name case changes.
430 for entry in result[b'files']:
430 for entry in result[b'files']:
431 fname = entry[b'name']
431 fname = entry[b'name']
432
432
433 # Watchman always gives us a str. Normalize to bytes on Python 3
433 # Watchman always gives us a str. Normalize to bytes on Python 3
434 # using Watchman's encoding, if needed.
434 # using Watchman's encoding, if needed.
435 if not isinstance(fname, bytes):
435 if not isinstance(fname, bytes):
436 fname = fname.encode(_watchmanencoding)
436 fname = fname.encode(_watchmanencoding)
437
437
438 if _fixencoding:
438 if _fixencoding:
439 fname = _watchmantofsencoding(fname)
439 fname = _watchmantofsencoding(fname)
440
440
441 if switch_slashes:
441 if switch_slashes:
442 fname = fname.replace(b'\\', b'/')
442 fname = fname.replace(b'\\', b'/')
443 if normalize:
443 if normalize:
444 normed = normcase(fname)
444 normed = normcase(fname)
445 fname = normalize(fname, True, True)
445 fname = normalize(fname, True, True)
446 foldmap[normed] = fname
446 foldmap[normed] = fname
447 fmode = entry[b'mode']
447 fmode = entry[b'mode']
448 fexists = entry[b'exists']
448 fexists = entry[b'exists']
449 kind = getkind(fmode)
449 kind = getkind(fmode)
450
450
451 if b'/.hg/' in fname or fname.endswith(b'/.hg'):
451 if b'/.hg/' in fname or fname.endswith(b'/.hg'):
452 return bail(b'nested-repo-detected')
452 return bail(b'nested-repo-detected')
453
453
454 if not fexists:
454 if not fexists:
455 # if marked as deleted and we don't already have a change
455 # if marked as deleted and we don't already have a change
456 # record, mark it as deleted. If we already have an entry
456 # record, mark it as deleted. If we already have an entry
457 # for fname then it was either part of walkexplicit or was
457 # for fname then it was either part of walkexplicit or was
458 # an earlier result that was a case change
458 # an earlier result that was a case change
459 if (
459 if (
460 fname not in results
460 fname not in results
461 and fname in dmap
461 and fname in dmap
462 and (matchalways or matchfn(fname))
462 and (matchalways or matchfn(fname))
463 ):
463 ):
464 results[fname] = None
464 results[fname] = None
465 elif kind == dirkind:
465 elif kind == dirkind:
466 if fname in dmap and (matchalways or matchfn(fname)):
466 if fname in dmap and (matchalways or matchfn(fname)):
467 results[fname] = None
467 results[fname] = None
468 elif kind == regkind or kind == lnkkind:
468 elif kind == regkind or kind == lnkkind:
469 if fname in dmap:
469 if fname in dmap:
470 if matchalways or matchfn(fname):
470 if matchalways or matchfn(fname):
471 results[fname] = entry
471 results[fname] = entry
472 elif (matchalways or matchfn(fname)) and not ignore(fname):
472 elif (matchalways or matchfn(fname)) and not ignore(fname):
473 results[fname] = entry
473 results[fname] = entry
474 elif fname in dmap and (matchalways or matchfn(fname)):
474 elif fname in dmap and (matchalways or matchfn(fname)):
475 results[fname] = None
475 results[fname] = None
476
476
477 # step 3: query notable files we don't already know about
477 # step 3: query notable files we don't already know about
478 # XXX try not to iterate over the entire dmap
478 # XXX try not to iterate over the entire dmap
479 if normalize:
479 if normalize:
480 # any notable files that have changed case will already be handled
480 # any notable files that have changed case will already be handled
481 # above, so just check membership in the foldmap
481 # above, so just check membership in the foldmap
482 notefiles = {
482 notefiles = {
483 normalize(f, True, True)
483 normalize(f, True, True)
484 for f in notefiles
484 for f in notefiles
485 if normcase(f) not in foldmap
485 if normcase(f) not in foldmap
486 }
486 }
487 visit = {
487 visit = {
488 f
488 f
489 for f in notefiles
489 for f in notefiles
490 if (f not in results and matchfn(f) and (f in dmap or not ignore(f)))
490 if (f not in results and matchfn(f) and (f in dmap or not ignore(f)))
491 }
491 }
492
492
493 if not fresh_instance:
493 if not fresh_instance:
494 if matchalways:
494 if matchalways:
495 visit.update(f for f in nonnormalset if f not in results)
495 visit.update(f for f in nonnormalset if f not in results)
496 visit.update(f for f in copymap if f not in results)
496 visit.update(f for f in copymap if f not in results)
497 else:
497 else:
498 visit.update(
498 visit.update(
499 f for f in nonnormalset if f not in results and matchfn(f)
499 f for f in nonnormalset if f not in results and matchfn(f)
500 )
500 )
501 visit.update(f for f in copymap if f not in results and matchfn(f))
501 visit.update(f for f in copymap if f not in results and matchfn(f))
502 else:
502 else:
503 if matchalways:
503 if matchalways:
504 visit.update(f for f, st in dmap.items() if f not in results)
504 visit.update(f for f, st in dmap.items() if f not in results)
505 visit.update(f for f in copymap if f not in results)
505 visit.update(f for f in copymap if f not in results)
506 else:
506 else:
507 visit.update(
507 visit.update(
508 f for f, st in dmap.items() if f not in results and matchfn(f)
508 f for f, st in dmap.items() if f not in results and matchfn(f)
509 )
509 )
510 visit.update(f for f in copymap if f not in results and matchfn(f))
510 visit.update(f for f in copymap if f not in results and matchfn(f))
511
511
512 audit = pathutil.pathauditor(self._root, cached=True).check
512 audit = pathutil.pathauditor(self._root, cached=True).check
513 auditpass = [f for f in visit if audit(f)]
513 auditpass = [f for f in visit if audit(f)]
514 auditpass.sort()
514 auditpass.sort()
515 auditfail = visit.difference(auditpass)
515 auditfail = visit.difference(auditpass)
516 for f in auditfail:
516 for f in auditfail:
517 results[f] = None
517 results[f] = None
518
518
519 nf = iter(auditpass)
519 nf = iter(auditpass)
520 for st in util.statfiles([join(f) for f in auditpass]):
520 for st in util.statfiles([join(f) for f in auditpass]):
521 f = next(nf)
521 f = next(nf)
522 if st or f in dmap:
522 if st or f in dmap:
523 results[f] = st
523 results[f] = st
524
524
525 for s in subrepos:
525 for s in subrepos:
526 del results[s]
526 del results[s]
527 del results[b'.hg']
527 del results[b'.hg']
528 return results
528 return results
529
529
530
530
531 def overridestatus(
531 def overridestatus(
532 orig,
532 orig,
533 self,
533 self,
534 node1=b'.',
534 node1=b'.',
535 node2=None,
535 node2=None,
536 match=None,
536 match=None,
537 ignored=False,
537 ignored=False,
538 clean=False,
538 clean=False,
539 unknown=False,
539 unknown=False,
540 listsubrepos=False,
540 listsubrepos=False,
541 ):
541 ):
542 listignored = ignored
542 listignored = ignored
543 listclean = clean
543 listclean = clean
544 listunknown = unknown
544 listunknown = unknown
545
545
546 def _cmpsets(l1, l2):
546 def _cmpsets(l1, l2):
547 try:
547 try:
548 if b'FSMONITOR_LOG_FILE' in encoding.environ:
548 if b'FSMONITOR_LOG_FILE' in encoding.environ:
549 fn = encoding.environ[b'FSMONITOR_LOG_FILE']
549 fn = encoding.environ[b'FSMONITOR_LOG_FILE']
550 f = open(fn, b'wb')
550 f = open(fn, b'wb')
551 else:
551 else:
552 fn = b'fsmonitorfail.log'
552 fn = b'fsmonitorfail.log'
553 f = self.vfs.open(fn, b'wb')
553 f = self.vfs.open(fn, b'wb')
554 except (IOError, OSError):
554 except (IOError, OSError):
555 self.ui.warn(_(b'warning: unable to write to %s\n') % fn)
555 self.ui.warn(_(b'warning: unable to write to %s\n') % fn)
556 return
556 return
557
557
558 try:
558 try:
559 for i, (s1, s2) in enumerate(zip(l1, l2)):
559 for i, (s1, s2) in enumerate(zip(l1, l2)):
560 if set(s1) != set(s2):
560 if set(s1) != set(s2):
561 f.write(b'sets at position %d are unequal\n' % i)
561 f.write(b'sets at position %d are unequal\n' % i)
562 f.write(b'watchman returned: %r\n' % s1)
562 f.write(b'watchman returned: %r\n' % s1)
563 f.write(b'stat returned: %r\n' % s2)
563 f.write(b'stat returned: %r\n' % s2)
564 finally:
564 finally:
565 f.close()
565 f.close()
566
566
567 if isinstance(node1, context.changectx):
567 if isinstance(node1, context.changectx):
568 ctx1 = node1
568 ctx1 = node1
569 else:
569 else:
570 ctx1 = self[node1]
570 ctx1 = self[node1]
571 if isinstance(node2, context.changectx):
571 if isinstance(node2, context.changectx):
572 ctx2 = node2
572 ctx2 = node2
573 else:
573 else:
574 ctx2 = self[node2]
574 ctx2 = self[node2]
575
575
576 working = ctx2.rev() is None
576 working = ctx2.rev() is None
577 parentworking = working and ctx1 == self[b'.']
577 parentworking = working and ctx1 == self[b'.']
578 match = match or matchmod.always()
578 match = match or matchmod.always()
579
579
580 # Maybe we can use this opportunity to update Watchman's state.
580 # Maybe we can use this opportunity to update Watchman's state.
581 # Mercurial uses workingcommitctx and/or memctx to represent the part of
581 # Mercurial uses workingcommitctx and/or memctx to represent the part of
582 # the workingctx that is to be committed. So don't update the state in
582 # the workingctx that is to be committed. So don't update the state in
583 # that case.
583 # that case.
584 # HG_PENDING is set in the environment when the dirstate is being updated
584 # HG_PENDING is set in the environment when the dirstate is being updated
585 # in the middle of a transaction; we must not update our state in that
585 # in the middle of a transaction; we must not update our state in that
586 # case, or we risk forgetting about changes in the working copy.
586 # case, or we risk forgetting about changes in the working copy.
587 updatestate = (
587 updatestate = (
588 parentworking
588 parentworking
589 and match.always()
589 and match.always()
590 and not isinstance(ctx2, (context.workingcommitctx, context.memctx))
590 and not isinstance(ctx2, (context.workingcommitctx, context.memctx))
591 and b'HG_PENDING' not in encoding.environ
591 and b'HG_PENDING' not in encoding.environ
592 )
592 )
593
593
594 try:
594 try:
595 if self._fsmonitorstate.walk_on_invalidate:
595 if self._fsmonitorstate.walk_on_invalidate:
596 # Use a short timeout to query the current clock. If that
596 # Use a short timeout to query the current clock. If that
597 # takes too long then we assume that the service will be slow
597 # takes too long then we assume that the service will be slow
598 # to answer our query.
598 # to answer our query.
599 # walk_on_invalidate indicates that we prefer to walk the
599 # walk_on_invalidate indicates that we prefer to walk the
600 # tree ourselves because we can ignore portions that Watchman
600 # tree ourselves because we can ignore portions that Watchman
601 # cannot and we tend to be faster in the warmer buffer cache
601 # cannot and we tend to be faster in the warmer buffer cache
602 # cases.
602 # cases.
603 self._watchmanclient.settimeout(0.1)
603 self._watchmanclient.settimeout(0.1)
604 else:
604 else:
605 # Give Watchman more time to potentially complete its walk
605 # Give Watchman more time to potentially complete its walk
606 # and return the initial clock. In this mode we assume that
606 # and return the initial clock. In this mode we assume that
607 # the filesystem will be slower than parsing a potentially
607 # the filesystem will be slower than parsing a potentially
608 # very large Watchman result set.
608 # very large Watchman result set.
609 self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1)
609 self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1)
610 startclock = self._watchmanclient.getcurrentclock()
610 startclock = self._watchmanclient.getcurrentclock()
611 except Exception as ex:
611 except Exception as ex:
612 self._watchmanclient.clearconnection()
612 self._watchmanclient.clearconnection()
613 _handleunavailable(self.ui, self._fsmonitorstate, ex)
613 _handleunavailable(self.ui, self._fsmonitorstate, ex)
614 # boo, Watchman failed. bail
614 # boo, Watchman failed. bail
615 return orig(
615 return orig(
616 node1,
616 node1,
617 node2,
617 node2,
618 match,
618 match,
619 listignored,
619 listignored,
620 listclean,
620 listclean,
621 listunknown,
621 listunknown,
622 listsubrepos,
622 listsubrepos,
623 )
623 )
624
624
625 if updatestate:
625 if updatestate:
626 # We need info about unknown files. This may make things slower the
626 # We need info about unknown files. This may make things slower the
627 # first time, but whatever.
627 # first time, but whatever.
628 stateunknown = True
628 stateunknown = True
629 else:
629 else:
630 stateunknown = listunknown
630 stateunknown = listunknown
631
631
632 if updatestate:
632 if updatestate:
633 ps = poststatus(startclock)
633 ps = poststatus(startclock)
634 self.addpostdsstatus(ps)
634 self.addpostdsstatus(ps)
635
635
636 r = orig(
636 r = orig(
637 node1, node2, match, listignored, listclean, stateunknown, listsubrepos
637 node1, node2, match, listignored, listclean, stateunknown, listsubrepos
638 )
638 )
639 modified, added, removed, deleted, unknown, ignored, clean = r
639 modified, added, removed, deleted, unknown, ignored, clean = r
640
640
641 if not listunknown:
641 if not listunknown:
642 unknown = []
642 unknown = []
643
643
644 # don't do paranoid checks if we're not going to query Watchman anyway
644 # don't do paranoid checks if we're not going to query Watchman anyway
645 full = listclean or match.traversedir is not None
645 full = listclean or match.traversedir is not None
646 if self._fsmonitorstate.mode == b'paranoid' and not full:
646 if self._fsmonitorstate.mode == b'paranoid' and not full:
647 # run status again and fall back to the old walk this time
647 # run status again and fall back to the old walk this time
648 self.dirstate._fsmonitordisable = True
648 self.dirstate._fsmonitordisable = True
649
649
650 # shut the UI up
650 # shut the UI up
651 quiet = self.ui.quiet
651 quiet = self.ui.quiet
652 self.ui.quiet = True
652 self.ui.quiet = True
653 fout, ferr = self.ui.fout, self.ui.ferr
653 fout, ferr = self.ui.fout, self.ui.ferr
654 self.ui.fout = self.ui.ferr = open(os.devnull, b'wb')
654 self.ui.fout = self.ui.ferr = open(os.devnull, b'wb')
655
655
656 try:
656 try:
657 rv2 = orig(
657 rv2 = orig(
658 node1,
658 node1,
659 node2,
659 node2,
660 match,
660 match,
661 listignored,
661 listignored,
662 listclean,
662 listclean,
663 listunknown,
663 listunknown,
664 listsubrepos,
664 listsubrepos,
665 )
665 )
666 finally:
666 finally:
667 self.dirstate._fsmonitordisable = False
667 self.dirstate._fsmonitordisable = False
668 self.ui.quiet = quiet
668 self.ui.quiet = quiet
669 self.ui.fout, self.ui.ferr = fout, ferr
669 self.ui.fout, self.ui.ferr = fout, ferr
670
670
671 # clean isn't tested since it's set to True above
671 # clean isn't tested since it's set to True above
672 with self.wlock():
672 with self.wlock():
673 _cmpsets(
673 _cmpsets(
674 [modified, added, removed, deleted, unknown, ignored, clean],
674 [modified, added, removed, deleted, unknown, ignored, clean],
675 rv2,
675 rv2,
676 )
676 )
677 modified, added, removed, deleted, unknown, ignored, clean = rv2
677 modified, added, removed, deleted, unknown, ignored, clean = rv2
678
678
679 return scmutil.status(
679 return scmutil.status(
680 modified, added, removed, deleted, unknown, ignored, clean
680 modified, added, removed, deleted, unknown, ignored, clean
681 )
681 )
682
682
683
683
684 class poststatus:
684 class poststatus:
685 def __init__(self, startclock):
685 def __init__(self, startclock):
686 self._startclock = pycompat.sysbytes(startclock)
686 self._startclock = pycompat.sysbytes(startclock)
687
687
688 def __call__(self, wctx, status):
688 def __call__(self, wctx, status):
689 clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
689 clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
690 hashignore = _hashignore(wctx.repo().dirstate._ignore)
690 hashignore = _hashignore(wctx.repo().dirstate._ignore)
691 notefiles = (
691 notefiles = (
692 status.modified
692 status.modified
693 + status.added
693 + status.added
694 + status.removed
694 + status.removed
695 + status.deleted
695 + status.deleted
696 + status.unknown
696 + status.unknown
697 )
697 )
698 wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)
698 wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)
699
699
700
700
701 def makedirstate(repo, dirstate):
701 def makedirstate(repo, dirstate):
702 class fsmonitordirstate(dirstate.__class__):
702 class fsmonitordirstate(dirstate.__class__):
703 def _fsmonitorinit(self, repo):
703 def _fsmonitorinit(self, repo):
704 # _fsmonitordisable is used in paranoid mode
704 # _fsmonitordisable is used in paranoid mode
705 self._fsmonitordisable = False
705 self._fsmonitordisable = False
706 self._fsmonitorstate = repo._fsmonitorstate
706 self._fsmonitorstate = repo._fsmonitorstate
707 self._watchmanclient = repo._watchmanclient
707 self._watchmanclient = repo._watchmanclient
708 self._repo = weakref.proxy(repo)
708 self._repo = weakref.proxy(repo)
709
709
710 def walk(self, *args, **kwargs):
710 def walk(self, *args, **kwargs):
711 orig = super(fsmonitordirstate, self).walk
711 orig = super(fsmonitordirstate, self).walk
712 if self._fsmonitordisable:
712 if self._fsmonitordisable:
713 return orig(*args, **kwargs)
713 return orig(*args, **kwargs)
714 return overridewalk(orig, self, *args, **kwargs)
714 return overridewalk(orig, self, *args, **kwargs)
715
715
716 def rebuild(self, *args, **kwargs):
716 def rebuild(self, *args, **kwargs):
717 self._fsmonitorstate.invalidate()
717 self._fsmonitorstate.invalidate()
718 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
718 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
719
719
720 def invalidate(self, *args, **kwargs):
720 def invalidate(self, *args, **kwargs):
721 self._fsmonitorstate.invalidate()
721 self._fsmonitorstate.invalidate()
722 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
722 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
723
723
724 dirstate.__class__ = fsmonitordirstate
724 dirstate.__class__ = fsmonitordirstate
725 dirstate._fsmonitorinit(repo)
725 dirstate._fsmonitorinit(repo)
726
726
727
727
728 def wrapdirstate(orig, self):
728 def wrapdirstate(orig, self):
729 ds = orig(self)
729 ds = orig(self)
730 # only override the dirstate when Watchman is available for the repo
730 # only override the dirstate when Watchman is available for the repo
731 if util.safehasattr(self, b'_fsmonitorstate'):
731 if util.safehasattr(self, b'_fsmonitorstate'):
732 makedirstate(self, ds)
732 makedirstate(self, ds)
733 return ds
733 return ds
734
734
735
735
736 def extsetup(ui):
736 def extsetup(ui):
737 extensions.wrapfilecache(
737 extensions.wrapfilecache(
738 localrepo.localrepository, b'dirstate', wrapdirstate
738 localrepo.localrepository, b'dirstate', wrapdirstate
739 )
739 )
740 if pycompat.isdarwin:
740 if pycompat.isdarwin:
741 # An assist for avoiding the dangling-symlink fsevents bug
741 # An assist for avoiding the dangling-symlink fsevents bug
742 extensions.wrapfunction(os, b'symlink', wrapsymlink)
742 extensions.wrapfunction(os, b'symlink', wrapsymlink)
743
743
744 extensions.wrapfunction(merge, b'_update', wrapupdate)
744 extensions.wrapfunction(merge, b'_update', wrapupdate)
745
745
746
746
747 def wrapsymlink(orig, source, link_name):
747 def wrapsymlink(orig, source, link_name):
748 """if we create a dangling symlink, also touch the parent dir
748 """if we create a dangling symlink, also touch the parent dir
749 to encourage fsevents notifications to work more correctly"""
749 to encourage fsevents notifications to work more correctly"""
750 try:
750 try:
751 return orig(source, link_name)
751 return orig(source, link_name)
752 finally:
752 finally:
753 try:
753 try:
754 os.utime(os.path.dirname(link_name), None)
754 os.utime(os.path.dirname(link_name), None)
755 except OSError:
755 except OSError:
756 pass
756 pass
757
757
758
758
759 class state_update:
759 class state_update:
760 """This context manager is responsible for dispatching the state-enter
760 """This context manager is responsible for dispatching the state-enter
761 and state-leave signals to the watchman service. The enter and leave
761 and state-leave signals to the watchman service. The enter and leave
762 methods can be invoked manually (for scenarios where context manager
762 methods can be invoked manually (for scenarios where context manager
763 semantics are not possible). If parameters oldnode and newnode are None,
763 semantics are not possible). If parameters oldnode and newnode are None,
764 they will be populated based on current working copy in enter and
764 they will be populated based on current working copy in enter and
765 leave, respectively. Similarly, if the distance is none, it will be
765 leave, respectively. Similarly, if the distance is none, it will be
766 calculated based on the oldnode and newnode in the leave method."""
766 calculated based on the oldnode and newnode in the leave method."""
767
767
768 def __init__(
768 def __init__(
769 self,
769 self,
770 repo,
770 repo,
771 name,
771 name,
772 oldnode=None,
772 oldnode=None,
773 newnode=None,
773 newnode=None,
774 distance=None,
774 distance=None,
775 partial=False,
775 partial=False,
776 ):
776 ):
777 self.repo = repo.unfiltered()
777 self.repo = repo.unfiltered()
778 self.name = name
778 self.name = name
779 self.oldnode = oldnode
779 self.oldnode = oldnode
780 self.newnode = newnode
780 self.newnode = newnode
781 self.distance = distance
781 self.distance = distance
782 self.partial = partial
782 self.partial = partial
783 self._lock = None
783 self._lock = None
784 self.need_leave = False
784 self.need_leave = False
785
785
786 def __enter__(self):
786 def __enter__(self):
787 self.enter()
787 self.enter()
788
788
789 def enter(self):
789 def enter(self):
790 # Make sure we have a wlock prior to sending notifications to watchman.
790 # Make sure we have a wlock prior to sending notifications to watchman.
791 # We don't want to race with other actors. In the update case,
791 # We don't want to race with other actors. In the update case,
792 # merge.update is going to take the wlock almost immediately. We are
792 # merge.update is going to take the wlock almost immediately. We are
793 # effectively extending the lock around several short sanity checks.
793 # effectively extending the lock around several short sanity checks.
794 if self.oldnode is None:
794 if self.oldnode is None:
795 self.oldnode = self.repo[b'.'].node()
795 self.oldnode = self.repo[b'.'].node()
796
796
797 if self.repo.currentwlock() is None:
797 if self.repo.currentwlock() is None:
798 if util.safehasattr(self.repo, b'wlocknostateupdate'):
798 if util.safehasattr(self.repo, b'wlocknostateupdate'):
799 self._lock = self.repo.wlocknostateupdate()
799 self._lock = self.repo.wlocknostateupdate()
800 else:
800 else:
801 self._lock = self.repo.wlock()
801 self._lock = self.repo.wlock()
802 self.need_leave = self._state(b'state-enter', hex(self.oldnode))
802 self.need_leave = self._state(b'state-enter', hex(self.oldnode))
803 return self
803 return self
804
804
805 def __exit__(self, type_, value, tb):
805 def __exit__(self, type_, value, tb):
806 abort = True if type_ else False
806 abort = True if type_ else False
807 self.exit(abort=abort)
807 self.exit(abort=abort)
808
808
809 def exit(self, abort=False):
809 def exit(self, abort=False):
810 try:
810 try:
811 if self.need_leave:
811 if self.need_leave:
812 status = b'failed' if abort else b'ok'
812 status = b'failed' if abort else b'ok'
813 if self.newnode is None:
813 if self.newnode is None:
814 self.newnode = self.repo[b'.'].node()
814 self.newnode = self.repo[b'.'].node()
815 if self.distance is None:
815 if self.distance is None:
816 self.distance = calcdistance(
816 self.distance = calcdistance(
817 self.repo, self.oldnode, self.newnode
817 self.repo, self.oldnode, self.newnode
818 )
818 )
819 self._state(b'state-leave', hex(self.newnode), status=status)
819 self._state(b'state-leave', hex(self.newnode), status=status)
820 finally:
820 finally:
821 self.need_leave = False
821 self.need_leave = False
822 if self._lock:
822 if self._lock:
823 self._lock.release()
823 self._lock.release()
824
824
825 def _state(self, cmd, commithash, status=b'ok'):
825 def _state(self, cmd, commithash, status=b'ok'):
826 if not util.safehasattr(self.repo, b'_watchmanclient'):
826 if not util.safehasattr(self.repo, b'_watchmanclient'):
827 return False
827 return False
828 try:
828 try:
829 self.repo._watchmanclient.command(
829 self.repo._watchmanclient.command(
830 cmd,
830 cmd,
831 {
831 {
832 b'name': self.name,
832 b'name': self.name,
833 b'metadata': {
833 b'metadata': {
834 # the target revision
834 # the target revision
835 b'rev': commithash,
835 b'rev': commithash,
836 # approximate number of commits between current and target
836 # approximate number of commits between current and target
837 b'distance': self.distance if self.distance else 0,
837 b'distance': self.distance if self.distance else 0,
838 # success/failure (only really meaningful for state-leave)
838 # success/failure (only really meaningful for state-leave)
839 b'status': status,
839 b'status': status,
840 # whether the working copy parent is changing
840 # whether the working copy parent is changing
841 b'partial': self.partial,
841 b'partial': self.partial,
842 },
842 },
843 },
843 },
844 )
844 )
845 return True
845 return True
846 except Exception as e:
846 except Exception as e:
847 # Swallow any errors; fire and forget
847 # Swallow any errors; fire and forget
848 self.repo.ui.log(
848 self.repo.ui.log(
849 b'watchman', b'Exception %s while running %s\n', e, cmd
849 b'watchman', b'Exception %s while running %s\n', e, cmd
850 )
850 )
851 return False
851 return False
852
852
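# A minimal usage sketch of the context manager above; the call site and the
# helper are hypothetical (the real in-tree users are wrapupdate() below and
# the transaction hook installed in reposetup()):
#
#     with state_update(repo, name=b"myext.bulk-edit", partial=False):
#         rewrite_many_files(repo)   # some bulk working-copy mutation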
853
853
854 # Estimate the distance between two nodes
854 # Estimate the distance between two nodes
855 def calcdistance(repo, oldnode, newnode):
855 def calcdistance(repo, oldnode, newnode):
856 anc = repo.changelog.ancestor(oldnode, newnode)
856 anc = repo.changelog.ancestor(oldnode, newnode)
857 ancrev = repo[anc].rev()
857 ancrev = repo[anc].rev()
858 distance = abs(repo[oldnode].rev() - ancrev) + abs(
858 distance = abs(repo[oldnode].rev() - ancrev) + abs(
859 repo[newnode].rev() - ancrev
859 repo[newnode].rev() - ancrev
860 )
860 )
861 return distance
861 return distance
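# Worked example: with the common ancestor at rev 10, oldnode at rev 12 and
# newnode at rev 17, the distance is |12 - 10| + |17 - 10| = 9, i.e. roughly
# the number of commits crossed when moving the working copy between the two.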
862
862
863
863
864 # Bracket working copy updates with calls to the watchman state-enter
864 # Bracket working copy updates with calls to the watchman state-enter
865 # and state-leave commands. This allows clients to perform more intelligent
865 # and state-leave commands. This allows clients to perform more intelligent
866 # settling during bulk file change scenarios
866 # settling during bulk file change scenarios
867 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
867 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
868 def wrapupdate(
868 def wrapupdate(
869 orig,
869 orig,
870 repo,
870 repo,
871 node,
871 node,
872 branchmerge,
872 branchmerge,
873 force,
873 force,
874 ancestor=None,
874 ancestor=None,
875 mergeancestor=False,
875 mergeancestor=False,
876 labels=None,
876 labels=None,
877 matcher=None,
877 matcher=None,
878 **kwargs
878 **kwargs
879 ):
879 ):
880
880
881 distance = 0
881 distance = 0
882 partial = True
882 partial = True
883 oldnode = repo[b'.'].node()
883 oldnode = repo[b'.'].node()
884 newnode = repo[node].node()
884 newnode = repo[node].node()
885 if matcher is None or matcher.always():
885 if matcher is None or matcher.always():
886 partial = False
886 partial = False
887 distance = calcdistance(repo.unfiltered(), oldnode, newnode)
887 distance = calcdistance(repo.unfiltered(), oldnode, newnode)
888
888
889 with state_update(
889 with state_update(
890 repo,
890 repo,
891 name=b"hg.update",
891 name=b"hg.update",
892 oldnode=oldnode,
892 oldnode=oldnode,
893 newnode=newnode,
893 newnode=newnode,
894 distance=distance,
894 distance=distance,
895 partial=partial,
895 partial=partial,
896 ):
896 ):
897 return orig(
897 return orig(
898 repo,
898 repo,
899 node,
899 node,
900 branchmerge,
900 branchmerge,
901 force,
901 force,
902 ancestor,
902 ancestor,
903 mergeancestor,
903 mergeancestor,
904 labels,
904 labels,
905 matcher,
905 matcher,
906 **kwargs
906 **kwargs
907 )
907 )
908
908
909
909
910 def repo_has_depth_one_nested_repo(repo):
910 def repo_has_depth_one_nested_repo(repo):
911 for f in repo.wvfs.listdir():
911 for f in repo.wvfs.listdir():
912 if os.path.isdir(os.path.join(repo.root, f, b'.hg')):
912 if os.path.isdir(os.path.join(repo.root, f, b'.hg')):
913 msg = b'fsmonitor: sub-repository %r detected, fsmonitor disabled\n'
913 msg = b'fsmonitor: sub-repository %r detected, fsmonitor disabled\n'
914 repo.ui.debug(msg % f)
914 repo.ui.debug(msg % f)
915 return True
915 return True
916 return False
916 return False
917
917
918
918
919 def reposetup(ui, repo):
919 def reposetup(ui, repo):
920 # We don't work with the blacklisted extensions (largefiles, eol)
920 # We don't work with the blacklisted extensions (largefiles, eol)
921 exts = extensions.enabled()
921 exts = extensions.enabled()
922 for ext in _blacklist:
922 for ext in _blacklist:
923 if ext in exts:
923 if ext in exts:
924 ui.warn(
924 ui.warn(
925 _(
925 _(
926 b'The fsmonitor extension is incompatible with the %s '
926 b'The fsmonitor extension is incompatible with the %s '
927 b'extension and has been disabled.\n'
927 b'extension and has been disabled.\n'
928 )
928 )
929 % ext
929 % ext
930 )
930 )
931 return
931 return
932
932
933 if repo.local():
933 if repo.local():
934 # We don't work with subrepos either.
934 # We don't work with subrepos either.
935 #
935 #
936 # Checking repo[None].substate can cause a dirstate parse, which is too
936 # Checking repo[None].substate can cause a dirstate parse, which is too
937 # slow. Instead, look directly for the .hgsubstate / .hgsub files.
937 # slow. Instead, look directly for the .hgsubstate / .hgsub files.
938 if repo.wvfs.exists(b'.hgsubstate') or repo.wvfs.exists(b'.hgsub'):
938 if repo.wvfs.exists(b'.hgsubstate') or repo.wvfs.exists(b'.hgsub'):
939 return
939 return
940
940
941 if repo_has_depth_one_nested_repo(repo):
941 if repo_has_depth_one_nested_repo(repo):
942 return
942 return
943
943
944 fsmonitorstate = state.state(repo)
944 fsmonitorstate = state.state(repo)
945 if fsmonitorstate.mode == b'off':
945 if fsmonitorstate.mode == b'off':
946 return
946 return
947
947
948 try:
948 try:
949 client = watchmanclient.client(repo.ui, repo.root)
949 client = watchmanclient.client(repo.ui, repo.root)
950 except Exception as ex:
950 except Exception as ex:
951 _handleunavailable(ui, fsmonitorstate, ex)
951 _handleunavailable(ui, fsmonitorstate, ex)
952 return
952 return
953
953
954 repo._fsmonitorstate = fsmonitorstate
954 repo._fsmonitorstate = fsmonitorstate
955 repo._watchmanclient = client
955 repo._watchmanclient = client
956
956
957 dirstate, cached = localrepo.isfilecached(repo, b'dirstate')
957 dirstate, cached = localrepo.isfilecached(repo, b'dirstate')
958 if cached:
958 if cached:
959 # at this point since fsmonitorstate wasn't present,
959 # at this point since fsmonitorstate wasn't present,
960 # repo.dirstate is not a fsmonitordirstate
960 # repo.dirstate is not a fsmonitordirstate
961 makedirstate(repo, dirstate)
961 makedirstate(repo, dirstate)
962
962
963 class fsmonitorrepo(repo.__class__):
963 class fsmonitorrepo(repo.__class__):
964 def status(self, *args, **kwargs):
964 def status(self, *args, **kwargs):
965 orig = super(fsmonitorrepo, self).status
965 orig = super(fsmonitorrepo, self).status
966 return overridestatus(orig, self, *args, **kwargs)
966 return overridestatus(orig, self, *args, **kwargs)
967
967
968 def wlocknostateupdate(self, *args, **kwargs):
968 def wlocknostateupdate(self, *args, **kwargs):
969 return super(fsmonitorrepo, self).wlock(*args, **kwargs)
969 return super(fsmonitorrepo, self).wlock(*args, **kwargs)
970
970
971 def wlock(self, *args, **kwargs):
971 def wlock(self, *args, **kwargs):
972 l = super(fsmonitorrepo, self).wlock(*args, **kwargs)
972 l = super(fsmonitorrepo, self).wlock(*args, **kwargs)
973 if not ui.configbool(
973 if not ui.configbool(
974 b"experimental", b"fsmonitor.transaction_notify"
974 b"experimental", b"fsmonitor.transaction_notify"
975 ):
975 ):
976 return l
976 return l
977 if l.held != 1:
977 if l.held != 1:
978 return l
978 return l
979 origrelease = l.releasefn
979 origrelease = l.releasefn
980
980
981 def staterelease():
981 def staterelease():
982 if origrelease:
982 if origrelease:
983 origrelease()
983 origrelease()
984 if l.stateupdate:
984 if l.stateupdate:
985 l.stateupdate.exit()
985 l.stateupdate.exit()
986 l.stateupdate = None
986 l.stateupdate = None
987
987
988 try:
988 try:
989 l.stateupdate = None
989 l.stateupdate = None
990 l.stateupdate = state_update(self, name=b"hg.transaction")
990 l.stateupdate = state_update(self, name=b"hg.transaction")
991 l.stateupdate.enter()
991 l.stateupdate.enter()
992 l.releasefn = staterelease
992 l.releasefn = staterelease
993 except Exception as e:
993 except Exception as e:
994 # Swallow any errors; fire and forget
994 # Swallow any errors; fire and forget
995 self.ui.log(
995 self.ui.log(
996 b'watchman', b'Exception in state update %s\n', e
996 b'watchman', b'Exception in state update %s\n', e
997 )
997 )
998 return l
998 return l
999
999
1000 repo.__class__ = fsmonitorrepo
1000 repo.__class__ = fsmonitorrepo
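The hooks above bracket working-copy updates and transactions with Watchman state notifications (state_update.enter()/exit()), so that tools watching the repository can pause while files change underneath them. A minimal standalone Python sketch of that bracketing pattern follows; the class and function names are illustrative stand-ins, not fsmonitor's actual API.

    # Illustrative stand-in for the state_update wrapper: notify a watcher
    # before and after an operation that rewrites the working copy.
    class WatchedState:
        def __init__(self, name):
            self.name = name
        def __enter__(self):
            print('state-enter %s' % self.name)   # would signal Watchman here
            return self
        def __exit__(self, *exc):
            print('state-leave %s' % self.name)   # signalled even if the body raises
            return False

    def update_working_copy(do_update):
        # Mirrors the wrapper above: enter the state, run the real update,
        # leave the state when done (or when the update fails).
        with WatchedState('hg.update'):
            return do_update()

    if __name__ == '__main__':
        update_working_copy(lambda: print('...files being updated...'))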
@@ -1,1306 +1,1261 b''
1 /*
1 /*
2 parsers.c - efficient content parsing
2 parsers.c - efficient content parsing
3
3
4 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
4 Copyright 2008 Olivia Mackall <olivia@selenic.com> and others
5
5
6 This software may be used and distributed according to the terms of
6 This software may be used and distributed according to the terms of
7 the GNU General Public License, incorporated herein by reference.
7 the GNU General Public License, incorporated herein by reference.
8 */
8 */
9
9
10 #define PY_SSIZE_T_CLEAN
10 #define PY_SSIZE_T_CLEAN
11 #include <Python.h>
11 #include <Python.h>
12 #include <ctype.h>
12 #include <ctype.h>
13 #include <stddef.h>
13 #include <stddef.h>
14 #include <string.h>
14 #include <string.h>
15
15
16 #include "bitmanipulation.h"
16 #include "bitmanipulation.h"
17 #include "charencode.h"
17 #include "charencode.h"
18 #include "util.h"
18 #include "util.h"
19
19
20 static const char *const versionerrortext = "Python minor version mismatch";
20 static const char *const versionerrortext = "Python minor version mismatch";
21
21
22 static const int dirstate_v1_from_p2 = -2;
22 static const int dirstate_v1_from_p2 = -2;
23 static const int dirstate_v1_nonnormal = -1;
23 static const int dirstate_v1_nonnormal = -1;
24 static const int ambiguous_time = -1;
24 static const int ambiguous_time = -1;
25
25
26 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
26 static PyObject *dict_new_presized(PyObject *self, PyObject *args)
27 {
27 {
28 Py_ssize_t expected_size;
28 Py_ssize_t expected_size;
29
29
30 if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
30 if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
31 return NULL;
31 return NULL;
32 }
32 }
33
33
34 return _dict_new_presized(expected_size);
34 return _dict_new_presized(expected_size);
35 }
35 }
36
36
37 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
37 static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
38 PyObject *kwds)
38 PyObject *kwds)
39 {
39 {
40 /* We do all the initialization here and not in a tp_init function because
40 /* We do all the initialization here and not in a tp_init function because
41 * dirstate_item is immutable. */
41 * dirstate_item is immutable. */
42 dirstateItemObject *t;
42 dirstateItemObject *t;
43 int wc_tracked;
43 int wc_tracked;
44 int p1_tracked;
44 int p1_tracked;
45 int p2_info;
45 int p2_info;
46 int has_meaningful_data;
46 int has_meaningful_data;
47 int has_meaningful_mtime;
47 int has_meaningful_mtime;
48 int mtime_second_ambiguous;
48 int mtime_second_ambiguous;
49 int mode;
49 int mode;
50 int size;
50 int size;
51 int mtime_s;
51 int mtime_s;
52 int mtime_ns;
52 int mtime_ns;
53 PyObject *parentfiledata;
53 PyObject *parentfiledata;
54 PyObject *mtime;
54 PyObject *mtime;
55 PyObject *fallback_exec;
55 PyObject *fallback_exec;
56 PyObject *fallback_symlink;
56 PyObject *fallback_symlink;
57 static char *keywords_name[] = {
57 static char *keywords_name[] = {
58 "wc_tracked", "p1_tracked", "p2_info",
58 "wc_tracked", "p1_tracked", "p2_info",
59 "has_meaningful_data", "has_meaningful_mtime", "parentfiledata",
59 "has_meaningful_data", "has_meaningful_mtime", "parentfiledata",
60 "fallback_exec", "fallback_symlink", NULL,
60 "fallback_exec", "fallback_symlink", NULL,
61 };
61 };
62 wc_tracked = 0;
62 wc_tracked = 0;
63 p1_tracked = 0;
63 p1_tracked = 0;
64 p2_info = 0;
64 p2_info = 0;
65 has_meaningful_mtime = 1;
65 has_meaningful_mtime = 1;
66 has_meaningful_data = 1;
66 has_meaningful_data = 1;
67 mtime_second_ambiguous = 0;
67 mtime_second_ambiguous = 0;
68 parentfiledata = Py_None;
68 parentfiledata = Py_None;
69 fallback_exec = Py_None;
69 fallback_exec = Py_None;
70 fallback_symlink = Py_None;
70 fallback_symlink = Py_None;
71 if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name,
71 if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iiiiiOOO", keywords_name,
72 &wc_tracked, &p1_tracked, &p2_info,
72 &wc_tracked, &p1_tracked, &p2_info,
73 &has_meaningful_data,
73 &has_meaningful_data,
74 &has_meaningful_mtime, &parentfiledata,
74 &has_meaningful_mtime, &parentfiledata,
75 &fallback_exec, &fallback_symlink)) {
75 &fallback_exec, &fallback_symlink)) {
76 return NULL;
76 return NULL;
77 }
77 }
78 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
78 t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
79 if (!t) {
79 if (!t) {
80 return NULL;
80 return NULL;
81 }
81 }
82
82
83 t->flags = 0;
83 t->flags = 0;
84 if (wc_tracked) {
84 if (wc_tracked) {
85 t->flags |= dirstate_flag_wc_tracked;
85 t->flags |= dirstate_flag_wc_tracked;
86 }
86 }
87 if (p1_tracked) {
87 if (p1_tracked) {
88 t->flags |= dirstate_flag_p1_tracked;
88 t->flags |= dirstate_flag_p1_tracked;
89 }
89 }
90 if (p2_info) {
90 if (p2_info) {
91 t->flags |= dirstate_flag_p2_info;
91 t->flags |= dirstate_flag_p2_info;
92 }
92 }
93
93
94 if (fallback_exec != Py_None) {
94 if (fallback_exec != Py_None) {
95 t->flags |= dirstate_flag_has_fallback_exec;
95 t->flags |= dirstate_flag_has_fallback_exec;
96 if (PyObject_IsTrue(fallback_exec)) {
96 if (PyObject_IsTrue(fallback_exec)) {
97 t->flags |= dirstate_flag_fallback_exec;
97 t->flags |= dirstate_flag_fallback_exec;
98 }
98 }
99 }
99 }
100 if (fallback_symlink != Py_None) {
100 if (fallback_symlink != Py_None) {
101 t->flags |= dirstate_flag_has_fallback_symlink;
101 t->flags |= dirstate_flag_has_fallback_symlink;
102 if (PyObject_IsTrue(fallback_symlink)) {
102 if (PyObject_IsTrue(fallback_symlink)) {
103 t->flags |= dirstate_flag_fallback_symlink;
103 t->flags |= dirstate_flag_fallback_symlink;
104 }
104 }
105 }
105 }
106
106
107 if (parentfiledata != Py_None) {
107 if (parentfiledata != Py_None) {
108 if (!PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size,
108 if (!PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size,
109 &mtime)) {
109 &mtime)) {
110 return NULL;
110 return NULL;
111 }
111 }
112 if (mtime != Py_None) {
112 if (mtime != Py_None) {
113 if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns,
113 if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns,
114 &mtime_second_ambiguous)) {
114 &mtime_second_ambiguous)) {
115 return NULL;
115 return NULL;
116 }
116 }
117 } else {
117 } else {
118 has_meaningful_mtime = 0;
118 has_meaningful_mtime = 0;
119 }
119 }
120 } else {
120 } else {
121 has_meaningful_data = 0;
121 has_meaningful_data = 0;
122 has_meaningful_mtime = 0;
122 has_meaningful_mtime = 0;
123 }
123 }
124 if (has_meaningful_data) {
124 if (has_meaningful_data) {
125 t->flags |= dirstate_flag_has_meaningful_data;
125 t->flags |= dirstate_flag_has_meaningful_data;
126 t->mode = mode;
126 t->mode = mode;
127 t->size = size;
127 t->size = size;
128 if (mtime_second_ambiguous) {
128 if (mtime_second_ambiguous) {
129 t->flags |= dirstate_flag_mtime_second_ambiguous;
129 t->flags |= dirstate_flag_mtime_second_ambiguous;
130 }
130 }
131 } else {
131 } else {
132 t->mode = 0;
132 t->mode = 0;
133 t->size = 0;
133 t->size = 0;
134 }
134 }
135 if (has_meaningful_mtime) {
135 if (has_meaningful_mtime) {
136 t->flags |= dirstate_flag_has_mtime;
136 t->flags |= dirstate_flag_has_mtime;
137 t->mtime_s = mtime_s;
137 t->mtime_s = mtime_s;
138 t->mtime_ns = mtime_ns;
138 t->mtime_ns = mtime_ns;
139 } else {
139 } else {
140 t->mtime_s = 0;
140 t->mtime_s = 0;
141 t->mtime_ns = 0;
141 t->mtime_ns = 0;
142 }
142 }
143 return (PyObject *)t;
143 return (PyObject *)t;
144 }
144 }
145
145
146 static void dirstate_item_dealloc(PyObject *o)
146 static void dirstate_item_dealloc(PyObject *o)
147 {
147 {
148 PyObject_Del(o);
148 PyObject_Del(o);
149 }
149 }
150
150
151 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
151 static inline bool dirstate_item_c_tracked(dirstateItemObject *self)
152 {
152 {
153 return (self->flags & dirstate_flag_wc_tracked);
153 return (self->flags & dirstate_flag_wc_tracked);
154 }
154 }
155
155
156 static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self)
156 static inline bool dirstate_item_c_any_tracked(dirstateItemObject *self)
157 {
157 {
158 const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
158 const int mask = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
159 dirstate_flag_p2_info;
159 dirstate_flag_p2_info;
160 return (self->flags & mask);
160 return (self->flags & mask);
161 }
161 }
162
162
163 static inline bool dirstate_item_c_added(dirstateItemObject *self)
163 static inline bool dirstate_item_c_added(dirstateItemObject *self)
164 {
164 {
165 const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
165 const int mask = (dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
166 dirstate_flag_p2_info);
166 dirstate_flag_p2_info);
167 const int target = dirstate_flag_wc_tracked;
167 const int target = dirstate_flag_wc_tracked;
168 return (self->flags & mask) == target;
168 return (self->flags & mask) == target;
169 }
169 }
170
170
171 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
171 static inline bool dirstate_item_c_removed(dirstateItemObject *self)
172 {
172 {
173 if (self->flags & dirstate_flag_wc_tracked) {
173 if (self->flags & dirstate_flag_wc_tracked) {
174 return false;
174 return false;
175 }
175 }
176 return (self->flags &
176 return (self->flags &
177 (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
177 (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
178 }
178 }
179
179
180 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
180 static inline bool dirstate_item_c_merged(dirstateItemObject *self)
181 {
181 {
182 return ((self->flags & dirstate_flag_wc_tracked) &&
182 return ((self->flags & dirstate_flag_wc_tracked) &&
183 (self->flags & dirstate_flag_p1_tracked) &&
183 (self->flags & dirstate_flag_p1_tracked) &&
184 (self->flags & dirstate_flag_p2_info));
184 (self->flags & dirstate_flag_p2_info));
185 }
185 }
186
186
187 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
187 static inline bool dirstate_item_c_from_p2(dirstateItemObject *self)
188 {
188 {
189 return ((self->flags & dirstate_flag_wc_tracked) &&
189 return ((self->flags & dirstate_flag_wc_tracked) &&
190 !(self->flags & dirstate_flag_p1_tracked) &&
190 !(self->flags & dirstate_flag_p1_tracked) &&
191 (self->flags & dirstate_flag_p2_info));
191 (self->flags & dirstate_flag_p2_info));
192 }
192 }
193
193
194 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
194 static inline char dirstate_item_c_v1_state(dirstateItemObject *self)
195 {
195 {
196 if (dirstate_item_c_removed(self)) {
196 if (dirstate_item_c_removed(self)) {
197 return 'r';
197 return 'r';
198 } else if (dirstate_item_c_merged(self)) {
198 } else if (dirstate_item_c_merged(self)) {
199 return 'm';
199 return 'm';
200 } else if (dirstate_item_c_added(self)) {
200 } else if (dirstate_item_c_added(self)) {
201 return 'a';
201 return 'a';
202 } else {
202 } else {
203 return 'n';
203 return 'n';
204 }
204 }
205 }
205 }
206
206
207 static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self)
207 static inline bool dirstate_item_c_has_fallback_exec(dirstateItemObject *self)
208 {
208 {
209 return (bool)(self->flags & dirstate_flag_has_fallback_exec);
209 return (bool)(self->flags & dirstate_flag_has_fallback_exec);
210 }
210 }
211
211
212 static inline bool
212 static inline bool
213 dirstate_item_c_has_fallback_symlink(dirstateItemObject *self)
213 dirstate_item_c_has_fallback_symlink(dirstateItemObject *self)
214 {
214 {
215 return (bool)(self->flags & dirstate_flag_has_fallback_symlink);
215 return (bool)(self->flags & dirstate_flag_has_fallback_symlink);
216 }
216 }
217
217
218 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
218 static inline int dirstate_item_c_v1_mode(dirstateItemObject *self)
219 {
219 {
220 if (self->flags & dirstate_flag_has_meaningful_data) {
220 if (self->flags & dirstate_flag_has_meaningful_data) {
221 return self->mode;
221 return self->mode;
222 } else {
222 } else {
223 return 0;
223 return 0;
224 }
224 }
225 }
225 }
226
226
227 static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
227 static inline int dirstate_item_c_v1_size(dirstateItemObject *self)
228 {
228 {
229 if (!(self->flags & dirstate_flag_wc_tracked) &&
229 if (!(self->flags & dirstate_flag_wc_tracked) &&
230 (self->flags & dirstate_flag_p2_info)) {
230 (self->flags & dirstate_flag_p2_info)) {
231 if (self->flags & dirstate_flag_p1_tracked) {
231 if (self->flags & dirstate_flag_p1_tracked) {
232 return dirstate_v1_nonnormal;
232 return dirstate_v1_nonnormal;
233 } else {
233 } else {
234 return dirstate_v1_from_p2;
234 return dirstate_v1_from_p2;
235 }
235 }
236 } else if (dirstate_item_c_removed(self)) {
236 } else if (dirstate_item_c_removed(self)) {
237 return 0;
237 return 0;
238 } else if (self->flags & dirstate_flag_p2_info) {
238 } else if (self->flags & dirstate_flag_p2_info) {
239 return dirstate_v1_from_p2;
239 return dirstate_v1_from_p2;
240 } else if (dirstate_item_c_added(self)) {
240 } else if (dirstate_item_c_added(self)) {
241 return dirstate_v1_nonnormal;
241 return dirstate_v1_nonnormal;
242 } else if (self->flags & dirstate_flag_has_meaningful_data) {
242 } else if (self->flags & dirstate_flag_has_meaningful_data) {
243 return self->size;
243 return self->size;
244 } else {
244 } else {
245 return dirstate_v1_nonnormal;
245 return dirstate_v1_nonnormal;
246 }
246 }
247 }
247 }
248
248
249 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
249 static inline int dirstate_item_c_v1_mtime(dirstateItemObject *self)
250 {
250 {
251 if (dirstate_item_c_removed(self)) {
251 if (dirstate_item_c_removed(self)) {
252 return 0;
252 return 0;
253 } else if (!(self->flags & dirstate_flag_has_mtime) ||
253 } else if (!(self->flags & dirstate_flag_has_mtime) ||
254 !(self->flags & dirstate_flag_p1_tracked) ||
254 !(self->flags & dirstate_flag_p1_tracked) ||
255 !(self->flags & dirstate_flag_wc_tracked) ||
255 !(self->flags & dirstate_flag_wc_tracked) ||
256 (self->flags & dirstate_flag_p2_info) ||
256 (self->flags & dirstate_flag_p2_info) ||
257 (self->flags & dirstate_flag_mtime_second_ambiguous)) {
257 (self->flags & dirstate_flag_mtime_second_ambiguous)) {
258 return ambiguous_time;
258 return ambiguous_time;
259 } else {
259 } else {
260 return self->mtime_s;
260 return self->mtime_s;
261 }
261 }
262 }
262 }
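dirstate_item_c_v1_state, _v1_size and _v1_mtime above translate the flag-based representation back into the legacy dirstate-v1 fields. For reference, here is a rough Python rendering of the "state" decision table; it is a sketch based on the C code above, not Mercurial's pure-Python module.

    # 'r' = removed, 'm' = merged, 'a' = added, 'n' = normal -- derived only
    # from which of the three tracking flags are set.
    def v1_state(wc_tracked, p1_tracked, p2_info):
        if not wc_tracked and (p1_tracked or p2_info):
            return b'r'
        if wc_tracked and p1_tracked and p2_info:
            return b'm'
        if wc_tracked and not p1_tracked and not p2_info:
            return b'a'
        return b'n'

    assert v1_state(False, True, False) == b'r'   # dropped from the working copy
    assert v1_state(True, True, True) == b'm'     # tracked everywhere: merge
    assert v1_state(True, False, False) == b'a'   # only in the working copy
    assert v1_state(True, True, False) == b'n'    # plain tracked file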
263
263
264 static PyObject *dirstate_item_v2_data(dirstateItemObject *self)
264 static PyObject *dirstate_item_v2_data(dirstateItemObject *self)
265 {
265 {
266 int flags = self->flags;
266 int flags = self->flags;
267 int mode = dirstate_item_c_v1_mode(self);
267 int mode = dirstate_item_c_v1_mode(self);
268 #ifdef S_IXUSR
268 #ifdef S_IXUSR
269 /* This is for platforms with an exec bit */
269 /* This is for platforms with an exec bit */
270 if ((mode & S_IXUSR) != 0) {
270 if ((mode & S_IXUSR) != 0) {
271 flags |= dirstate_flag_mode_exec_perm;
271 flags |= dirstate_flag_mode_exec_perm;
272 } else {
272 } else {
273 flags &= ~dirstate_flag_mode_exec_perm;
273 flags &= ~dirstate_flag_mode_exec_perm;
274 }
274 }
275 #else
275 #else
276 flags &= ~dirstate_flag_mode_exec_perm;
276 flags &= ~dirstate_flag_mode_exec_perm;
277 #endif
277 #endif
278 #ifdef S_ISLNK
278 #ifdef S_ISLNK
279 /* This is for platforms with support for symlinks */
279 /* This is for platforms with support for symlinks */
280 if (S_ISLNK(mode)) {
280 if (S_ISLNK(mode)) {
281 flags |= dirstate_flag_mode_is_symlink;
281 flags |= dirstate_flag_mode_is_symlink;
282 } else {
282 } else {
283 flags &= ~dirstate_flag_mode_is_symlink;
283 flags &= ~dirstate_flag_mode_is_symlink;
284 }
284 }
285 #else
285 #else
286 flags &= ~dirstate_flag_mode_is_symlink;
286 flags &= ~dirstate_flag_mode_is_symlink;
287 #endif
287 #endif
288 return Py_BuildValue("iiii", flags, self->size, self->mtime_s,
288 return Py_BuildValue("iiii", flags, self->size, self->mtime_s,
289 self->mtime_ns);
289 self->mtime_ns);
290 };
290 };
291
291
292 static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
293 {
294 char state = dirstate_item_c_v1_state(self);
295 return PyBytes_FromStringAndSize(&state, 1);
296 };
297
298 static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
299 {
300 return PyLong_FromLong(dirstate_item_c_v1_mode(self));
301 };
302
303 static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
304 {
305 return PyLong_FromLong(dirstate_item_c_v1_size(self));
306 };
307
308 static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
309 {
310 return PyLong_FromLong(dirstate_item_c_v1_mtime(self));
311 };
312
313 static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self,
292 static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self,
314 PyObject *other)
293 PyObject *other)
315 {
294 {
316 int other_s;
295 int other_s;
317 int other_ns;
296 int other_ns;
318 int other_second_ambiguous;
297 int other_second_ambiguous;
319 if (!PyArg_ParseTuple(other, "iii", &other_s, &other_ns,
298 if (!PyArg_ParseTuple(other, "iii", &other_s, &other_ns,
320 &other_second_ambiguous)) {
299 &other_second_ambiguous)) {
321 return NULL;
300 return NULL;
322 }
301 }
323 if (!(self->flags & dirstate_flag_has_mtime)) {
302 if (!(self->flags & dirstate_flag_has_mtime)) {
324 Py_RETURN_FALSE;
303 Py_RETURN_FALSE;
325 }
304 }
326 if (self->mtime_s != other_s) {
305 if (self->mtime_s != other_s) {
327 Py_RETURN_FALSE;
306 Py_RETURN_FALSE;
328 }
307 }
329 if (self->mtime_ns == 0 || other_ns == 0) {
308 if (self->mtime_ns == 0 || other_ns == 0) {
330 if (self->flags & dirstate_flag_mtime_second_ambiguous) {
309 if (self->flags & dirstate_flag_mtime_second_ambiguous) {
331 Py_RETURN_FALSE;
310 Py_RETURN_FALSE;
332 } else {
311 } else {
333 Py_RETURN_TRUE;
312 Py_RETURN_TRUE;
334 }
313 }
335 }
314 }
336 if (self->mtime_ns == other_ns) {
315 if (self->mtime_ns == other_ns) {
337 Py_RETURN_TRUE;
316 Py_RETURN_TRUE;
338 } else {
317 } else {
339 Py_RETURN_FALSE;
318 Py_RETURN_FALSE;
340 }
319 }
341 };
320 };
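mtime_likely_equal_to above only trusts a cached timestamp when the seconds match and, if either side carries no sub-second precision, when the stored value is not flagged as second-ambiguous. A standalone sketch of the same rule, in plain Python for illustration only:

    # (self_known, self_s, self_ns, ambiguous) describe the stored mtime;
    # (other_s, other_ns) is the timestamp being compared against.
    def mtime_likely_equal(self_known, self_s, self_ns, ambiguous, other_s, other_ns):
        if not self_known:            # no usable stored mtime
            return False
        if self_s != other_s:         # seconds must match exactly
            return False
        if self_ns == 0 or other_ns == 0:
            # one side only has second precision: trust it unless the stored
            # value was recorded during the very second it describes
            return not ambiguous
        return self_ns == other_ns

    assert mtime_likely_equal(True, 100, 0, False, 100, 999) is True
    assert mtime_likely_equal(True, 100, 0, True, 100, 999) is False
    assert mtime_likely_equal(True, 100, 5, False, 100, 6) is False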
342
321
343 /* This will never change since it's bound to V1
322 /* This will never change since it's bound to V1
344 */
323 */
345 static inline dirstateItemObject *
324 static inline dirstateItemObject *
346 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
325 dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
347 {
326 {
348 dirstateItemObject *t =
327 dirstateItemObject *t =
349 PyObject_New(dirstateItemObject, &dirstateItemType);
328 PyObject_New(dirstateItemObject, &dirstateItemType);
350 if (!t) {
329 if (!t) {
351 return NULL;
330 return NULL;
352 }
331 }
353 t->flags = 0;
332 t->flags = 0;
354 t->mode = 0;
333 t->mode = 0;
355 t->size = 0;
334 t->size = 0;
356 t->mtime_s = 0;
335 t->mtime_s = 0;
357 t->mtime_ns = 0;
336 t->mtime_ns = 0;
358
337
359 if (state == 'm') {
338 if (state == 'm') {
360 t->flags = (dirstate_flag_wc_tracked |
339 t->flags = (dirstate_flag_wc_tracked |
361 dirstate_flag_p1_tracked | dirstate_flag_p2_info);
340 dirstate_flag_p1_tracked | dirstate_flag_p2_info);
362 } else if (state == 'a') {
341 } else if (state == 'a') {
363 t->flags = dirstate_flag_wc_tracked;
342 t->flags = dirstate_flag_wc_tracked;
364 } else if (state == 'r') {
343 } else if (state == 'r') {
365 if (size == dirstate_v1_nonnormal) {
344 if (size == dirstate_v1_nonnormal) {
366 t->flags =
345 t->flags =
367 dirstate_flag_p1_tracked | dirstate_flag_p2_info;
346 dirstate_flag_p1_tracked | dirstate_flag_p2_info;
368 } else if (size == dirstate_v1_from_p2) {
347 } else if (size == dirstate_v1_from_p2) {
369 t->flags = dirstate_flag_p2_info;
348 t->flags = dirstate_flag_p2_info;
370 } else {
349 } else {
371 t->flags = dirstate_flag_p1_tracked;
350 t->flags = dirstate_flag_p1_tracked;
372 }
351 }
373 } else if (state == 'n') {
352 } else if (state == 'n') {
374 if (size == dirstate_v1_from_p2) {
353 if (size == dirstate_v1_from_p2) {
375 t->flags =
354 t->flags =
376 dirstate_flag_wc_tracked | dirstate_flag_p2_info;
355 dirstate_flag_wc_tracked | dirstate_flag_p2_info;
377 } else if (size == dirstate_v1_nonnormal) {
356 } else if (size == dirstate_v1_nonnormal) {
378 t->flags =
357 t->flags =
379 dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
358 dirstate_flag_wc_tracked | dirstate_flag_p1_tracked;
380 } else if (mtime == ambiguous_time) {
359 } else if (mtime == ambiguous_time) {
381 t->flags = (dirstate_flag_wc_tracked |
360 t->flags = (dirstate_flag_wc_tracked |
382 dirstate_flag_p1_tracked |
361 dirstate_flag_p1_tracked |
383 dirstate_flag_has_meaningful_data);
362 dirstate_flag_has_meaningful_data);
384 t->mode = mode;
363 t->mode = mode;
385 t->size = size;
364 t->size = size;
386 } else {
365 } else {
387 t->flags = (dirstate_flag_wc_tracked |
366 t->flags = (dirstate_flag_wc_tracked |
388 dirstate_flag_p1_tracked |
367 dirstate_flag_p1_tracked |
389 dirstate_flag_has_meaningful_data |
368 dirstate_flag_has_meaningful_data |
390 dirstate_flag_has_mtime);
369 dirstate_flag_has_mtime);
391 t->mode = mode;
370 t->mode = mode;
392 t->size = size;
371 t->size = size;
393 t->mtime_s = mtime;
372 t->mtime_s = mtime;
394 }
373 }
395 } else {
374 } else {
396 PyErr_Format(PyExc_RuntimeError,
375 PyErr_Format(PyExc_RuntimeError,
397 "unknown state: `%c` (%d, %d, %d)", state, mode,
376 "unknown state: `%c` (%d, %d, %d)", state, mode,
398 size, mtime);
377 size, mtime);
399 Py_DECREF(t);
378 Py_DECREF(t);
400 return NULL;
379 return NULL;
401 }
380 }
402
381
403 return t;
382 return t;
404 }
383 }
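dirstate_item_from_v1_data above recovers the new flag set from a legacy (state, mode, size, mtime) record, using the sentinel sizes -1 (nonnormal) and -2 (from-p2) to reconstruct merge information. A condensed Python sketch of that mapping; the flag names are shortened and it mirrors the branches above rather than any real API.

    WC, P1, P2 = 'wc_tracked', 'p1_tracked', 'p2_info'
    DATA, MTIME = 'has_meaningful_data', 'has_mtime'

    def flags_from_v1(state, size, mtime):
        if state == b'm':
            return {WC, P1, P2}
        if state == b'a':
            return {WC}
        if state == b'r':
            if size == -1:            # nonnormal: tracked in both parents
                return {P1, P2}
            if size == -2:            # only known from p2
                return {P2}
            return {P1}
        if state == b'n':
            if size == -2:
                return {WC, P2}
            if size == -1:
                return {WC, P1}
            if mtime == -1:           # ambiguous mtime: keep mode/size only
                return {WC, P1, DATA}
            return {WC, P1, DATA, MTIME}
        raise ValueError('unknown state: %r' % state)

    assert flags_from_v1(b'r', -2, 0) == {P2}
    assert flags_from_v1(b'n', 12, 1700000000) == {WC, P1, DATA, MTIME}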
405
384
406 /* This will never change since it's bound to V1, unlike `dirstate_item_new` */
407 static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
408 PyObject *args)
409 {
410 /* We do all the initialization here and not a tp_init function because
411 * dirstate_item is immutable. */
412 char state;
413 int size, mode, mtime;
414 if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
415 return NULL;
416 }
417 return (PyObject *)dirstate_item_from_v1_data(state, mode, size, mtime);
418 };
419
420 static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype,
385 static PyObject *dirstate_item_from_v2_meth(PyTypeObject *subtype,
421 PyObject *args)
386 PyObject *args)
422 {
387 {
423 dirstateItemObject *t =
388 dirstateItemObject *t =
424 PyObject_New(dirstateItemObject, &dirstateItemType);
389 PyObject_New(dirstateItemObject, &dirstateItemType);
425 if (!t) {
390 if (!t) {
426 return NULL;
391 return NULL;
427 }
392 }
428 if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s,
393 if (!PyArg_ParseTuple(args, "iiii", &t->flags, &t->size, &t->mtime_s,
429 &t->mtime_ns)) {
394 &t->mtime_ns)) {
430 return NULL;
395 return NULL;
431 }
396 }
432 if (t->flags & dirstate_flag_expected_state_is_modified) {
397 if (t->flags & dirstate_flag_expected_state_is_modified) {
433 t->flags &= ~(dirstate_flag_expected_state_is_modified |
398 t->flags &= ~(dirstate_flag_expected_state_is_modified |
434 dirstate_flag_has_meaningful_data |
399 dirstate_flag_has_meaningful_data |
435 dirstate_flag_has_mtime);
400 dirstate_flag_has_mtime);
436 }
401 }
437 t->mode = 0;
402 t->mode = 0;
438 if (t->flags & dirstate_flag_has_meaningful_data) {
403 if (t->flags & dirstate_flag_has_meaningful_data) {
439 if (t->flags & dirstate_flag_mode_exec_perm) {
404 if (t->flags & dirstate_flag_mode_exec_perm) {
440 t->mode = 0755;
405 t->mode = 0755;
441 } else {
406 } else {
442 t->mode = 0644;
407 t->mode = 0644;
443 }
408 }
444 if (t->flags & dirstate_flag_mode_is_symlink) {
409 if (t->flags & dirstate_flag_mode_is_symlink) {
445 t->mode |= S_IFLNK;
410 t->mode |= S_IFLNK;
446 } else {
411 } else {
447 t->mode |= S_IFREG;
412 t->mode |= S_IFREG;
448 }
413 }
449 }
414 }
450 return (PyObject *)t;
415 return (PyObject *)t;
451 };
416 };
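When rebuilding an item from v2 data above, only the exec-permission and is-symlink bits are stored, so a representative st_mode is reconstructed from them (0755 or 0644 plus the file-type bit). The same reconstruction, sketched in Python for reference:

    import stat

    # Only the exec and symlink bits survive in dirstate-v2, so the full
    # st_mode is rebuilt from them, as the C code above does.
    def mode_from_v2_flags(has_data, exec_perm, is_symlink):
        if not has_data:
            return 0
        mode = 0o755 if exec_perm else 0o644
        mode |= stat.S_IFLNK if is_symlink else stat.S_IFREG
        return mode

    assert stat.S_ISLNK(mode_from_v2_flags(True, False, True))
    assert mode_from_v2_flags(True, True, False) & 0o111   # exec bits set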
452
417
453 /* This means the next status call will have to actually check its content
418 /* This means the next status call will have to actually check its content
454 to make sure it is correct. */
419 to make sure it is correct. */
455 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
420 static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
456 {
421 {
457 self->flags &= ~dirstate_flag_has_mtime;
422 self->flags &= ~dirstate_flag_has_mtime;
458 Py_RETURN_NONE;
423 Py_RETURN_NONE;
459 }
424 }
460
425
461 /* See docstring of the python implementation for details */
426 /* See docstring of the python implementation for details */
462 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
427 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
463 PyObject *args)
428 PyObject *args)
464 {
429 {
465 int size, mode, mtime_s, mtime_ns, mtime_second_ambiguous;
430 int size, mode, mtime_s, mtime_ns, mtime_second_ambiguous;
466 PyObject *mtime;
431 PyObject *mtime;
467 mtime_s = 0;
432 mtime_s = 0;
468 mtime_ns = 0;
433 mtime_ns = 0;
469 mtime_second_ambiguous = 0;
434 mtime_second_ambiguous = 0;
470 if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) {
435 if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) {
471 return NULL;
436 return NULL;
472 }
437 }
473 if (mtime != Py_None) {
438 if (mtime != Py_None) {
474 if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns,
439 if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns,
475 &mtime_second_ambiguous)) {
440 &mtime_second_ambiguous)) {
476 return NULL;
441 return NULL;
477 }
442 }
478 } else {
443 } else {
479 self->flags &= ~dirstate_flag_has_mtime;
444 self->flags &= ~dirstate_flag_has_mtime;
480 }
445 }
481 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
446 self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
482 dirstate_flag_has_meaningful_data |
447 dirstate_flag_has_meaningful_data |
483 dirstate_flag_has_mtime;
448 dirstate_flag_has_mtime;
484 if (mtime_second_ambiguous) {
449 if (mtime_second_ambiguous) {
485 self->flags |= dirstate_flag_mtime_second_ambiguous;
450 self->flags |= dirstate_flag_mtime_second_ambiguous;
486 }
451 }
487 self->mode = mode;
452 self->mode = mode;
488 self->size = size;
453 self->size = size;
489 self->mtime_s = mtime_s;
454 self->mtime_s = mtime_s;
490 self->mtime_ns = mtime_ns;
455 self->mtime_ns = mtime_ns;
491 Py_RETURN_NONE;
456 Py_RETURN_NONE;
492 }
457 }
493
458
494 static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
459 static PyObject *dirstate_item_set_tracked(dirstateItemObject *self)
495 {
460 {
496 self->flags |= dirstate_flag_wc_tracked;
461 self->flags |= dirstate_flag_wc_tracked;
497 self->flags &= ~dirstate_flag_has_mtime;
462 self->flags &= ~dirstate_flag_has_mtime;
498 Py_RETURN_NONE;
463 Py_RETURN_NONE;
499 }
464 }
500
465
501 static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
466 static PyObject *dirstate_item_set_untracked(dirstateItemObject *self)
502 {
467 {
503 self->flags &= ~dirstate_flag_wc_tracked;
468 self->flags &= ~dirstate_flag_wc_tracked;
504 self->flags &= ~dirstate_flag_has_meaningful_data;
469 self->flags &= ~dirstate_flag_has_meaningful_data;
505 self->flags &= ~dirstate_flag_has_mtime;
470 self->flags &= ~dirstate_flag_has_mtime;
506 self->mode = 0;
471 self->mode = 0;
507 self->size = 0;
472 self->size = 0;
508 self->mtime_s = 0;
473 self->mtime_s = 0;
509 self->mtime_ns = 0;
474 self->mtime_ns = 0;
510 Py_RETURN_NONE;
475 Py_RETURN_NONE;
511 }
476 }
512
477
513 static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self)
478 static PyObject *dirstate_item_drop_merge_data(dirstateItemObject *self)
514 {
479 {
515 if (self->flags & dirstate_flag_p2_info) {
480 if (self->flags & dirstate_flag_p2_info) {
516 self->flags &= ~(dirstate_flag_p2_info |
481 self->flags &= ~(dirstate_flag_p2_info |
517 dirstate_flag_has_meaningful_data |
482 dirstate_flag_has_meaningful_data |
518 dirstate_flag_has_mtime);
483 dirstate_flag_has_mtime);
519 self->mode = 0;
484 self->mode = 0;
520 self->size = 0;
485 self->size = 0;
521 self->mtime_s = 0;
486 self->mtime_s = 0;
522 self->mtime_ns = 0;
487 self->mtime_ns = 0;
523 }
488 }
524 Py_RETURN_NONE;
489 Py_RETURN_NONE;
525 }
490 }
526 static PyMethodDef dirstate_item_methods[] = {
491 static PyMethodDef dirstate_item_methods[] = {
527 {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS,
492 {"v2_data", (PyCFunction)dirstate_item_v2_data, METH_NOARGS,
528 "return data suitable for v2 serialization"},
493 "return data suitable for v2 serialization"},
529 {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
530 "return a \"state\" suitable for v1 serialization"},
531 {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
532 "return a \"mode\" suitable for v1 serialization"},
533 {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS,
534 "return a \"size\" suitable for v1 serialization"},
535 {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
536 "return a \"mtime\" suitable for v1 serialization"},
537 {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to,
494 {"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to,
538 METH_O, "True if the stored mtime is likely equal to the given mtime"},
495 METH_O, "True if the stored mtime is likely equal to the given mtime"},
539 {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
540 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
541 {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth,
496 {"from_v2_data", (PyCFunction)dirstate_item_from_v2_meth,
542 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"},
497 METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V2 data"},
543 {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
498 {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
544 METH_NOARGS, "mark a file as \"possibly dirty\""},
499 METH_NOARGS, "mark a file as \"possibly dirty\""},
545 {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
500 {"set_clean", (PyCFunction)dirstate_item_set_clean, METH_VARARGS,
546 "mark a file as \"clean\""},
501 "mark a file as \"clean\""},
547 {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
502 {"set_tracked", (PyCFunction)dirstate_item_set_tracked, METH_NOARGS,
548 "mark a file as \"tracked\""},
503 "mark a file as \"tracked\""},
549 {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
504 {"set_untracked", (PyCFunction)dirstate_item_set_untracked, METH_NOARGS,
550 "mark a file as \"untracked\""},
505 "mark a file as \"untracked\""},
551 {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS,
506 {"drop_merge_data", (PyCFunction)dirstate_item_drop_merge_data, METH_NOARGS,
552 "remove all \"merge-only\" from a DirstateItem"},
507 "remove all \"merge-only\" from a DirstateItem"},
553 {NULL} /* Sentinel */
508 {NULL} /* Sentinel */
554 };
509 };
555
510
556 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
511 static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
557 {
512 {
558 return PyLong_FromLong(dirstate_item_c_v1_mode(self));
513 return PyLong_FromLong(dirstate_item_c_v1_mode(self));
559 };
514 };
560
515
561 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
516 static PyObject *dirstate_item_get_size(dirstateItemObject *self)
562 {
517 {
563 return PyLong_FromLong(dirstate_item_c_v1_size(self));
518 return PyLong_FromLong(dirstate_item_c_v1_size(self));
564 };
519 };
565
520
566 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
521 static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
567 {
522 {
568 return PyLong_FromLong(dirstate_item_c_v1_mtime(self));
523 return PyLong_FromLong(dirstate_item_c_v1_mtime(self));
569 };
524 };
570
525
571 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
526 static PyObject *dirstate_item_get_state(dirstateItemObject *self)
572 {
527 {
573 char state = dirstate_item_c_v1_state(self);
528 char state = dirstate_item_c_v1_state(self);
574 return PyBytes_FromStringAndSize(&state, 1);
529 return PyBytes_FromStringAndSize(&state, 1);
575 };
530 };
576
531
577 static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self)
532 static PyObject *dirstate_item_get_has_fallback_exec(dirstateItemObject *self)
578 {
533 {
579 if (dirstate_item_c_has_fallback_exec(self)) {
534 if (dirstate_item_c_has_fallback_exec(self)) {
580 Py_RETURN_TRUE;
535 Py_RETURN_TRUE;
581 } else {
536 } else {
582 Py_RETURN_FALSE;
537 Py_RETURN_FALSE;
583 }
538 }
584 };
539 };
585
540
586 static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self)
541 static PyObject *dirstate_item_get_fallback_exec(dirstateItemObject *self)
587 {
542 {
588 if (dirstate_item_c_has_fallback_exec(self)) {
543 if (dirstate_item_c_has_fallback_exec(self)) {
589 if (self->flags & dirstate_flag_fallback_exec) {
544 if (self->flags & dirstate_flag_fallback_exec) {
590 Py_RETURN_TRUE;
545 Py_RETURN_TRUE;
591 } else {
546 } else {
592 Py_RETURN_FALSE;
547 Py_RETURN_FALSE;
593 }
548 }
594 } else {
549 } else {
595 Py_RETURN_NONE;
550 Py_RETURN_NONE;
596 }
551 }
597 };
552 };
598
553
599 static int dirstate_item_set_fallback_exec(dirstateItemObject *self,
554 static int dirstate_item_set_fallback_exec(dirstateItemObject *self,
600 PyObject *value)
555 PyObject *value)
601 {
556 {
602 if ((value == Py_None) || (value == NULL)) {
557 if ((value == Py_None) || (value == NULL)) {
603 self->flags &= ~dirstate_flag_has_fallback_exec;
558 self->flags &= ~dirstate_flag_has_fallback_exec;
604 } else {
559 } else {
605 self->flags |= dirstate_flag_has_fallback_exec;
560 self->flags |= dirstate_flag_has_fallback_exec;
606 if (PyObject_IsTrue(value)) {
561 if (PyObject_IsTrue(value)) {
607 self->flags |= dirstate_flag_fallback_exec;
562 self->flags |= dirstate_flag_fallback_exec;
608 } else {
563 } else {
609 self->flags &= ~dirstate_flag_fallback_exec;
564 self->flags &= ~dirstate_flag_fallback_exec;
610 }
565 }
611 }
566 }
612 return 0;
567 return 0;
613 };
568 };
614
569
615 static PyObject *
570 static PyObject *
616 dirstate_item_get_has_fallback_symlink(dirstateItemObject *self)
571 dirstate_item_get_has_fallback_symlink(dirstateItemObject *self)
617 {
572 {
618 if (dirstate_item_c_has_fallback_symlink(self)) {
573 if (dirstate_item_c_has_fallback_symlink(self)) {
619 Py_RETURN_TRUE;
574 Py_RETURN_TRUE;
620 } else {
575 } else {
621 Py_RETURN_FALSE;
576 Py_RETURN_FALSE;
622 }
577 }
623 };
578 };
624
579
625 static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self)
580 static PyObject *dirstate_item_get_fallback_symlink(dirstateItemObject *self)
626 {
581 {
627 if (dirstate_item_c_has_fallback_symlink(self)) {
582 if (dirstate_item_c_has_fallback_symlink(self)) {
628 if (self->flags & dirstate_flag_fallback_symlink) {
583 if (self->flags & dirstate_flag_fallback_symlink) {
629 Py_RETURN_TRUE;
584 Py_RETURN_TRUE;
630 } else {
585 } else {
631 Py_RETURN_FALSE;
586 Py_RETURN_FALSE;
632 }
587 }
633 } else {
588 } else {
634 Py_RETURN_NONE;
589 Py_RETURN_NONE;
635 }
590 }
636 };
591 };
637
592
638 static int dirstate_item_set_fallback_symlink(dirstateItemObject *self,
593 static int dirstate_item_set_fallback_symlink(dirstateItemObject *self,
639 PyObject *value)
594 PyObject *value)
640 {
595 {
641 if ((value == Py_None) || (value == NULL)) {
596 if ((value == Py_None) || (value == NULL)) {
642 self->flags &= ~dirstate_flag_has_fallback_symlink;
597 self->flags &= ~dirstate_flag_has_fallback_symlink;
643 } else {
598 } else {
644 self->flags |= dirstate_flag_has_fallback_symlink;
599 self->flags |= dirstate_flag_has_fallback_symlink;
645 if (PyObject_IsTrue(value)) {
600 if (PyObject_IsTrue(value)) {
646 self->flags |= dirstate_flag_fallback_symlink;
601 self->flags |= dirstate_flag_fallback_symlink;
647 } else {
602 } else {
648 self->flags &= ~dirstate_flag_fallback_symlink;
603 self->flags &= ~dirstate_flag_fallback_symlink;
649 }
604 }
650 }
605 }
651 return 0;
606 return 0;
652 };
607 };
653
608
654 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
609 static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
655 {
610 {
656 if (dirstate_item_c_tracked(self)) {
611 if (dirstate_item_c_tracked(self)) {
657 Py_RETURN_TRUE;
612 Py_RETURN_TRUE;
658 } else {
613 } else {
659 Py_RETURN_FALSE;
614 Py_RETURN_FALSE;
660 }
615 }
661 };
616 };
662 static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self)
617 static PyObject *dirstate_item_get_p1_tracked(dirstateItemObject *self)
663 {
618 {
664 if (self->flags & dirstate_flag_p1_tracked) {
619 if (self->flags & dirstate_flag_p1_tracked) {
665 Py_RETURN_TRUE;
620 Py_RETURN_TRUE;
666 } else {
621 } else {
667 Py_RETURN_FALSE;
622 Py_RETURN_FALSE;
668 }
623 }
669 };
624 };
670
625
671 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
626 static PyObject *dirstate_item_get_added(dirstateItemObject *self)
672 {
627 {
673 if (dirstate_item_c_added(self)) {
628 if (dirstate_item_c_added(self)) {
674 Py_RETURN_TRUE;
629 Py_RETURN_TRUE;
675 } else {
630 } else {
676 Py_RETURN_FALSE;
631 Py_RETURN_FALSE;
677 }
632 }
678 };
633 };
679
634
680 static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self)
635 static PyObject *dirstate_item_get_p2_info(dirstateItemObject *self)
681 {
636 {
682 if (self->flags & dirstate_flag_wc_tracked &&
637 if (self->flags & dirstate_flag_wc_tracked &&
683 self->flags & dirstate_flag_p2_info) {
638 self->flags & dirstate_flag_p2_info) {
684 Py_RETURN_TRUE;
639 Py_RETURN_TRUE;
685 } else {
640 } else {
686 Py_RETURN_FALSE;
641 Py_RETURN_FALSE;
687 }
642 }
688 };
643 };
689
644
690 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
645 static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
691 {
646 {
692 if (dirstate_item_c_merged(self)) {
647 if (dirstate_item_c_merged(self)) {
693 Py_RETURN_TRUE;
648 Py_RETURN_TRUE;
694 } else {
649 } else {
695 Py_RETURN_FALSE;
650 Py_RETURN_FALSE;
696 }
651 }
697 };
652 };
698
653
699 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
654 static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
700 {
655 {
701 if (dirstate_item_c_from_p2(self)) {
656 if (dirstate_item_c_from_p2(self)) {
702 Py_RETURN_TRUE;
657 Py_RETURN_TRUE;
703 } else {
658 } else {
704 Py_RETURN_FALSE;
659 Py_RETURN_FALSE;
705 }
660 }
706 };
661 };
707
662
708 static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self)
663 static PyObject *dirstate_item_get_maybe_clean(dirstateItemObject *self)
709 {
664 {
710 if (!(self->flags & dirstate_flag_wc_tracked)) {
665 if (!(self->flags & dirstate_flag_wc_tracked)) {
711 Py_RETURN_FALSE;
666 Py_RETURN_FALSE;
712 } else if (!(self->flags & dirstate_flag_p1_tracked)) {
667 } else if (!(self->flags & dirstate_flag_p1_tracked)) {
713 Py_RETURN_FALSE;
668 Py_RETURN_FALSE;
714 } else if (self->flags & dirstate_flag_p2_info) {
669 } else if (self->flags & dirstate_flag_p2_info) {
715 Py_RETURN_FALSE;
670 Py_RETURN_FALSE;
716 } else {
671 } else {
717 Py_RETURN_TRUE;
672 Py_RETURN_TRUE;
718 }
673 }
719 };
674 };
720
675
721 static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self)
676 static PyObject *dirstate_item_get_any_tracked(dirstateItemObject *self)
722 {
677 {
723 if (dirstate_item_c_any_tracked(self)) {
678 if (dirstate_item_c_any_tracked(self)) {
724 Py_RETURN_TRUE;
679 Py_RETURN_TRUE;
725 } else {
680 } else {
726 Py_RETURN_FALSE;
681 Py_RETURN_FALSE;
727 }
682 }
728 };
683 };
729
684
730 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
685 static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
731 {
686 {
732 if (dirstate_item_c_removed(self)) {
687 if (dirstate_item_c_removed(self)) {
733 Py_RETURN_TRUE;
688 Py_RETURN_TRUE;
734 } else {
689 } else {
735 Py_RETURN_FALSE;
690 Py_RETURN_FALSE;
736 }
691 }
737 };
692 };
738
693
739 static PyGetSetDef dirstate_item_getset[] = {
694 static PyGetSetDef dirstate_item_getset[] = {
740 {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
695 {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
741 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
696 {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
742 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
697 {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
743 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
698 {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
744 {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL,
699 {"has_fallback_exec", (getter)dirstate_item_get_has_fallback_exec, NULL,
745 "has_fallback_exec", NULL},
700 "has_fallback_exec", NULL},
746 {"fallback_exec", (getter)dirstate_item_get_fallback_exec,
701 {"fallback_exec", (getter)dirstate_item_get_fallback_exec,
747 (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL},
702 (setter)dirstate_item_set_fallback_exec, "fallback_exec", NULL},
748 {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink,
703 {"has_fallback_symlink", (getter)dirstate_item_get_has_fallback_symlink,
749 NULL, "has_fallback_symlink", NULL},
704 NULL, "has_fallback_symlink", NULL},
750 {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink,
705 {"fallback_symlink", (getter)dirstate_item_get_fallback_symlink,
751 (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL},
706 (setter)dirstate_item_set_fallback_symlink, "fallback_symlink", NULL},
752 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
707 {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
753 {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked",
708 {"p1_tracked", (getter)dirstate_item_get_p1_tracked, NULL, "p1_tracked",
754 NULL},
709 NULL},
755 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
710 {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
756 {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
711 {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
757 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
712 {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
758 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
713 {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
759 {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
714 {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
760 NULL},
715 NULL},
761 {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked",
716 {"any_tracked", (getter)dirstate_item_get_any_tracked, NULL, "any_tracked",
762 NULL},
717 NULL},
763 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
718 {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
764 {NULL} /* Sentinel */
719 {NULL} /* Sentinel */
765 };
720 };
766
721
767 PyTypeObject dirstateItemType = {
722 PyTypeObject dirstateItemType = {
768 PyVarObject_HEAD_INIT(NULL, 0) /* header */
723 PyVarObject_HEAD_INIT(NULL, 0) /* header */
769 "dirstate_tuple", /* tp_name */
724 "dirstate_tuple", /* tp_name */
770 sizeof(dirstateItemObject), /* tp_basicsize */
725 sizeof(dirstateItemObject), /* tp_basicsize */
771 0, /* tp_itemsize */
726 0, /* tp_itemsize */
772 (destructor)dirstate_item_dealloc, /* tp_dealloc */
727 (destructor)dirstate_item_dealloc, /* tp_dealloc */
773 0, /* tp_print */
728 0, /* tp_print */
774 0, /* tp_getattr */
729 0, /* tp_getattr */
775 0, /* tp_setattr */
730 0, /* tp_setattr */
776 0, /* tp_compare */
731 0, /* tp_compare */
777 0, /* tp_repr */
732 0, /* tp_repr */
778 0, /* tp_as_number */
733 0, /* tp_as_number */
779 0, /* tp_as_sequence */
734 0, /* tp_as_sequence */
780 0, /* tp_as_mapping */
735 0, /* tp_as_mapping */
781 0, /* tp_hash */
736 0, /* tp_hash */
782 0, /* tp_call */
737 0, /* tp_call */
783 0, /* tp_str */
738 0, /* tp_str */
784 0, /* tp_getattro */
739 0, /* tp_getattro */
785 0, /* tp_setattro */
740 0, /* tp_setattro */
786 0, /* tp_as_buffer */
741 0, /* tp_as_buffer */
787 Py_TPFLAGS_DEFAULT, /* tp_flags */
742 Py_TPFLAGS_DEFAULT, /* tp_flags */
788 "dirstate tuple", /* tp_doc */
743 "dirstate tuple", /* tp_doc */
789 0, /* tp_traverse */
744 0, /* tp_traverse */
790 0, /* tp_clear */
745 0, /* tp_clear */
791 0, /* tp_richcompare */
746 0, /* tp_richcompare */
792 0, /* tp_weaklistoffset */
747 0, /* tp_weaklistoffset */
793 0, /* tp_iter */
748 0, /* tp_iter */
794 0, /* tp_iternext */
749 0, /* tp_iternext */
795 dirstate_item_methods, /* tp_methods */
750 dirstate_item_methods, /* tp_methods */
796 0, /* tp_members */
751 0, /* tp_members */
797 dirstate_item_getset, /* tp_getset */
752 dirstate_item_getset, /* tp_getset */
798 0, /* tp_base */
753 0, /* tp_base */
799 0, /* tp_dict */
754 0, /* tp_dict */
800 0, /* tp_descr_get */
755 0, /* tp_descr_get */
801 0, /* tp_descr_set */
756 0, /* tp_descr_set */
802 0, /* tp_dictoffset */
757 0, /* tp_dictoffset */
803 0, /* tp_init */
758 0, /* tp_init */
804 0, /* tp_alloc */
759 0, /* tp_alloc */
805 dirstate_item_new, /* tp_new */
760 dirstate_item_new, /* tp_new */
806 };
761 };
807
762
808 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
763 static PyObject *parse_dirstate(PyObject *self, PyObject *args)
809 {
764 {
810 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
765 PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
811 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
766 PyObject *fname = NULL, *cname = NULL, *entry = NULL;
812 char state, *cur, *str, *cpos;
767 char state, *cur, *str, *cpos;
813 int mode, size, mtime;
768 int mode, size, mtime;
814 unsigned int flen, pos = 40;
769 unsigned int flen, pos = 40;
815 Py_ssize_t len = 40;
770 Py_ssize_t len = 40;
816 Py_ssize_t readlen;
771 Py_ssize_t readlen;
817
772
818 if (!PyArg_ParseTuple(args, "O!O!y#:parse_dirstate", &PyDict_Type,
773 if (!PyArg_ParseTuple(args, "O!O!y#:parse_dirstate", &PyDict_Type,
819 &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
774 &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
820 goto quit;
775 goto quit;
821 }
776 }
822
777
823 len = readlen;
778 len = readlen;
824
779
825 /* read parents */
780 /* read parents */
826 if (len < 40) {
781 if (len < 40) {
827 PyErr_SetString(PyExc_ValueError,
782 PyErr_SetString(PyExc_ValueError,
828 "too little data for parents");
783 "too little data for parents");
829 goto quit;
784 goto quit;
830 }
785 }
831
786
832 parents = Py_BuildValue("y#y#", str, (Py_ssize_t)20, str + 20,
787 parents = Py_BuildValue("y#y#", str, (Py_ssize_t)20, str + 20,
833 (Py_ssize_t)20);
788 (Py_ssize_t)20);
834 if (!parents) {
789 if (!parents) {
835 goto quit;
790 goto quit;
836 }
791 }
837
792
838 /* read filenames */
793 /* read filenames */
839 while (pos >= 40 && pos < len) {
794 while (pos >= 40 && pos < len) {
840 if (pos + 17 > len) {
795 if (pos + 17 > len) {
841 PyErr_SetString(PyExc_ValueError,
796 PyErr_SetString(PyExc_ValueError,
842 "overflow in dirstate");
797 "overflow in dirstate");
843 goto quit;
798 goto quit;
844 }
799 }
845 cur = str + pos;
800 cur = str + pos;
846 /* unpack header */
801 /* unpack header */
847 state = *cur;
802 state = *cur;
848 mode = getbe32(cur + 1);
803 mode = getbe32(cur + 1);
849 size = getbe32(cur + 5);
804 size = getbe32(cur + 5);
850 mtime = getbe32(cur + 9);
805 mtime = getbe32(cur + 9);
851 flen = getbe32(cur + 13);
806 flen = getbe32(cur + 13);
852 pos += 17;
807 pos += 17;
853 cur += 17;
808 cur += 17;
854 if (flen > len - pos) {
809 if (flen > len - pos) {
855 PyErr_SetString(PyExc_ValueError,
810 PyErr_SetString(PyExc_ValueError,
856 "overflow in dirstate");
811 "overflow in dirstate");
857 goto quit;
812 goto quit;
858 }
813 }
859
814
860 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
815 entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
861 size, mtime);
816 size, mtime);
862 if (!entry)
817 if (!entry)
863 goto quit;
818 goto quit;
864 cpos = memchr(cur, 0, flen);
819 cpos = memchr(cur, 0, flen);
865 if (cpos) {
820 if (cpos) {
866 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
821 fname = PyBytes_FromStringAndSize(cur, cpos - cur);
867 cname = PyBytes_FromStringAndSize(
822 cname = PyBytes_FromStringAndSize(
868 cpos + 1, flen - (cpos - cur) - 1);
823 cpos + 1, flen - (cpos - cur) - 1);
869 if (!fname || !cname ||
824 if (!fname || !cname ||
870 PyDict_SetItem(cmap, fname, cname) == -1 ||
825 PyDict_SetItem(cmap, fname, cname) == -1 ||
871 PyDict_SetItem(dmap, fname, entry) == -1) {
826 PyDict_SetItem(dmap, fname, entry) == -1) {
872 goto quit;
827 goto quit;
873 }
828 }
874 Py_DECREF(cname);
829 Py_DECREF(cname);
875 } else {
830 } else {
876 fname = PyBytes_FromStringAndSize(cur, flen);
831 fname = PyBytes_FromStringAndSize(cur, flen);
877 if (!fname ||
832 if (!fname ||
878 PyDict_SetItem(dmap, fname, entry) == -1) {
833 PyDict_SetItem(dmap, fname, entry) == -1) {
879 goto quit;
834 goto quit;
880 }
835 }
881 }
836 }
882 Py_DECREF(fname);
837 Py_DECREF(fname);
883 Py_DECREF(entry);
838 Py_DECREF(entry);
884 fname = cname = entry = NULL;
839 fname = cname = entry = NULL;
885 pos += flen;
840 pos += flen;
886 }
841 }
887
842
888 ret = parents;
843 ret = parents;
889 Py_INCREF(ret);
844 Py_INCREF(ret);
890 quit:
845 quit:
891 Py_XDECREF(fname);
846 Py_XDECREF(fname);
892 Py_XDECREF(cname);
847 Py_XDECREF(cname);
893 Py_XDECREF(entry);
848 Py_XDECREF(entry);
894 Py_XDECREF(parents);
849 Py_XDECREF(parents);
895 return ret;
850 return ret;
896 }
851 }
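parse_dirstate above walks the dirstate-v1 format: 40 bytes of parent node hashes, then one record per file, each a 17-byte header (state byte, big-endian mode, size, mtime and a filename length) followed by the filename, with an optional copy source appended after a NUL byte. Below is a standalone decoder for one such record, assuming that layout; it is illustrative, not Mercurial's parser.

    import struct

    # state (1 byte) + mode, size, mtime (signed 32-bit BE) + name length (u32 BE)
    HEADER = struct.Struct('>ciiiI')
    assert HEADER.size == 17

    def read_entry(data, pos):
        state, mode, size, mtime, flen = HEADER.unpack_from(data, pos)
        pos += HEADER.size
        name = data[pos:pos + flen]
        pos += flen
        copy_source = None
        if b'\0' in name:                 # "filename\0copysource"
            name, copy_source = name.split(b'\0', 1)
        return (state, mode, size, mtime, name, copy_source), pos

    entry = HEADER.pack(b'n', 0o644, 12, 0, 7) + b'foo.txt'
    decoded, end = read_entry(entry, 0)
    assert decoded[4] == b'foo.txt' and end == len(entry)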
897
852
898 /*
853 /*
899 * Efficiently pack a dirstate object into its on-disk format.
854 * Efficiently pack a dirstate object into its on-disk format.
900 */
855 */
901 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
856 static PyObject *pack_dirstate(PyObject *self, PyObject *args)
902 {
857 {
903 PyObject *packobj = NULL;
858 PyObject *packobj = NULL;
904 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
859 PyObject *map, *copymap, *pl, *mtime_unset = NULL;
905 Py_ssize_t nbytes, pos, l;
860 Py_ssize_t nbytes, pos, l;
906 PyObject *k, *v = NULL, *pn;
	PyObject *k, *v = NULL, *pn;
	char *p, *s;

	if (!PyArg_ParseTuple(args, "O!O!O!:pack_dirstate", &PyDict_Type, &map,
	                      &PyDict_Type, &copymap, &PyTuple_Type, &pl)) {
		return NULL;
	}

	if (PyTuple_Size(pl) != 2) {
		PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
		return NULL;
	}

	/* Figure out how much we need to allocate. */
	for (nbytes = 40, pos = 0; PyDict_Next(map, &pos, &k, &v);) {
		PyObject *c;
		if (!PyBytes_Check(k)) {
			PyErr_SetString(PyExc_TypeError, "expected string key");
			goto bail;
		}
		nbytes += PyBytes_GET_SIZE(k) + 17;
		c = PyDict_GetItem(copymap, k);
		if (c) {
			if (!PyBytes_Check(c)) {
				PyErr_SetString(PyExc_TypeError,
				                "expected string key");
				goto bail;
			}
			nbytes += PyBytes_GET_SIZE(c) + 1;
		}
	}

	packobj = PyBytes_FromStringAndSize(NULL, nbytes);
	if (packobj == NULL) {
		goto bail;
	}

	p = PyBytes_AS_STRING(packobj);

	pn = PyTuple_GET_ITEM(pl, 0);
	if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
		PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
		goto bail;
	}
	memcpy(p, s, l);
	p += 20;
	pn = PyTuple_GET_ITEM(pl, 1);
	if (PyBytes_AsStringAndSize(pn, &s, &l) == -1 || l != 20) {
		PyErr_SetString(PyExc_TypeError, "expected a 20-byte hash");
		goto bail;
	}
	memcpy(p, s, l);
	p += 20;

	for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
		dirstateItemObject *tuple;
		char state;
		int mode, size, mtime;
		Py_ssize_t len, l;
		PyObject *o;
		char *t;

		if (!dirstate_tuple_check(v)) {
			PyErr_SetString(PyExc_TypeError,
			                "expected a dirstate tuple");
			goto bail;
		}
		tuple = (dirstateItemObject *)v;

		state = dirstate_item_c_v1_state(tuple);
		mode = dirstate_item_c_v1_mode(tuple);
		size = dirstate_item_c_v1_size(tuple);
		mtime = dirstate_item_c_v1_mtime(tuple);
		*p++ = state;
		putbe32((uint32_t)mode, p);
		putbe32((uint32_t)size, p + 4);
		putbe32((uint32_t)mtime, p + 8);
		t = p + 12;
		p += 16;
		len = PyBytes_GET_SIZE(k);
		memcpy(p, PyBytes_AS_STRING(k), len);
		p += len;
		o = PyDict_GetItem(copymap, k);
		if (o) {
			*p++ = '\0';
			l = PyBytes_GET_SIZE(o);
			memcpy(p, PyBytes_AS_STRING(o), l);
			p += l;
			len += l + 1;
		}
		putbe32((uint32_t)len, t);
	}

	pos = p - PyBytes_AS_STRING(packobj);
	if (pos != nbytes) {
		PyErr_Format(PyExc_SystemError, "bad dirstate size: %ld != %ld",
		             (long)pos, (long)nbytes);
		goto bail;
	}

	return packobj;
bail:
	Py_XDECREF(mtime_unset);
	Py_XDECREF(packobj);
	Py_XDECREF(v);
	return NULL;
}
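
/*
 * Editorial note (not part of the original source): the functions below
 * parse version-1 obsolescence markers. As fm1readmarker() shows, each
 * marker starts with a fixed 19-byte header (FM1_HEADER_SIZE): a big-endian
 * uint32 total size, a float64 date, an int16 timezone offset and a uint16
 * flags field, followed by three one-byte counts (successors, parents,
 * metadata pairs). The precursor, successor and parent hashes come next
 * (20 bytes each, or 32 when USING_SHA_256 is set), then the metadata
 * key/value sizes and their bytes.
 */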

#define BUMPED_FIX 1
#define USING_SHA_256 2
#define FM1_HEADER_SIZE (4 + 8 + 2 + 2 + 1 + 1 + 1)

static PyObject *readshas(const char *source, unsigned char num,
                          Py_ssize_t hashwidth)
{
	int i;
	PyObject *list = PyTuple_New(num);
	if (list == NULL) {
		return NULL;
	}
	for (i = 0; i < num; i++) {
		PyObject *hash = PyBytes_FromStringAndSize(source, hashwidth);
		if (hash == NULL) {
			Py_DECREF(list);
			return NULL;
		}
		PyTuple_SET_ITEM(list, i, hash);
		source += hashwidth;
	}
	return list;
}

static PyObject *fm1readmarker(const char *databegin, const char *dataend,
                               uint32_t *msize)
{
	const char *data = databegin;
	const char *meta;

	double mtime;
	int16_t tz;
	uint16_t flags;
	unsigned char nsuccs, nparents, nmetadata;
	Py_ssize_t hashwidth = 20;

	PyObject *prec = NULL, *parents = NULL, *succs = NULL;
	PyObject *metadata = NULL, *ret = NULL;
	int i;

	if (data + FM1_HEADER_SIZE > dataend) {
		goto overflow;
	}

	*msize = getbe32(data);
	data += 4;
	mtime = getbefloat64(data);
	data += 8;
	tz = getbeint16(data);
	data += 2;
	flags = getbeuint16(data);
	data += 2;

	if (flags & USING_SHA_256) {
		hashwidth = 32;
	}

	nsuccs = (unsigned char)(*data++);
	nparents = (unsigned char)(*data++);
	nmetadata = (unsigned char)(*data++);

	if (databegin + *msize > dataend) {
		goto overflow;
	}
	dataend = databegin + *msize; /* narrow down to marker size */

	if (data + hashwidth > dataend) {
		goto overflow;
	}
	prec = PyBytes_FromStringAndSize(data, hashwidth);
	data += hashwidth;
	if (prec == NULL) {
		goto bail;
	}

	if (data + nsuccs * hashwidth > dataend) {
		goto overflow;
	}
	succs = readshas(data, nsuccs, hashwidth);
	if (succs == NULL) {
		goto bail;
	}
	data += nsuccs * hashwidth;

	if (nparents == 1 || nparents == 2) {
		if (data + nparents * hashwidth > dataend) {
			goto overflow;
		}
		parents = readshas(data, nparents, hashwidth);
		if (parents == NULL) {
			goto bail;
		}
		data += nparents * hashwidth;
	} else {
		parents = Py_None;
		Py_INCREF(parents);
	}

	if (data + 2 * nmetadata > dataend) {
		goto overflow;
	}
	meta = data + (2 * nmetadata);
	metadata = PyTuple_New(nmetadata);
	if (metadata == NULL) {
		goto bail;
	}
	for (i = 0; i < nmetadata; i++) {
		PyObject *tmp, *left = NULL, *right = NULL;
		Py_ssize_t leftsize = (unsigned char)(*data++);
		Py_ssize_t rightsize = (unsigned char)(*data++);
		if (meta + leftsize + rightsize > dataend) {
			goto overflow;
		}
		left = PyBytes_FromStringAndSize(meta, leftsize);
		meta += leftsize;
		right = PyBytes_FromStringAndSize(meta, rightsize);
		meta += rightsize;
		tmp = PyTuple_New(2);
		if (!left || !right || !tmp) {
			Py_XDECREF(left);
			Py_XDECREF(right);
			Py_XDECREF(tmp);
			goto bail;
		}
		PyTuple_SET_ITEM(tmp, 0, left);
		PyTuple_SET_ITEM(tmp, 1, right);
		PyTuple_SET_ITEM(metadata, i, tmp);
	}
	ret = Py_BuildValue("(OOHO(di)O)", prec, succs, flags, metadata, mtime,
	                    (int)tz * 60, parents);
	goto bail; /* return successfully */

overflow:
	PyErr_SetString(PyExc_ValueError, "overflow in obsstore");
bail:
	Py_XDECREF(prec);
	Py_XDECREF(succs);
	Py_XDECREF(metadata);
	Py_XDECREF(parents);
	return ret;
}

static PyObject *fm1readmarkers(PyObject *self, PyObject *args)
{
	const char *data, *dataend;
	Py_ssize_t datalen, offset, stop;
	PyObject *markers = NULL;

	if (!PyArg_ParseTuple(args, "y#nn", &data, &datalen, &offset, &stop)) {
		return NULL;
	}
	if (offset < 0) {
		PyErr_SetString(PyExc_ValueError,
		                "invalid negative offset in fm1readmarkers");
		return NULL;
	}
	if (stop > datalen) {
		PyErr_SetString(
		    PyExc_ValueError,
		    "stop longer than data length in fm1readmarkers");
		return NULL;
	}
	dataend = data + datalen;
	data += offset;
	markers = PyList_New(0);
	if (!markers) {
		return NULL;
	}
	while (offset < stop) {
		uint32_t msize;
		int error;
		PyObject *record = fm1readmarker(data, dataend, &msize);
		if (!record) {
			goto bail;
		}
		error = PyList_Append(markers, record);
		Py_DECREF(record);
		if (error) {
			goto bail;
		}
		data += msize;
		offset += msize;
	}
	return markers;
bail:
	Py_DECREF(markers);
	return NULL;
}

static char parsers_doc[] = "Efficient content parsing.";

PyObject *encodedir(PyObject *self, PyObject *args);
PyObject *pathencode(PyObject *self, PyObject *args);
PyObject *lowerencode(PyObject *self, PyObject *args);
PyObject *parse_index2(PyObject *self, PyObject *args, PyObject *kwargs);

static PyMethodDef methods[] = {
    {"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
    {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
    {"parse_index2", (PyCFunction)parse_index2, METH_VARARGS | METH_KEYWORDS,
     "parse a revlog index\n"},
    {"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
    {"asciilower", asciilower, METH_VARARGS, "lowercase an ASCII string\n"},
    {"asciiupper", asciiupper, METH_VARARGS, "uppercase an ASCII string\n"},
    {"dict_new_presized", dict_new_presized, METH_VARARGS,
     "construct a dict with an expected size\n"},
    {"make_file_foldmap", make_file_foldmap, METH_VARARGS,
     "make file foldmap\n"},
    {"jsonescapeu8fast", jsonescapeu8fast, METH_VARARGS,
     "escape a UTF-8 byte string to JSON (fast path)\n"},
    {"encodedir", encodedir, METH_VARARGS, "encodedir a path\n"},
    {"pathencode", pathencode, METH_VARARGS, "fncache-encode a path\n"},
    {"lowerencode", lowerencode, METH_VARARGS, "lower-encode a path\n"},
    {"fm1readmarkers", fm1readmarkers, METH_VARARGS,
     "parse v1 obsolete markers\n"},
    {NULL, NULL}};

void dirs_module_init(PyObject *mod);
void manifest_module_init(PyObject *mod);
void revlog_module_init(PyObject *mod);

static const int version = 20;

static void module_init(PyObject *mod)
{
	PyModule_AddIntConstant(mod, "version", version);

	/* This module constant has two purposes. First, it lets us unit test
	 * the ImportError raised without hard-coding any error text. This
	 * means we can change the text in the future without breaking tests,
	 * even across changesets without a recompile. Second, its presence
	 * can be used to determine whether the version-checking logic is
	 * present, which also helps in testing across changesets without a
	 * recompile. Note that this means the pure-Python version of parsers
	 * should not have this module constant. */
	PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);

	dirs_module_init(mod);
	manifest_module_init(mod);
	revlog_module_init(mod);

	if (PyType_Ready(&dirstateItemType) < 0) {
		return;
	}
	Py_INCREF(&dirstateItemType);
	PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
}

static int check_python_version(void)
{
	PyObject *sys = PyImport_ImportModule("sys"), *ver;
	long hexversion;
	if (!sys) {
		return -1;
	}
	ver = PyObject_GetAttrString(sys, "hexversion");
	Py_DECREF(sys);
	if (!ver) {
		return -1;
	}
	hexversion = PyLong_AsLong(ver);
	Py_DECREF(ver);
	/* sys.hexversion is a 32-bit number by default, so the -1 case
	 * should only occur in unusual circumstances (e.g. if sys.hexversion
	 * is manually set to an invalid value). */
	if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
		PyErr_Format(PyExc_ImportError,
		             "%s: The Mercurial extension "
		             "modules were compiled with Python " PY_VERSION
		             ", but "
		             "Mercurial is currently using Python with "
		             "sys.hexversion=%ld: "
		             "Python %s\n at: %s",
		             versionerrortext, hexversion, Py_GetVersion(),
		             Py_GetProgramFullPath());
		return -1;
	}
	return 0;
}

static struct PyModuleDef parsers_module = {PyModuleDef_HEAD_INIT, "parsers",
                                            parsers_doc, -1, methods};

PyMODINIT_FUNC PyInit_parsers(void)
{
	PyObject *mod;

	if (check_python_version() == -1)
		return NULL;
	mod = PyModule_Create(&parsers_module);
	module_init(mod);
	return mod;
}
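
(Editorial aside: as a reading aid for the C code above, here is a minimal,
hypothetical sketch of the dirstate-v1 byte layout that pack_dirstate() emits:
a 40-byte header holding the two parent node ids, then one record per file
made of a 1-byte state, three big-endian 32-bit integers (mode, size, mtime),
a big-endian 32-bit length, and `length` bytes holding the filename,
optionally followed by a NUL byte and the copy source. The helper name below
is illustrative only and is not part of Mercurial's API; the real decoder is
the `parse_dirstate` function registered in the method table above.)

    import struct

    def iter_dirstate_v1(data):
        """Yield (state, mode, size, mtime, filename, copysource) tuples."""
        # data[:20] and data[20:40] hold the two working-copy parent node ids
        pos = 40
        while pos < len(data):
            state = data[pos:pos + 1]
            mode, size, mtime, length = struct.unpack(
                '>iiii', data[pos + 1:pos + 17]
            )
            name = data[pos + 17:pos + 17 + length]
            copysource = None
            if b'\0' in name:
                name, copysource = name.split(b'\0', 1)
            yield state, mode, size, mtime, name, copysource
            pos += 17 + length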
@@ -1,974 +1,974 b''
# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import io
import stat
import struct
import zlib

from ..node import (
    nullrev,
    sha1nodeconstants,
)
from ..thirdparty import attr
from .. import (
    error,
    revlogutils,
    util,
)

from ..revlogutils import nodemap as nodemaputil
from ..revlogutils import constants as revlog_constants

stringio = io.BytesIO


_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1

# Bits of the `flags` byte inside a node in the file format
DIRSTATE_V2_WDIR_TRACKED = 1 << 0
DIRSTATE_V2_P1_TRACKED = 1 << 1
DIRSTATE_V2_P2_INFO = 1 << 2
DIRSTATE_V2_MODE_EXEC_PERM = 1 << 3
DIRSTATE_V2_MODE_IS_SYMLINK = 1 << 4
DIRSTATE_V2_HAS_FALLBACK_EXEC = 1 << 5
DIRSTATE_V2_FALLBACK_EXEC = 1 << 6
DIRSTATE_V2_HAS_FALLBACK_SYMLINK = 1 << 7
DIRSTATE_V2_FALLBACK_SYMLINK = 1 << 8
DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED = 1 << 9
DIRSTATE_V2_HAS_MODE_AND_SIZE = 1 << 10
DIRSTATE_V2_HAS_MTIME = 1 << 11
DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS = 1 << 12
DIRSTATE_V2_DIRECTORY = 1 << 13
DIRSTATE_V2_ALL_UNKNOWN_RECORDED = 1 << 14
DIRSTATE_V2_ALL_IGNORED_RECORDED = 1 << 15

@attr.s(slots=True, init=False)
class DirstateItem:
    """represent a dirstate entry

    It holds multiple attributes

    # about file tracking
    - wc_tracked: is the file tracked by the working copy
    - p1_tracked: is the file tracked in the working copy's first parent
    - p2_info: the file has been involved in some merge operation. Either
               because it was actually merged, or because the p2 version was
               ahead, or because some rename moved it there. In either case
               `hg status` will want it displayed as modified.

    # about the file state expected from p1 manifest:
    - mode: the file mode in p1
    - size: the file size in p1

    These values can be set to None, which means we don't have a meaningful
    value to compare with. Either because we don't really care about them, as
    the `status` is known without having to look at the disk, or because we
    don't know these right now and a full comparison will be needed to find
    out if the file is clean.

    # about the file state on disk last time we saw it:
    - mtime: the last known clean mtime for the file.

    This value can be set to None if no cacheable state exists. Either because
    we do not care (see previous section) or because we could not cache
    something yet.
    """

    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_info = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime_s = attr.ib()
    _mtime_ns = attr.ib()
    _fallback_exec = attr.ib()
    _fallback_symlink = attr.ib()
    _mtime_second_ambiguous = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_info=False,
        has_meaningful_data=True,
        has_meaningful_mtime=True,
        parentfiledata=None,
        fallback_exec=None,
        fallback_symlink=None,
    ):
        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_info = p2_info

        self._fallback_exec = fallback_exec
        self._fallback_symlink = fallback_symlink

        self._mode = None
        self._size = None
        self._mtime_s = None
        self._mtime_ns = None
        self._mtime_second_ambiguous = False
        if parentfiledata is None:
            has_meaningful_mtime = False
            has_meaningful_data = False
        elif parentfiledata[2] is None:
            has_meaningful_mtime = False
        if has_meaningful_data:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
        if has_meaningful_mtime:
            (
                self._mtime_s,
                self._mtime_ns,
                self._mtime_second_ambiguous,
            ) = parentfiledata[2]

    @classmethod
    def from_v2_data(cls, flags, size, mtime_s, mtime_ns):
        """Build a new DirstateItem object from V2 data"""
        has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
        has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME)
        mode = None

        if flags & DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
            # we do not have support for this flag in the code yet,
            # force a lookup for this file.
            has_mode_size = False
            has_meaningful_mtime = False

        fallback_exec = None
        if flags & DIRSTATE_V2_HAS_FALLBACK_EXEC:
            fallback_exec = flags & DIRSTATE_V2_FALLBACK_EXEC

        fallback_symlink = None
        if flags & DIRSTATE_V2_HAS_FALLBACK_SYMLINK:
            fallback_symlink = flags & DIRSTATE_V2_FALLBACK_SYMLINK

        if has_mode_size:
            assert stat.S_IXUSR == 0o100
            if flags & DIRSTATE_V2_MODE_EXEC_PERM:
                mode = 0o755
            else:
                mode = 0o644
            if flags & DIRSTATE_V2_MODE_IS_SYMLINK:
                mode |= stat.S_IFLNK
            else:
                mode |= stat.S_IFREG

        second_ambiguous = flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS
        return cls(
            wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
            p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
            p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
            has_meaningful_data=has_mode_size,
            has_meaningful_mtime=has_meaningful_mtime,
            parentfiledata=(mode, size, (mtime_s, mtime_ns, second_ambiguous)),
            fallback_exec=fallback_exec,
            fallback_symlink=fallback_symlink,
        )

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
        elif state == b'a':
            return cls(wc_tracked=True)
        elif state == b'r':
            if size == NONNORMAL:
                p1_tracked = True
                p2_info = True
            elif size == FROM_P2:
                p1_tracked = False
                p2_info = True
            else:
                p1_tracked = True
                p2_info = False
            return cls(p1_tracked=p1_tracked, p2_info=p2_info)
        elif state == b'n':
            if size == FROM_P2:
                return cls(wc_tracked=True, p2_info=True)
            elif size == NONNORMAL:
                return cls(wc_tracked=True, p1_tracked=True)
            elif mtime == AMBIGUOUS_TIME:
                return cls(
                    wc_tracked=True,
                    p1_tracked=True,
                    has_meaningful_mtime=False,
                    parentfiledata=(mode, size, (42, 0, False)),
                )
            else:
                return cls(
                    wc_tracked=True,
                    p1_tracked=True,
                    parentfiledata=(mode, size, (mtime, 0, False)),
                )
        else:
            raise RuntimeError(b'unknown state: %s' % state)

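
    # Editorial comment (not in the original source): from_v1_data() above
    # maps the four frozen v1 fields onto the tracking booleans. For example,
    # a clean file recorded as (b'n', 0o644, 12, 1700000000) becomes
    # wc_tracked=True and p1_tracked=True with (mode, size, mtime) cached for
    # later comparison, while (b'n', 0, FROM_P2, 0) becomes wc_tracked=True
    # and p2_info=True with no cached stat data.
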
    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._mtime_s = None
        self._mtime_ns = None

    def set_clean(self, mode, size, mtime):
        """mark a file as "clean", cancelling a potential "possibly dirty" call

        Note: this function is a descendant of `dirstate.normal` and is
        currently expected to be called on "normal" entries only. There is no
        reason for this not to change in the future, as long as the code is
        updated to preserve the proper state of the non-normal files.
        """
        self._wc_tracked = True
        self._p1_tracked = True
        self._mode = mode
        self._size = size
        self._mtime_s, self._mtime_ns, self._mtime_second_ambiguous = mtime

    def set_tracked(self):
        """mark a file as tracked in the working copy

        This will ultimately be called by commands like `hg add`.
        """
        self._wc_tracked = True
        # `set_tracked` is replacing various `normallookup` calls. So we mark
        # the files as needing lookup
        #
        # Consider dropping this in the future in favor of something less broad.
        self._mtime_s = None
        self._mtime_ns = None

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by commands like `hg remove`.
        """
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime_s = None
        self._mtime_ns = None

    def drop_merge_data(self):
        """remove all "merge-only" information from a DirstateItem

        This is to be called by the dirstatemap code when the second parent is dropped
        """
        if self._p2_info:
            self._p2_info = False
            self._mode = None
            self._size = None
            self._mtime_s = None
            self._mtime_ns = None

    @property
    def mode(self):
        return self._v1_mode()

    @property
    def size(self):
        return self._v1_size()

    @property
    def mtime(self):
        return self._v1_mtime()

    def mtime_likely_equal_to(self, other_mtime):
        self_sec = self._mtime_s
        if self_sec is None:
            return False
        self_ns = self._mtime_ns
        other_sec, other_ns, second_ambiguous = other_mtime
        if self_sec != other_sec:
            # seconds are different, these mtimes are definitely not equal
            return False
        elif other_ns == 0 or self_ns == 0:
            # at least one side has no nanosecond information

            if self._mtime_second_ambiguous:
                # We cannot trust the mtime in this case
                return False
            else:
                # the "seconds" value was reliable on its own. We are good to go.
                return True
        else:
            # We have nanosecond information, let us use it!
            return self_ns == other_ns

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        if not self.any_tracked:
            return b'?'
        return self._v1_state()

    @property
    def has_fallback_exec(self):
        """True if "fallback" information is available for the "exec" bit

        Fallback information can be stored in the dirstate to keep track of a
        filesystem attribute tracked by Mercurial when the underlying file
        system or operating system does not support that property (e.g.
        Windows).

        Not all versions of the dirstate on-disk storage support preserving
        this information.
        """
        return self._fallback_exec is not None

    @property
    def fallback_exec(self):
        """ "fallback" information for the executable bit

        True if the file should be considered executable when we cannot get
        this information from the file system. False if it should be
        considered non-executable.

        See has_fallback_exec for details."""
        return self._fallback_exec

    @fallback_exec.setter
    def set_fallback_exec(self, value):
        """control "fallback" executable bit

        Set to:
        - True if the file should be considered executable,
        - False if the file should be considered non-executable,
        - None if we do not have valid fallback data.

        See has_fallback_exec for details."""
        if value is None:
            self._fallback_exec = None
        else:
            self._fallback_exec = bool(value)

    @property
    def has_fallback_symlink(self):
        """True if "fallback" information is available for symlink status

        Fallback information can be stored in the dirstate to keep track of a
        filesystem attribute tracked by Mercurial when the underlying file
        system or operating system does not support that property (e.g.
        Windows).

        Not all versions of the dirstate on-disk storage support preserving
        this information."""
        return self._fallback_symlink is not None

    @property
    def fallback_symlink(self):
        """ "fallback" information for symlink status

        True if the file should be considered a symlink when we cannot get
        this information from the file system. False if it should be
        considered not a symlink.

        See has_fallback_symlink for details."""
        return self._fallback_symlink

    @fallback_symlink.setter
    def set_fallback_symlink(self, value):
        """control "fallback" symlink status

        Set to:
        - True if the file should be considered a symlink,
        - False if the file should be considered not a symlink,
        - None if we do not have valid fallback data.

        See has_fallback_symlink for details."""
        if value is None:
            self._fallback_symlink = None
        else:
            self._fallback_symlink = bool(value)

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def any_tracked(self):
        """True if the file is tracked anywhere (wc or parents)"""
        return self._wc_tracked or self._p1_tracked or self._p2_info

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_info)

    @property
    def maybe_clean(self):
        """True if the file has a chance to be in the "clean" state"""
        if not self._wc_tracked:
            return False
        elif not self._p1_tracked:
            return False
        elif self._p2_info:
            return False
        return True

    @property
    def p1_tracked(self):
        """True if the file is tracked in the first parent manifest"""
        return self._p1_tracked

    @property
    def p2_info(self):
        """True if the file needed to merge or apply any input from p2

        See the class documentation for details.
        """
        return self._wc_tracked and self._p2_info

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_info)

    def v2_data(self):
        """Returns (flags, size, mtime_s, mtime_ns) for v2 serialization"""
        flags = 0
        if self._wc_tracked:
            flags |= DIRSTATE_V2_WDIR_TRACKED
        if self._p1_tracked:
            flags |= DIRSTATE_V2_P1_TRACKED
        if self._p2_info:
            flags |= DIRSTATE_V2_P2_INFO
        if self._mode is not None and self._size is not None:
            flags |= DIRSTATE_V2_HAS_MODE_AND_SIZE
            if self.mode & stat.S_IXUSR:
                flags |= DIRSTATE_V2_MODE_EXEC_PERM
            if stat.S_ISLNK(self.mode):
                flags |= DIRSTATE_V2_MODE_IS_SYMLINK
        if self._mtime_s is not None:
            flags |= DIRSTATE_V2_HAS_MTIME
            if self._mtime_second_ambiguous:
                flags |= DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS

        if self._fallback_exec is not None:
            flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC
            if self._fallback_exec:
                flags |= DIRSTATE_V2_FALLBACK_EXEC

        if self._fallback_symlink is not None:
            flags |= DIRSTATE_V2_HAS_FALLBACK_SYMLINK
            if self._fallback_symlink:
                flags |= DIRSTATE_V2_FALLBACK_SYMLINK

        # Note: we do not need to do anything regarding
        # DIRSTATE_V2_ALL_UNKNOWN_RECORDED and DIRSTATE_V2_ALL_IGNORED_RECORDED
        # since we never set _DIRSTATE_V2_HAS_DIRECTORY_MTIME
        return (flags, self._size or 0, self._mtime_s or 0, self._mtime_ns or 0)

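
    # Editorial comment (not in the original source): v2_data() is the
    # counterpart of from_v2_data() above, so the two can round-trip, e.g.:
    #
    #   item = DirstateItem.from_v1_data(b'n', 0o644, 12, 1700000000)
    #   flags, size, mtime_s, mtime_ns = item.v2_data()
    #   again = DirstateItem.from_v2_data(flags, size, mtime_s, mtime_ns)
    #
    # Flags that were never set (for example the fallback bits) stay at zero,
    # so `again` carries the same tracking state as `item`.
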
    def _v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return b'r'
        elif self._p1_tracked and self._p2_info:
            return b'm'
        elif self.added:
            return b'a'
        else:
            return b'n'

    def _v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def _v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed and self._p1_tracked and self._p2_info:
            return NONNORMAL
        elif self._p2_info:
            return FROM_P2
        elif self.removed:
            return 0
        elif self.added:
            return NONNORMAL
        elif self._size is None:
            return NONNORMAL
        else:
            return self._size

    def _v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return 0
        elif self._mtime_s is None:
            return AMBIGUOUS_TIME
        elif self._p2_info:
            return AMBIGUOUS_TIME
        elif not self._p1_tracked:
            return AMBIGUOUS_TIME
        elif self._mtime_second_ambiguous:
            return AMBIGUOUS_TIME
        else:
            return self._mtime_s

def gettype(q):
    return int(q & 0xFFFF)


class BaseIndexObject:
    # Can I be passed to an algorithm implemented in Rust?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.RANK_UNKNOWN,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError(i)

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.RANK_UNKNOWN,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p


class IndexObject(BaseIndexObject):
    def __init__(self, data):
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
694 self._lgt = len(data) // self.entry_size
694 self._lgt = len(data) // self.entry_size
695 self._extra = []
695 self._extra = []
696
696
697 def _calculate_index(self, i):
697 def _calculate_index(self, i):
698 return i * self.entry_size
698 return i * self.entry_size
699
699
700 def __delitem__(self, i):
700 def __delitem__(self, i):
701 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
701 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
702 raise ValueError(b"deleting slices only supports a:-1 with step 1")
702 raise ValueError(b"deleting slices only supports a:-1 with step 1")
703 i = i.start
703 i = i.start
704 self._check_index(i)
704 self._check_index(i)
705 self._stripnodes(i)
705 self._stripnodes(i)
706 if i < self._lgt:
706 if i < self._lgt:
707 self._data = self._data[: i * self.entry_size]
707 self._data = self._data[: i * self.entry_size]
708 self._lgt = i
708 self._lgt = i
709 self._extra = []
709 self._extra = []
710 else:
710 else:
711 self._extra = self._extra[: i - self._lgt]
711 self._extra = self._extra[: i - self._lgt]
712
712
713
713
714 class PersistentNodeMapIndexObject(IndexObject):
714 class PersistentNodeMapIndexObject(IndexObject):
715 """a debug-oriented class to test the persistent nodemap
715 """a debug-oriented class to test the persistent nodemap
716
716
717 We need a simple Python object to test the API and higher-level behavior. See
717 We need a simple Python object to test the API and higher-level behavior. See
718 the Rust implementation for more serious usage. This should be used only
718 the Rust implementation for more serious usage. This should be used only
719 through the dedicated `devel.persistent-nodemap` config.
719 through the dedicated `devel.persistent-nodemap` config.
720 """
720 """
721
721
722 def nodemap_data_all(self):
722 def nodemap_data_all(self):
723 """Return bytes containing a full serialization of a nodemap
723 """Return bytes containing a full serialization of a nodemap
724
724
725 The nodemap should be valid for the full set of revisions in the
725 The nodemap should be valid for the full set of revisions in the
726 index."""
726 index."""
727 return nodemaputil.persistent_data(self)
727 return nodemaputil.persistent_data(self)
728
728
729 def nodemap_data_incremental(self):
729 def nodemap_data_incremental(self):
730 """Return bytes containing an incremental update to the persistent nodemap
730 """Return bytes containing an incremental update to the persistent nodemap
731
731
732 This contains the data for an append-only update of the data provided
732 This contains the data for an append-only update of the data provided
733 in the last call to `update_nodemap_data`.
733 in the last call to `update_nodemap_data`.
734 """
734 """
735 if self._nm_root is None:
735 if self._nm_root is None:
736 return None
736 return None
737 docket = self._nm_docket
737 docket = self._nm_docket
738 changed, data = nodemaputil.update_persistent_data(
738 changed, data = nodemaputil.update_persistent_data(
739 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
739 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
740 )
740 )
741
741
742 self._nm_root = self._nm_max_idx = self._nm_docket = None
742 self._nm_root = self._nm_max_idx = self._nm_docket = None
743 return docket, changed, data
743 return docket, changed, data
744
744
745 def update_nodemap_data(self, docket, nm_data):
745 def update_nodemap_data(self, docket, nm_data):
746 """provide full block of persisted binary data for a nodemap
746 """provide full block of persisted binary data for a nodemap
747
747
748 The data are expected to come from disk. See `nodemap_data_all` for a
748 The data are expected to come from disk. See `nodemap_data_all` for a
749 producer of such data."""
749 producer of such data."""
750 if nm_data is not None:
750 if nm_data is not None:
751 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
751 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
752 if self._nm_root:
752 if self._nm_root:
753 self._nm_docket = docket
753 self._nm_docket = docket
754 else:
754 else:
755 self._nm_root = self._nm_max_idx = self._nm_docket = None
755 self._nm_root = self._nm_max_idx = self._nm_docket = None
756
756
757
757
758 class InlinedIndexObject(BaseIndexObject):
758 class InlinedIndexObject(BaseIndexObject):
759 def __init__(self, data, inline=0):
759 def __init__(self, data, inline=0):
760 self._data = data
760 self._data = data
761 self._lgt = self._inline_scan(None)
761 self._lgt = self._inline_scan(None)
762 self._inline_scan(self._lgt)
762 self._inline_scan(self._lgt)
763 self._extra = []
763 self._extra = []
764
764
765 def _inline_scan(self, lgt):
765 def _inline_scan(self, lgt):
766 off = 0
766 off = 0
767 if lgt is not None:
767 if lgt is not None:
768 self._offsets = [0] * lgt
768 self._offsets = [0] * lgt
769 count = 0
769 count = 0
770 while off <= len(self._data) - self.entry_size:
770 while off <= len(self._data) - self.entry_size:
771 start = off + self.big_int_size
771 start = off + self.big_int_size
772 (s,) = struct.unpack(
772 (s,) = struct.unpack(
773 b'>i',
773 b'>i',
774 self._data[start : start + self.int_size],
774 self._data[start : start + self.int_size],
775 )
775 )
776 if lgt is not None:
776 if lgt is not None:
777 self._offsets[count] = off
777 self._offsets[count] = off
778 count += 1
778 count += 1
779 off += self.entry_size + s
779 off += self.entry_size + s
780 if off != len(self._data):
780 if off != len(self._data):
781 raise ValueError(b"corrupted data")
781 raise ValueError(b"corrupted data")
782 return count
782 return count
783
783
784 def __delitem__(self, i):
784 def __delitem__(self, i):
785 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
785 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
786 raise ValueError(b"deleting slices only supports a:-1 with step 1")
786 raise ValueError(b"deleting slices only supports a:-1 with step 1")
787 i = i.start
787 i = i.start
788 self._check_index(i)
788 self._check_index(i)
789 self._stripnodes(i)
789 self._stripnodes(i)
790 if i < self._lgt:
790 if i < self._lgt:
791 self._offsets = self._offsets[:i]
791 self._offsets = self._offsets[:i]
792 self._lgt = i
792 self._lgt = i
793 self._extra = []
793 self._extra = []
794 else:
794 else:
795 self._extra = self._extra[: i - self._lgt]
795 self._extra = self._extra[: i - self._lgt]
796
796
797 def _calculate_index(self, i):
797 def _calculate_index(self, i):
798 return self._offsets[i]
798 return self._offsets[i]
799
799
800
800
801 def parse_index2(data, inline, format=revlog_constants.REVLOGV1):
801 def parse_index2(data, inline, format=revlog_constants.REVLOGV1):
802 if format == revlog_constants.CHANGELOGV2:
802 if format == revlog_constants.CHANGELOGV2:
803 return parse_index_cl_v2(data)
803 return parse_index_cl_v2(data)
804 if not inline:
804 if not inline:
805 if format == revlog_constants.REVLOGV2:
805 if format == revlog_constants.REVLOGV2:
806 cls = IndexObject2
806 cls = IndexObject2
807 else:
807 else:
808 cls = IndexObject
808 cls = IndexObject
809 return cls(data), None
809 return cls(data), None
810 cls = InlinedIndexObject
810 cls = InlinedIndexObject
811 return cls(data, inline), (0, data)
811 return cls(data, inline), (0, data)
812
812
813
813
814 def parse_index_cl_v2(data):
814 def parse_index_cl_v2(data):
815 return IndexChangelogV2(data), None
815 return IndexChangelogV2(data), None
816
816
817
817
818 class IndexObject2(IndexObject):
818 class IndexObject2(IndexObject):
819 index_format = revlog_constants.INDEX_ENTRY_V2
819 index_format = revlog_constants.INDEX_ENTRY_V2
820
820
821 def replace_sidedata_info(
821 def replace_sidedata_info(
822 self,
822 self,
823 rev,
823 rev,
824 sidedata_offset,
824 sidedata_offset,
825 sidedata_length,
825 sidedata_length,
826 offset_flags,
826 offset_flags,
827 compression_mode,
827 compression_mode,
828 ):
828 ):
829 """
829 """
830 Replace an existing index entry's sidedata offset and length with new
830 Replace an existing index entry's sidedata offset and length with new
831 ones.
831 ones.
832 This cannot be used outside of the context of sidedata rewriting,
832 This cannot be used outside of the context of sidedata rewriting,
833 inside the transaction that creates the revision `rev`.
833 inside the transaction that creates the revision `rev`.
834 """
834 """
835 if rev < 0:
835 if rev < 0:
836 raise KeyError
836 raise KeyError
837 self._check_index(rev)
837 self._check_index(rev)
838 if rev < self._lgt:
838 if rev < self._lgt:
839 msg = b"cannot rewrite entries outside of this transaction"
839 msg = b"cannot rewrite entries outside of this transaction"
840 raise KeyError(msg)
840 raise KeyError(msg)
841 else:
841 else:
842 entry = list(self[rev])
842 entry = list(self[rev])
843 entry[0] = offset_flags
843 entry[0] = offset_flags
844 entry[8] = sidedata_offset
844 entry[8] = sidedata_offset
845 entry[9] = sidedata_length
845 entry[9] = sidedata_length
846 entry[11] = compression_mode
846 entry[11] = compression_mode
847 entry = tuple(entry)
847 entry = tuple(entry)
848 new = self._pack_entry(rev, entry)
848 new = self._pack_entry(rev, entry)
849 self._extra[rev - self._lgt] = new
849 self._extra[rev - self._lgt] = new
850
850
851 def _unpack_entry(self, rev, data):
851 def _unpack_entry(self, rev, data):
852 data = self.index_format.unpack(data)
852 data = self.index_format.unpack(data)
853 entry = data[:10]
853 entry = data[:10]
854 data_comp = data[10] & 3
854 data_comp = data[10] & 3
855 sidedata_comp = (data[10] & (3 << 2)) >> 2
855 sidedata_comp = (data[10] & (3 << 2)) >> 2
856 return entry + (data_comp, sidedata_comp, revlog_constants.RANK_UNKNOWN)
856 return entry + (data_comp, sidedata_comp, revlog_constants.RANK_UNKNOWN)
857
857
858 def _pack_entry(self, rev, entry):
858 def _pack_entry(self, rev, entry):
859 data = entry[:10]
859 data = entry[:10]
860 data_comp = entry[10] & 3
860 data_comp = entry[10] & 3
861 sidedata_comp = (entry[11] & 3) << 2
861 sidedata_comp = (entry[11] & 3) << 2
862 data += (data_comp | sidedata_comp,)
862 data += (data_comp | sidedata_comp,)
863
863
864 return self.index_format.pack(*data)
864 return self.index_format.pack(*data)
865
865
866 def entry_binary(self, rev):
866 def entry_binary(self, rev):
867 """return the raw binary string representing a revision"""
867 """return the raw binary string representing a revision"""
868 entry = self[rev]
868 entry = self[rev]
869 return self._pack_entry(rev, entry)
869 return self._pack_entry(rev, entry)
870
870
871 def pack_header(self, header):
871 def pack_header(self, header):
872 """pack header information as binary"""
872 """pack header information as binary"""
873 msg = 'version header should go in the docket, not the index: %d'
873 msg = 'version header should go in the docket, not the index: %d'
874 msg %= header
874 msg %= header
875 raise error.ProgrammingError(msg)
875 raise error.ProgrammingError(msg)
876
876
877
877
878 class IndexChangelogV2(IndexObject2):
878 class IndexChangelogV2(IndexObject2):
879 index_format = revlog_constants.INDEX_ENTRY_CL_V2
879 index_format = revlog_constants.INDEX_ENTRY_CL_V2
880
880
881 null_item = (
881 null_item = (
882 IndexObject2.null_item[: revlog_constants.ENTRY_RANK]
882 IndexObject2.null_item[: revlog_constants.ENTRY_RANK]
883 + (0,) # rank of null is 0
883 + (0,) # rank of null is 0
884 + IndexObject2.null_item[revlog_constants.ENTRY_RANK :]
884 + IndexObject2.null_item[revlog_constants.ENTRY_RANK :]
885 )
885 )
886
886
887 def _unpack_entry(self, rev, data, r=True):
887 def _unpack_entry(self, rev, data, r=True):
888 items = self.index_format.unpack(data)
888 items = self.index_format.unpack(data)
889 return (
889 return (
890 items[revlog_constants.INDEX_ENTRY_V2_IDX_OFFSET],
890 items[revlog_constants.INDEX_ENTRY_V2_IDX_OFFSET],
891 items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSED_LENGTH],
891 items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSED_LENGTH],
892 items[revlog_constants.INDEX_ENTRY_V2_IDX_UNCOMPRESSED_LENGTH],
892 items[revlog_constants.INDEX_ENTRY_V2_IDX_UNCOMPRESSED_LENGTH],
893 rev,
893 rev,
894 rev,
894 rev,
895 items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_1],
895 items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_1],
896 items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_2],
896 items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_2],
897 items[revlog_constants.INDEX_ENTRY_V2_IDX_NODEID],
897 items[revlog_constants.INDEX_ENTRY_V2_IDX_NODEID],
898 items[revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_OFFSET],
898 items[revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_OFFSET],
899 items[
899 items[
900 revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_COMPRESSED_LENGTH
900 revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_COMPRESSED_LENGTH
901 ],
901 ],
902 items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] & 3,
902 items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] & 3,
903 (items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] >> 2)
903 (items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] >> 2)
904 & 3,
904 & 3,
905 items[revlog_constants.INDEX_ENTRY_V2_IDX_RANK],
905 items[revlog_constants.INDEX_ENTRY_V2_IDX_RANK],
906 )
906 )
907
907
908 def _pack_entry(self, rev, entry):
908 def _pack_entry(self, rev, entry):
909
909
910 base = entry[revlog_constants.ENTRY_DELTA_BASE]
910 base = entry[revlog_constants.ENTRY_DELTA_BASE]
911 link_rev = entry[revlog_constants.ENTRY_LINK_REV]
911 link_rev = entry[revlog_constants.ENTRY_LINK_REV]
912 assert base == rev, (base, rev)
912 assert base == rev, (base, rev)
913 assert link_rev == rev, (link_rev, rev)
913 assert link_rev == rev, (link_rev, rev)
914 data = (
914 data = (
915 entry[revlog_constants.ENTRY_DATA_OFFSET],
915 entry[revlog_constants.ENTRY_DATA_OFFSET],
916 entry[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH],
916 entry[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH],
917 entry[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH],
917 entry[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH],
918 entry[revlog_constants.ENTRY_PARENT_1],
918 entry[revlog_constants.ENTRY_PARENT_1],
919 entry[revlog_constants.ENTRY_PARENT_2],
919 entry[revlog_constants.ENTRY_PARENT_2],
920 entry[revlog_constants.ENTRY_NODE_ID],
920 entry[revlog_constants.ENTRY_NODE_ID],
921 entry[revlog_constants.ENTRY_SIDEDATA_OFFSET],
921 entry[revlog_constants.ENTRY_SIDEDATA_OFFSET],
922 entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSED_LENGTH],
922 entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSED_LENGTH],
923 entry[revlog_constants.ENTRY_DATA_COMPRESSION_MODE] & 3
923 entry[revlog_constants.ENTRY_DATA_COMPRESSION_MODE] & 3
924 | (entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSION_MODE] & 3)
924 | (entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSION_MODE] & 3)
925 << 2,
925 << 2,
926 entry[revlog_constants.ENTRY_RANK],
926 entry[revlog_constants.ENTRY_RANK],
927 )
927 )
928 return self.index_format.pack(*data)
928 return self.index_format.pack(*data)
929
929
930
930
931 def parse_index_devel_nodemap(data, inline):
931 def parse_index_devel_nodemap(data, inline):
932 """like parse_index2, but always return a PersistentNodeMapIndexObject"""
932 """like parse_index2, but always return a PersistentNodeMapIndexObject"""
933 return PersistentNodeMapIndexObject(data), None
933 return PersistentNodeMapIndexObject(data), None
934
934
935
935
936 def parse_dirstate(dmap, copymap, st):
936 def parse_dirstate(dmap, copymap, st):
937 parents = [st[:20], st[20:40]]
937 parents = [st[:20], st[20:40]]
938 # dereference fields so they will be local in the loop
938 # dereference fields so they will be local in the loop
939 format = b">cllll"
939 format = b">cllll"
940 e_size = struct.calcsize(format)
940 e_size = struct.calcsize(format)
941 pos1 = 40
941 pos1 = 40
942 l = len(st)
942 l = len(st)
943
943
944 # the inner loop
944 # the inner loop
945 while pos1 < l:
945 while pos1 < l:
946 pos2 = pos1 + e_size
946 pos2 = pos1 + e_size
947 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
947 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
948 pos1 = pos2 + e[4]
948 pos1 = pos2 + e[4]
949 f = st[pos2:pos1]
949 f = st[pos2:pos1]
950 if b'\0' in f:
950 if b'\0' in f:
951 f, c = f.split(b'\0')
951 f, c = f.split(b'\0')
952 copymap[f] = c
952 copymap[f] = c
953 dmap[f] = DirstateItem.from_v1_data(*e[:4])
953 dmap[f] = DirstateItem.from_v1_data(*e[:4])
954 return parents
954 return parents
955
955
956
956
957 def pack_dirstate(dmap, copymap, pl):
957 def pack_dirstate(dmap, copymap, pl):
958 cs = stringio()
958 cs = stringio()
959 write = cs.write
959 write = cs.write
960 write(b"".join(pl))
960 write(b"".join(pl))
961 for f, e in dmap.items():
961 for f, e in dmap.items():
962 if f in copymap:
962 if f in copymap:
963 f = b"%s\0%s" % (f, copymap[f])
963 f = b"%s\0%s" % (f, copymap[f])
964 e = _pack(
964 e = _pack(
965 b">cllll",
965 b">cllll",
966 e.v1_state(),
966 e._v1_state(),
967 e.v1_mode(),
967 e._v1_mode(),
968 e.v1_size(),
968 e._v1_size(),
969 e.v1_mtime(),
969 e._v1_mtime(),
970 len(f),
970 len(f),
971 )
971 )
972 write(e)
972 write(e)
973 write(f)
973 write(f)
974 return cs.getvalue()
974 return cs.getvalue()
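
The two helpers above define the dirstate-v1 record layout: a fixed ">cllll" header (state byte, mode, size, mtime, and the length of the path that follows), then the path itself, with an optional copy source appended after a NUL byte. The following self-contained sketch round-trips a single record in that layout; the field values and file name are made up for illustration, and the helper names are not part of Mercurial's API.

# Illustrative sketch of one dirstate-v1 record, mirroring the
# ">cllll" layout used by parse_dirstate()/pack_dirstate() above.
import struct

HEADER = b">cllll"

def pack_entry(state, mode, size, mtime, path, copy_source=None):
    # A copy source is stored by appending it to the path after a NUL byte.
    if copy_source is not None:
        path = path + b"\0" + copy_source
    return struct.pack(HEADER, state, mode, size, mtime, len(path)) + path

def unpack_entry(data):
    e_size = struct.calcsize(HEADER)
    state, mode, size, mtime, path_len = struct.unpack(HEADER, data[:e_size])
    path = data[e_size : e_size + path_len]
    copy_source = None
    if b"\0" in path:
        path, copy_source = path.split(b"\0")
    return state, mode, size, mtime, path, copy_source

record = pack_entry(b"n", 0o644, 12, 1700000000, b"dir/file.txt")
assert unpack_entry(record)[4] == b"dir/file.txt"
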
@@ -1,285 +1,246 b''
1 use cpython::exc;
1 use cpython::exc;
2 use cpython::ObjectProtocol;
2 use cpython::ObjectProtocol;
3 use cpython::PyBytes;
3 use cpython::PyBytes;
4 use cpython::PyErr;
4 use cpython::PyErr;
5 use cpython::PyNone;
5 use cpython::PyNone;
6 use cpython::PyObject;
6 use cpython::PyObject;
7 use cpython::PyResult;
7 use cpython::PyResult;
8 use cpython::Python;
8 use cpython::Python;
9 use cpython::PythonObject;
9 use cpython::PythonObject;
10 use hg::dirstate::DirstateEntry;
10 use hg::dirstate::DirstateEntry;
11 use hg::dirstate::EntryState;
12 use hg::dirstate::TruncatedTimestamp;
11 use hg::dirstate::TruncatedTimestamp;
13 use std::cell::Cell;
12 use std::cell::Cell;
14 use std::convert::TryFrom;
15
13
16 py_class!(pub class DirstateItem |py| {
14 py_class!(pub class DirstateItem |py| {
17 data entry: Cell<DirstateEntry>;
15 data entry: Cell<DirstateEntry>;
18
16
19 def __new__(
17 def __new__(
20 _cls,
18 _cls,
21 wc_tracked: bool = false,
19 wc_tracked: bool = false,
22 p1_tracked: bool = false,
20 p1_tracked: bool = false,
23 p2_info: bool = false,
21 p2_info: bool = false,
24 has_meaningful_data: bool = true,
22 has_meaningful_data: bool = true,
25 has_meaningful_mtime: bool = true,
23 has_meaningful_mtime: bool = true,
26 parentfiledata: Option<(u32, u32, Option<(u32, u32, bool)>)> = None,
24 parentfiledata: Option<(u32, u32, Option<(u32, u32, bool)>)> = None,
27 fallback_exec: Option<bool> = None,
25 fallback_exec: Option<bool> = None,
28 fallback_symlink: Option<bool> = None,
26 fallback_symlink: Option<bool> = None,
29
27
30 ) -> PyResult<DirstateItem> {
28 ) -> PyResult<DirstateItem> {
31 let mut mode_size_opt = None;
29 let mut mode_size_opt = None;
32 let mut mtime_opt = None;
30 let mut mtime_opt = None;
33 if let Some((mode, size, mtime)) = parentfiledata {
31 if let Some((mode, size, mtime)) = parentfiledata {
34 if has_meaningful_data {
32 if has_meaningful_data {
35 mode_size_opt = Some((mode, size))
33 mode_size_opt = Some((mode, size))
36 }
34 }
37 if has_meaningful_mtime {
35 if has_meaningful_mtime {
38 if let Some(m) = mtime {
36 if let Some(m) = mtime {
39 mtime_opt = Some(timestamp(py, m)?);
37 mtime_opt = Some(timestamp(py, m)?);
40 }
38 }
41 }
39 }
42 }
40 }
43 let entry = DirstateEntry::from_v2_data(
41 let entry = DirstateEntry::from_v2_data(
44 wc_tracked,
42 wc_tracked,
45 p1_tracked,
43 p1_tracked,
46 p2_info,
44 p2_info,
47 mode_size_opt,
45 mode_size_opt,
48 mtime_opt,
46 mtime_opt,
49 fallback_exec,
47 fallback_exec,
50 fallback_symlink,
48 fallback_symlink,
51 );
49 );
52 DirstateItem::create_instance(py, Cell::new(entry))
50 DirstateItem::create_instance(py, Cell::new(entry))
53 }
51 }
54
52
55 @property
53 @property
56 def state(&self) -> PyResult<PyBytes> {
54 def state(&self) -> PyResult<PyBytes> {
57 let state_byte: u8 = self.entry(py).get().state().into();
55 let state_byte: u8 = self.entry(py).get().state().into();
58 Ok(PyBytes::new(py, &[state_byte]))
56 Ok(PyBytes::new(py, &[state_byte]))
59 }
57 }
60
58
61 @property
59 @property
62 def mode(&self) -> PyResult<i32> {
60 def mode(&self) -> PyResult<i32> {
63 Ok(self.entry(py).get().mode())
61 Ok(self.entry(py).get().mode())
64 }
62 }
65
63
66 @property
64 @property
67 def size(&self) -> PyResult<i32> {
65 def size(&self) -> PyResult<i32> {
68 Ok(self.entry(py).get().size())
66 Ok(self.entry(py).get().size())
69 }
67 }
70
68
71 @property
69 @property
72 def mtime(&self) -> PyResult<i32> {
70 def mtime(&self) -> PyResult<i32> {
73 Ok(self.entry(py).get().mtime())
71 Ok(self.entry(py).get().mtime())
74 }
72 }
75
73
76 @property
74 @property
77 def has_fallback_exec(&self) -> PyResult<bool> {
75 def has_fallback_exec(&self) -> PyResult<bool> {
78 match self.entry(py).get().get_fallback_exec() {
76 match self.entry(py).get().get_fallback_exec() {
79 Some(_) => Ok(true),
77 Some(_) => Ok(true),
80 None => Ok(false),
78 None => Ok(false),
81 }
79 }
82 }
80 }
83
81
84 @property
82 @property
85 def fallback_exec(&self) -> PyResult<Option<bool>> {
83 def fallback_exec(&self) -> PyResult<Option<bool>> {
86 match self.entry(py).get().get_fallback_exec() {
84 match self.entry(py).get().get_fallback_exec() {
87 Some(exec) => Ok(Some(exec)),
85 Some(exec) => Ok(Some(exec)),
88 None => Ok(None),
86 None => Ok(None),
89 }
87 }
90 }
88 }
91
89
92 @fallback_exec.setter
90 @fallback_exec.setter
93 def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> {
91 def set_fallback_exec(&self, value: Option<PyObject>) -> PyResult<()> {
94 match value {
92 match value {
95 None => {self.entry(py).get().set_fallback_exec(None);},
93 None => {self.entry(py).get().set_fallback_exec(None);},
96 Some(value) => {
94 Some(value) => {
97 if value.is_none(py) {
95 if value.is_none(py) {
98 self.entry(py).get().set_fallback_exec(None);
96 self.entry(py).get().set_fallback_exec(None);
99 } else {
97 } else {
100 self.entry(py).get().set_fallback_exec(
98 self.entry(py).get().set_fallback_exec(
101 Some(value.is_true(py)?)
99 Some(value.is_true(py)?)
102 );
100 );
103 }},
101 }},
104 }
102 }
105 Ok(())
103 Ok(())
106 }
104 }
107
105
108 @property
106 @property
109 def has_fallback_symlink(&self) -> PyResult<bool> {
107 def has_fallback_symlink(&self) -> PyResult<bool> {
110 match self.entry(py).get().get_fallback_symlink() {
108 match self.entry(py).get().get_fallback_symlink() {
111 Some(_) => Ok(true),
109 Some(_) => Ok(true),
112 None => Ok(false),
110 None => Ok(false),
113 }
111 }
114 }
112 }
115
113
116 @property
114 @property
117 def fallback_symlink(&self) -> PyResult<Option<bool>> {
115 def fallback_symlink(&self) -> PyResult<Option<bool>> {
118 match self.entry(py).get().get_fallback_symlink() {
116 match self.entry(py).get().get_fallback_symlink() {
119 Some(symlink) => Ok(Some(symlink)),
117 Some(symlink) => Ok(Some(symlink)),
120 None => Ok(None),
118 None => Ok(None),
121 }
119 }
122 }
120 }
123
121
124 @fallback_symlink.setter
122 @fallback_symlink.setter
125 def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> {
123 def set_fallback_symlink(&self, value: Option<PyObject>) -> PyResult<()> {
126 match value {
124 match value {
127 None => {self.entry(py).get().set_fallback_symlink(None);},
125 None => {self.entry(py).get().set_fallback_symlink(None);},
128 Some(value) => {
126 Some(value) => {
129 if value.is_none(py) {
127 if value.is_none(py) {
130 self.entry(py).get().set_fallback_symlink(None);
128 self.entry(py).get().set_fallback_symlink(None);
131 } else {
129 } else {
132 self.entry(py).get().set_fallback_symlink(
130 self.entry(py).get().set_fallback_symlink(
133 Some(value.is_true(py)?)
131 Some(value.is_true(py)?)
134 );
132 );
135 }},
133 }},
136 }
134 }
137 Ok(())
135 Ok(())
138 }
136 }
139
137
140 @property
138 @property
141 def tracked(&self) -> PyResult<bool> {
139 def tracked(&self) -> PyResult<bool> {
142 Ok(self.entry(py).get().tracked())
140 Ok(self.entry(py).get().tracked())
143 }
141 }
144
142
145 @property
143 @property
146 def p1_tracked(&self) -> PyResult<bool> {
144 def p1_tracked(&self) -> PyResult<bool> {
147 Ok(self.entry(py).get().p1_tracked())
145 Ok(self.entry(py).get().p1_tracked())
148 }
146 }
149
147
150 @property
148 @property
151 def added(&self) -> PyResult<bool> {
149 def added(&self) -> PyResult<bool> {
152 Ok(self.entry(py).get().added())
150 Ok(self.entry(py).get().added())
153 }
151 }
154
152
155
153
156 @property
154 @property
157 def p2_info(&self) -> PyResult<bool> {
155 def p2_info(&self) -> PyResult<bool> {
158 Ok(self.entry(py).get().p2_info())
156 Ok(self.entry(py).get().p2_info())
159 }
157 }
160
158
161 @property
159 @property
162 def removed(&self) -> PyResult<bool> {
160 def removed(&self) -> PyResult<bool> {
163 Ok(self.entry(py).get().removed())
161 Ok(self.entry(py).get().removed())
164 }
162 }
165
163
166 @property
164 @property
167 def maybe_clean(&self) -> PyResult<bool> {
165 def maybe_clean(&self) -> PyResult<bool> {
168 Ok(self.entry(py).get().maybe_clean())
166 Ok(self.entry(py).get().maybe_clean())
169 }
167 }
170
168
171 @property
169 @property
172 def any_tracked(&self) -> PyResult<bool> {
170 def any_tracked(&self) -> PyResult<bool> {
173 Ok(self.entry(py).get().any_tracked())
171 Ok(self.entry(py).get().any_tracked())
174 }
172 }
175
173
176 def v1_state(&self) -> PyResult<PyBytes> {
177 let (state, _mode, _size, _mtime) = self.entry(py).get().v1_data();
178 let state_byte: u8 = state.into();
179 Ok(PyBytes::new(py, &[state_byte]))
180 }
181
182 def v1_mode(&self) -> PyResult<i32> {
183 let (_state, mode, _size, _mtime) = self.entry(py).get().v1_data();
184 Ok(mode)
185 }
186
187 def v1_size(&self) -> PyResult<i32> {
188 let (_state, _mode, size, _mtime) = self.entry(py).get().v1_data();
189 Ok(size)
190 }
191
192 def v1_mtime(&self) -> PyResult<i32> {
193 let (_state, _mode, _size, mtime) = self.entry(py).get().v1_data();
194 Ok(mtime)
195 }
196
197 def mtime_likely_equal_to(&self, other: (u32, u32, bool))
174 def mtime_likely_equal_to(&self, other: (u32, u32, bool))
198 -> PyResult<bool> {
175 -> PyResult<bool> {
199 if let Some(mtime) = self.entry(py).get().truncated_mtime() {
176 if let Some(mtime) = self.entry(py).get().truncated_mtime() {
200 Ok(mtime.likely_equal(timestamp(py, other)?))
177 Ok(mtime.likely_equal(timestamp(py, other)?))
201 } else {
178 } else {
202 Ok(false)
179 Ok(false)
203 }
180 }
204 }
181 }
205
182
206 @classmethod
207 def from_v1_data(
208 _cls,
209 state: PyBytes,
210 mode: i32,
211 size: i32,
212 mtime: i32,
213 ) -> PyResult<Self> {
214 let state = <[u8; 1]>::try_from(state.data(py))
215 .ok()
216 .and_then(|state| EntryState::try_from(state[0]).ok())
217 .ok_or_else(|| PyErr::new::<exc::ValueError, _>(py, "invalid state"))?;
218 let entry = DirstateEntry::from_v1_data(state, mode, size, mtime);
219 DirstateItem::create_instance(py, Cell::new(entry))
220 }
221
222 def drop_merge_data(&self) -> PyResult<PyNone> {
183 def drop_merge_data(&self) -> PyResult<PyNone> {
223 self.update(py, |entry| entry.drop_merge_data());
184 self.update(py, |entry| entry.drop_merge_data());
224 Ok(PyNone)
185 Ok(PyNone)
225 }
186 }
226
187
227 def set_clean(
188 def set_clean(
228 &self,
189 &self,
229 mode: u32,
190 mode: u32,
230 size: u32,
191 size: u32,
231 mtime: (u32, u32, bool),
192 mtime: (u32, u32, bool),
232 ) -> PyResult<PyNone> {
193 ) -> PyResult<PyNone> {
233 let mtime = timestamp(py, mtime)?;
194 let mtime = timestamp(py, mtime)?;
234 self.update(py, |entry| entry.set_clean(mode, size, mtime));
195 self.update(py, |entry| entry.set_clean(mode, size, mtime));
235 Ok(PyNone)
196 Ok(PyNone)
236 }
197 }
237
198
238 def set_possibly_dirty(&self) -> PyResult<PyNone> {
199 def set_possibly_dirty(&self) -> PyResult<PyNone> {
239 self.update(py, |entry| entry.set_possibly_dirty());
200 self.update(py, |entry| entry.set_possibly_dirty());
240 Ok(PyNone)
201 Ok(PyNone)
241 }
202 }
242
203
243 def set_tracked(&self) -> PyResult<PyNone> {
204 def set_tracked(&self) -> PyResult<PyNone> {
244 self.update(py, |entry| entry.set_tracked());
205 self.update(py, |entry| entry.set_tracked());
245 Ok(PyNone)
206 Ok(PyNone)
246 }
207 }
247
208
248 def set_untracked(&self) -> PyResult<PyNone> {
209 def set_untracked(&self) -> PyResult<PyNone> {
249 self.update(py, |entry| entry.set_untracked());
210 self.update(py, |entry| entry.set_untracked());
250 Ok(PyNone)
211 Ok(PyNone)
251 }
212 }
252 });
213 });
253
214
254 impl DirstateItem {
215 impl DirstateItem {
255 pub fn new_as_pyobject(
216 pub fn new_as_pyobject(
256 py: Python<'_>,
217 py: Python<'_>,
257 entry: DirstateEntry,
218 entry: DirstateEntry,
258 ) -> PyResult<PyObject> {
219 ) -> PyResult<PyObject> {
259 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
220 Ok(DirstateItem::create_instance(py, Cell::new(entry))?.into_object())
260 }
221 }
261
222
262 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
223 pub fn get_entry(&self, py: Python<'_>) -> DirstateEntry {
263 self.entry(py).get()
224 self.entry(py).get()
264 }
225 }
265
226
266 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
227 // TODO: Use https://doc.rust-lang.org/std/cell/struct.Cell.html#method.update instead when it’s stable
267 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
228 pub fn update(&self, py: Python<'_>, f: impl FnOnce(&mut DirstateEntry)) {
268 let mut entry = self.entry(py).get();
229 let mut entry = self.entry(py).get();
269 f(&mut entry);
230 f(&mut entry);
270 self.entry(py).set(entry)
231 self.entry(py).set(entry)
271 }
232 }
272 }
233 }
273
234
274 pub(crate) fn timestamp(
235 pub(crate) fn timestamp(
275 py: Python<'_>,
236 py: Python<'_>,
276 (s, ns, second_ambiguous): (u32, u32, bool),
237 (s, ns, second_ambiguous): (u32, u32, bool),
277 ) -> PyResult<TruncatedTimestamp> {
238 ) -> PyResult<TruncatedTimestamp> {
278 TruncatedTimestamp::from_already_truncated(s, ns, second_ambiguous)
239 TruncatedTimestamp::from_already_truncated(s, ns, second_ambiguous)
279 .map_err(|_| {
240 .map_err(|_| {
280 PyErr::new::<exc::ValueError, _>(
241 PyErr::new::<exc::ValueError, _>(
281 py,
242 py,
282 "expected mtime truncated to 31 bits",
243 "expected mtime truncated to 31 bits",
283 )
244 )
284 })
245 })
285 }
246 }
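
The Rust timestamp() helper above converts a (seconds, nanoseconds, second_ambiguous) tuple into a TruncatedTimestamp and raises ValueError when the value is not already truncated to 31 bits. A rough Python illustration of that guard follows; it assumes the constraint is simply that the seconds component fits in 31 bits, and it is a sketch rather than the actual TruncatedTimestamp logic.

# Sketch only: mimics the "expected mtime truncated to 31 bits" check above,
# under the assumption that only the seconds component is constrained.
def truncated_timestamp(seconds, nanoseconds, second_ambiguous):
    if not 0 <= seconds < 2 ** 31:
        raise ValueError("expected mtime truncated to 31 bits")
    return (seconds, nanoseconds, second_ambiguous)

print(truncated_timestamp(1700000000, 0, False))
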