##// END OF EJS Templates
dirstate: invalidate on all exceptions...
marmoute -
r51000:2323b74f default
parent child Browse files
Show More
@@ -1,1660 +1,1660 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import collections
9 import collections
10 import contextlib
10 import contextlib
11 import os
11 import os
12 import stat
12 import stat
13 import uuid
13 import uuid
14
14
15 from .i18n import _
15 from .i18n import _
16 from .pycompat import delattr
16 from .pycompat import delattr
17
17
18 from hgdemandimport import tracing
18 from hgdemandimport import tracing
19
19
20 from . import (
20 from . import (
21 dirstatemap,
21 dirstatemap,
22 encoding,
22 encoding,
23 error,
23 error,
24 match as matchmod,
24 match as matchmod,
25 node,
25 node,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 util,
30 util,
31 )
31 )
32
32
33 from .dirstateutils import (
33 from .dirstateutils import (
34 timestamp,
34 timestamp,
35 )
35 )
36
36
37 from .interfaces import (
37 from .interfaces import (
38 dirstate as intdirstate,
38 dirstate as intdirstate,
39 util as interfaceutil,
39 util as interfaceutil,
40 )
40 )
41
41
42 parsers = policy.importmod('parsers')
42 parsers = policy.importmod('parsers')
43 rustmod = policy.importrust('dirstate')
43 rustmod = policy.importrust('dirstate')
44
44
45 HAS_FAST_DIRSTATE_V2 = rustmod is not None
45 HAS_FAST_DIRSTATE_V2 = rustmod is not None
46
46
47 propertycache = util.propertycache
47 propertycache = util.propertycache
48 filecache = scmutil.filecache
48 filecache = scmutil.filecache
49 _rangemask = dirstatemap.rangemask
49 _rangemask = dirstatemap.rangemask
50
50
51 DirstateItem = dirstatemap.DirstateItem
51 DirstateItem = dirstatemap.DirstateItem
52
52
53
53
54 class repocache(filecache):
54 class repocache(filecache):
55 """filecache for files in .hg/"""
55 """filecache for files in .hg/"""
56
56
57 def join(self, obj, fname):
57 def join(self, obj, fname):
58 return obj._opener.join(fname)
58 return obj._opener.join(fname)
59
59
60
60
61 class rootcache(filecache):
61 class rootcache(filecache):
62 """filecache for files in the repository root"""
62 """filecache for files in the repository root"""
63
63
64 def join(self, obj, fname):
64 def join(self, obj, fname):
65 return obj._join(fname)
65 return obj._join(fname)
66
66
67
67
68 def requires_changing_parents(func):
68 def requires_changing_parents(func):
69 def wrap(self, *args, **kwargs):
69 def wrap(self, *args, **kwargs):
70 if not self.is_changing_parents:
70 if not self.is_changing_parents:
71 msg = 'calling `%s` outside of a changing_parents context'
71 msg = 'calling `%s` outside of a changing_parents context'
72 msg %= func.__name__
72 msg %= func.__name__
73 raise error.ProgrammingError(msg)
73 raise error.ProgrammingError(msg)
74 if self._invalidated_context:
74 if self._invalidated_context:
75 msg = 'calling `%s` after the dirstate was invalidated'
75 msg = 'calling `%s` after the dirstate was invalidated'
76 raise error.ProgrammingError(msg)
76 raise error.ProgrammingError(msg)
77 return func(self, *args, **kwargs)
77 return func(self, *args, **kwargs)
78
78
79 return wrap
79 return wrap
80
80
81
81
82 def requires_changing_files(func):
82 def requires_changing_files(func):
83 def wrap(self, *args, **kwargs):
83 def wrap(self, *args, **kwargs):
84 if not self.is_changing_files:
84 if not self.is_changing_files:
85 msg = 'calling `%s` outside of a `changing_files`'
85 msg = 'calling `%s` outside of a `changing_files`'
86 msg %= func.__name__
86 msg %= func.__name__
87 raise error.ProgrammingError(msg)
87 raise error.ProgrammingError(msg)
88 return func(self, *args, **kwargs)
88 return func(self, *args, **kwargs)
89
89
90 return wrap
90 return wrap
91
91
92
92
93 def requires_not_changing_parents(func):
93 def requires_not_changing_parents(func):
94 def wrap(self, *args, **kwargs):
94 def wrap(self, *args, **kwargs):
95 if self.is_changing_parents:
95 if self.is_changing_parents:
96 msg = 'calling `%s` inside of a changing_parents context'
96 msg = 'calling `%s` inside of a changing_parents context'
97 msg %= func.__name__
97 msg %= func.__name__
98 raise error.ProgrammingError(msg)
98 raise error.ProgrammingError(msg)
99 return func(self, *args, **kwargs)
99 return func(self, *args, **kwargs)
100
100
101 return wrap
101 return wrap
102
102
103
103
104 CHANGE_TYPE_PARENTS = "parents"
104 CHANGE_TYPE_PARENTS = "parents"
105 CHANGE_TYPE_FILES = "files"
105 CHANGE_TYPE_FILES = "files"
106
106
107
107
108 @interfaceutil.implementer(intdirstate.idirstate)
108 @interfaceutil.implementer(intdirstate.idirstate)
109 class dirstate:
109 class dirstate:
110 def __init__(
110 def __init__(
111 self,
111 self,
112 opener,
112 opener,
113 ui,
113 ui,
114 root,
114 root,
115 validate,
115 validate,
116 sparsematchfn,
116 sparsematchfn,
117 nodeconstants,
117 nodeconstants,
118 use_dirstate_v2,
118 use_dirstate_v2,
119 use_tracked_hint=False,
119 use_tracked_hint=False,
120 ):
120 ):
121 """Create a new dirstate object.
121 """Create a new dirstate object.
122
122
123 opener is an open()-like callable that can be used to open the
123 opener is an open()-like callable that can be used to open the
124 dirstate file; root is the root of the directory tracked by
124 dirstate file; root is the root of the directory tracked by
125 the dirstate.
125 the dirstate.
126 """
126 """
127 self._use_dirstate_v2 = use_dirstate_v2
127 self._use_dirstate_v2 = use_dirstate_v2
128 self._use_tracked_hint = use_tracked_hint
128 self._use_tracked_hint = use_tracked_hint
129 self._nodeconstants = nodeconstants
129 self._nodeconstants = nodeconstants
130 self._opener = opener
130 self._opener = opener
131 self._validate = validate
131 self._validate = validate
132 self._root = root
132 self._root = root
133 # Either build a sparse-matcher or None if sparse is disabled
133 # Either build a sparse-matcher or None if sparse is disabled
134 self._sparsematchfn = sparsematchfn
134 self._sparsematchfn = sparsematchfn
135 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
135 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
136 # UNC path pointing to root share (issue4557)
136 # UNC path pointing to root share (issue4557)
137 self._rootdir = pathutil.normasprefix(root)
137 self._rootdir = pathutil.normasprefix(root)
138 # True is any internal state may be different
138 # True is any internal state may be different
139 self._dirty = False
139 self._dirty = False
140 # True if the set of tracked file may be different
140 # True if the set of tracked file may be different
141 self._dirty_tracked_set = False
141 self._dirty_tracked_set = False
142 self._ui = ui
142 self._ui = ui
143 self._filecache = {}
143 self._filecache = {}
144 # nesting level of `changing_parents` context
144 # nesting level of `changing_parents` context
145 self._changing_level = 0
145 self._changing_level = 0
146 # the change currently underway
146 # the change currently underway
147 self._change_type = None
147 self._change_type = None
148 # True if the current dirstate changing operations have been
148 # True if the current dirstate changing operations have been
149 # invalidated (used to make sure all nested contexts have been exited)
149 # invalidated (used to make sure all nested contexts have been exited)
150 self._invalidated_context = False
150 self._invalidated_context = False
151 self._filename = b'dirstate'
151 self._filename = b'dirstate'
152 self._filename_th = b'dirstate-tracked-hint'
152 self._filename_th = b'dirstate-tracked-hint'
153 self._pendingfilename = b'%s.pending' % self._filename
153 self._pendingfilename = b'%s.pending' % self._filename
154 self._plchangecallbacks = {}
154 self._plchangecallbacks = {}
155 self._origpl = None
155 self._origpl = None
156 self._mapcls = dirstatemap.dirstatemap
156 self._mapcls = dirstatemap.dirstatemap
157 # Access and cache cwd early, so we don't access it for the first time
157 # Access and cache cwd early, so we don't access it for the first time
158 # after a working-copy update caused it to not exist (accessing it then
158 # after a working-copy update caused it to not exist (accessing it then
159 # raises an exception).
159 # raises an exception).
160 self._cwd
160 self._cwd
161
161
162 def prefetch_parents(self):
162 def prefetch_parents(self):
163 """make sure the parents are loaded
163 """make sure the parents are loaded
164
164
165 Used to avoid a race condition.
165 Used to avoid a race condition.
166 """
166 """
167 self._pl
167 self._pl
168
168
169 @contextlib.contextmanager
169 @contextlib.contextmanager
170 def _changing(self, repo, change_type):
170 def _changing(self, repo, change_type):
171 if repo.currentwlock() is None:
171 if repo.currentwlock() is None:
172 msg = b"trying to change the dirstate without holding the wlock"
172 msg = b"trying to change the dirstate without holding the wlock"
173 raise error.ProgrammingError(msg)
173 raise error.ProgrammingError(msg)
174 if self._invalidated_context:
174 if self._invalidated_context:
175 msg = "trying to use an invalidated dirstate before it has reset"
175 msg = "trying to use an invalidated dirstate before it has reset"
176 raise error.ProgrammingError(msg)
176 raise error.ProgrammingError(msg)
177
177
178 has_tr = repo.currenttransaction() is not None
178 has_tr = repo.currenttransaction() is not None
179
179
180 # different type of change are mutually exclusive
180 # different type of change are mutually exclusive
181 if self._change_type is None:
181 if self._change_type is None:
182 assert self._changing_level == 0
182 assert self._changing_level == 0
183 self._change_type = change_type
183 self._change_type = change_type
184 elif self._change_type != change_type:
184 elif self._change_type != change_type:
185 msg = (
185 msg = (
186 'trying to open "%s" dirstate-changing context while a "%s" is'
186 'trying to open "%s" dirstate-changing context while a "%s" is'
187 ' already open'
187 ' already open'
188 )
188 )
189 msg %= (change_type, self._change_type)
189 msg %= (change_type, self._change_type)
190 raise error.ProgrammingError(msg)
190 raise error.ProgrammingError(msg)
191 self._changing_level += 1
191 self._changing_level += 1
192 try:
192 try:
193 yield
193 yield
194 except Exception:
194 except: # re-raises
195 self.invalidate()
195 self.invalidate()
196 raise
196 raise
197 finally:
197 finally:
198 tr = repo.currenttransaction()
198 tr = repo.currenttransaction()
199 if self._changing_level > 0:
199 if self._changing_level > 0:
200 if self._invalidated_context:
200 if self._invalidated_context:
201 # make sure we invalidate anything an upper context might
201 # make sure we invalidate anything an upper context might
202 # have changed.
202 # have changed.
203 self.invalidate()
203 self.invalidate()
204 self._changing_level -= 1
204 self._changing_level -= 1
205 # The invalidation is complete once we exit the final context
205 # The invalidation is complete once we exit the final context
206 # manager
206 # manager
207 if self._changing_level <= 0:
207 if self._changing_level <= 0:
208 self._change_type = None
208 self._change_type = None
209 assert self._changing_level == 0
209 assert self._changing_level == 0
210 if self._invalidated_context:
210 if self._invalidated_context:
211 self._invalidated_context = False
211 self._invalidated_context = False
212 else:
212 else:
213 # When an exception occured, `_invalidated_context`
213 # When an exception occured, `_invalidated_context`
214 # would have been set to True by the `invalidate`
214 # would have been set to True by the `invalidate`
215 # call earlier.
215 # call earlier.
216 #
216 #
217 # We don't have more straightforward code, because the
217 # We don't have more straightforward code, because the
218 # Exception catching (and the associated `invalidate`
218 # Exception catching (and the associated `invalidate`
219 # calling) might have been called by a nested context
219 # calling) might have been called by a nested context
220 # instead of the top level one.
220 # instead of the top level one.
221 self.write(tr)
221 self.write(tr)
222 if has_tr != (tr is not None):
222 if has_tr != (tr is not None):
223 if has_tr:
223 if has_tr:
224 m = "transaction vanished while changing dirstate"
224 m = "transaction vanished while changing dirstate"
225 else:
225 else:
226 m = "transaction appeared while changing dirstate"
226 m = "transaction appeared while changing dirstate"
227 raise error.ProgrammingError(m)
227 raise error.ProgrammingError(m)
228
228
229 @contextlib.contextmanager
229 @contextlib.contextmanager
230 def changing_parents(self, repo):
230 def changing_parents(self, repo):
231 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
231 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
232 yield c
232 yield c
233
233
234 @contextlib.contextmanager
234 @contextlib.contextmanager
235 def changing_files(self, repo):
235 def changing_files(self, repo):
236 with self._changing(repo, CHANGE_TYPE_FILES) as c:
236 with self._changing(repo, CHANGE_TYPE_FILES) as c:
237 yield c
237 yield c
238
238
239 # here to help migration to the new code
239 # here to help migration to the new code
240 def parentchange(self):
240 def parentchange(self):
241 msg = (
241 msg = (
242 "Mercurial 6.4 and later requires call to "
242 "Mercurial 6.4 and later requires call to "
243 "`dirstate.changing_parents(repo)`"
243 "`dirstate.changing_parents(repo)`"
244 )
244 )
245 raise error.ProgrammingError(msg)
245 raise error.ProgrammingError(msg)
246
246
247 @property
247 @property
248 def is_changing_any(self):
248 def is_changing_any(self):
249 """Returns true if the dirstate is in the middle of a set of changes.
249 """Returns true if the dirstate is in the middle of a set of changes.
250
250
251 This returns True for any kind of change.
251 This returns True for any kind of change.
252 """
252 """
253 return self._changing_level > 0
253 return self._changing_level > 0
254
254
255 def pendingparentchange(self):
255 def pendingparentchange(self):
256 return self.is_changing_parent()
256 return self.is_changing_parent()
257
257
258 def is_changing_parent(self):
258 def is_changing_parent(self):
259 """Returns true if the dirstate is in the middle of a set of changes
259 """Returns true if the dirstate is in the middle of a set of changes
260 that modify the dirstate parent.
260 that modify the dirstate parent.
261 """
261 """
262 self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
262 self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
263 return self.is_changing_parents
263 return self.is_changing_parents
264
264
265 @property
265 @property
266 def is_changing_parents(self):
266 def is_changing_parents(self):
267 """Returns true if the dirstate is in the middle of a set of changes
267 """Returns true if the dirstate is in the middle of a set of changes
268 that modify the dirstate parent.
268 that modify the dirstate parent.
269 """
269 """
270 if self._changing_level <= 0:
270 if self._changing_level <= 0:
271 return False
271 return False
272 return self._change_type == CHANGE_TYPE_PARENTS
272 return self._change_type == CHANGE_TYPE_PARENTS
273
273
274 @property
274 @property
275 def is_changing_files(self):
275 def is_changing_files(self):
276 """Returns true if the dirstate is in the middle of a set of changes
276 """Returns true if the dirstate is in the middle of a set of changes
277 that modify the files tracked or their sources.
277 that modify the files tracked or their sources.
278 """
278 """
279 if self._changing_level <= 0:
279 if self._changing_level <= 0:
280 return False
280 return False
281 return self._change_type == CHANGE_TYPE_FILES
281 return self._change_type == CHANGE_TYPE_FILES
282
282
283 @propertycache
283 @propertycache
284 def _map(self):
284 def _map(self):
285 """Return the dirstate contents (see documentation for dirstatemap)."""
285 """Return the dirstate contents (see documentation for dirstatemap)."""
286 self._map = self._mapcls(
286 self._map = self._mapcls(
287 self._ui,
287 self._ui,
288 self._opener,
288 self._opener,
289 self._root,
289 self._root,
290 self._nodeconstants,
290 self._nodeconstants,
291 self._use_dirstate_v2,
291 self._use_dirstate_v2,
292 )
292 )
293 return self._map
293 return self._map
294
294
295 @property
295 @property
296 def _sparsematcher(self):
296 def _sparsematcher(self):
297 """The matcher for the sparse checkout.
297 """The matcher for the sparse checkout.
298
298
299 The working directory may not include every file from a manifest. The
299 The working directory may not include every file from a manifest. The
300 matcher obtained by this property will match a path if it is to be
300 matcher obtained by this property will match a path if it is to be
301 included in the working directory.
301 included in the working directory.
302
302
303 When sparse if disabled, return None.
303 When sparse if disabled, return None.
304 """
304 """
305 if self._sparsematchfn is None:
305 if self._sparsematchfn is None:
306 return None
306 return None
307 # TODO there is potential to cache this property. For now, the matcher
307 # TODO there is potential to cache this property. For now, the matcher
308 # is resolved on every access. (But the called function does use a
308 # is resolved on every access. (But the called function does use a
309 # cache to keep the lookup fast.)
309 # cache to keep the lookup fast.)
310 return self._sparsematchfn()
310 return self._sparsematchfn()
311
311
312 @repocache(b'branch')
312 @repocache(b'branch')
313 def _branch(self):
313 def _branch(self):
314 try:
314 try:
315 return self._opener.read(b"branch").strip() or b"default"
315 return self._opener.read(b"branch").strip() or b"default"
316 except FileNotFoundError:
316 except FileNotFoundError:
317 return b"default"
317 return b"default"
318
318
319 @property
319 @property
320 def _pl(self):
320 def _pl(self):
321 return self._map.parents()
321 return self._map.parents()
322
322
323 def hasdir(self, d):
323 def hasdir(self, d):
324 return self._map.hastrackeddir(d)
324 return self._map.hastrackeddir(d)
325
325
326 @rootcache(b'.hgignore')
326 @rootcache(b'.hgignore')
327 def _ignore(self):
327 def _ignore(self):
328 files = self._ignorefiles()
328 files = self._ignorefiles()
329 if not files:
329 if not files:
330 return matchmod.never()
330 return matchmod.never()
331
331
332 pats = [b'include:%s' % f for f in files]
332 pats = [b'include:%s' % f for f in files]
333 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
333 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
334
334
335 @propertycache
335 @propertycache
336 def _slash(self):
336 def _slash(self):
337 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
337 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
338
338
339 @propertycache
339 @propertycache
340 def _checklink(self):
340 def _checklink(self):
341 return util.checklink(self._root)
341 return util.checklink(self._root)
342
342
343 @propertycache
343 @propertycache
344 def _checkexec(self):
344 def _checkexec(self):
345 return bool(util.checkexec(self._root))
345 return bool(util.checkexec(self._root))
346
346
347 @propertycache
347 @propertycache
348 def _checkcase(self):
348 def _checkcase(self):
349 return not util.fscasesensitive(self._join(b'.hg'))
349 return not util.fscasesensitive(self._join(b'.hg'))
350
350
351 def _join(self, f):
351 def _join(self, f):
352 # much faster than os.path.join()
352 # much faster than os.path.join()
353 # it's safe because f is always a relative path
353 # it's safe because f is always a relative path
354 return self._rootdir + f
354 return self._rootdir + f
355
355
356 def flagfunc(self, buildfallback):
356 def flagfunc(self, buildfallback):
357 """build a callable that returns flags associated with a filename
357 """build a callable that returns flags associated with a filename
358
358
359 The information is extracted from three possible layers:
359 The information is extracted from three possible layers:
360 1. the file system if it supports the information
360 1. the file system if it supports the information
361 2. the "fallback" information stored in the dirstate if any
361 2. the "fallback" information stored in the dirstate if any
362 3. a more expensive mechanism inferring the flags from the parents.
362 3. a more expensive mechanism inferring the flags from the parents.
363 """
363 """
364
364
365 # small hack to cache the result of buildfallback()
365 # small hack to cache the result of buildfallback()
366 fallback_func = []
366 fallback_func = []
367
367
368 def get_flags(x):
368 def get_flags(x):
369 entry = None
369 entry = None
370 fallback_value = None
370 fallback_value = None
371 try:
371 try:
372 st = os.lstat(self._join(x))
372 st = os.lstat(self._join(x))
373 except OSError:
373 except OSError:
374 return b''
374 return b''
375
375
376 if self._checklink:
376 if self._checklink:
377 if util.statislink(st):
377 if util.statislink(st):
378 return b'l'
378 return b'l'
379 else:
379 else:
380 entry = self.get_entry(x)
380 entry = self.get_entry(x)
381 if entry.has_fallback_symlink:
381 if entry.has_fallback_symlink:
382 if entry.fallback_symlink:
382 if entry.fallback_symlink:
383 return b'l'
383 return b'l'
384 else:
384 else:
385 if not fallback_func:
385 if not fallback_func:
386 fallback_func.append(buildfallback())
386 fallback_func.append(buildfallback())
387 fallback_value = fallback_func[0](x)
387 fallback_value = fallback_func[0](x)
388 if b'l' in fallback_value:
388 if b'l' in fallback_value:
389 return b'l'
389 return b'l'
390
390
391 if self._checkexec:
391 if self._checkexec:
392 if util.statisexec(st):
392 if util.statisexec(st):
393 return b'x'
393 return b'x'
394 else:
394 else:
395 if entry is None:
395 if entry is None:
396 entry = self.get_entry(x)
396 entry = self.get_entry(x)
397 if entry.has_fallback_exec:
397 if entry.has_fallback_exec:
398 if entry.fallback_exec:
398 if entry.fallback_exec:
399 return b'x'
399 return b'x'
400 else:
400 else:
401 if fallback_value is None:
401 if fallback_value is None:
402 if not fallback_func:
402 if not fallback_func:
403 fallback_func.append(buildfallback())
403 fallback_func.append(buildfallback())
404 fallback_value = fallback_func[0](x)
404 fallback_value = fallback_func[0](x)
405 if b'x' in fallback_value:
405 if b'x' in fallback_value:
406 return b'x'
406 return b'x'
407 return b''
407 return b''
408
408
409 return get_flags
409 return get_flags
410
410
411 @propertycache
411 @propertycache
412 def _cwd(self):
412 def _cwd(self):
413 # internal config: ui.forcecwd
413 # internal config: ui.forcecwd
414 forcecwd = self._ui.config(b'ui', b'forcecwd')
414 forcecwd = self._ui.config(b'ui', b'forcecwd')
415 if forcecwd:
415 if forcecwd:
416 return forcecwd
416 return forcecwd
417 return encoding.getcwd()
417 return encoding.getcwd()
418
418
419 def getcwd(self):
419 def getcwd(self):
420 """Return the path from which a canonical path is calculated.
420 """Return the path from which a canonical path is calculated.
421
421
422 This path should be used to resolve file patterns or to convert
422 This path should be used to resolve file patterns or to convert
423 canonical paths back to file paths for display. It shouldn't be
423 canonical paths back to file paths for display. It shouldn't be
424 used to get real file paths. Use vfs functions instead.
424 used to get real file paths. Use vfs functions instead.
425 """
425 """
426 cwd = self._cwd
426 cwd = self._cwd
427 if cwd == self._root:
427 if cwd == self._root:
428 return b''
428 return b''
429 # self._root ends with a path separator if self._root is '/' or 'C:\'
429 # self._root ends with a path separator if self._root is '/' or 'C:\'
430 rootsep = self._root
430 rootsep = self._root
431 if not util.endswithsep(rootsep):
431 if not util.endswithsep(rootsep):
432 rootsep += pycompat.ossep
432 rootsep += pycompat.ossep
433 if cwd.startswith(rootsep):
433 if cwd.startswith(rootsep):
434 return cwd[len(rootsep) :]
434 return cwd[len(rootsep) :]
435 else:
435 else:
436 # we're outside the repo. return an absolute path.
436 # we're outside the repo. return an absolute path.
437 return cwd
437 return cwd
438
438
439 def pathto(self, f, cwd=None):
439 def pathto(self, f, cwd=None):
440 if cwd is None:
440 if cwd is None:
441 cwd = self.getcwd()
441 cwd = self.getcwd()
442 path = util.pathto(self._root, cwd, f)
442 path = util.pathto(self._root, cwd, f)
443 if self._slash:
443 if self._slash:
444 return util.pconvert(path)
444 return util.pconvert(path)
445 return path
445 return path
446
446
447 def get_entry(self, path):
447 def get_entry(self, path):
448 """return a DirstateItem for the associated path"""
448 """return a DirstateItem for the associated path"""
449 entry = self._map.get(path)
449 entry = self._map.get(path)
450 if entry is None:
450 if entry is None:
451 return DirstateItem()
451 return DirstateItem()
452 return entry
452 return entry
453
453
454 def __contains__(self, key):
454 def __contains__(self, key):
455 return key in self._map
455 return key in self._map
456
456
457 def __iter__(self):
457 def __iter__(self):
458 return iter(sorted(self._map))
458 return iter(sorted(self._map))
459
459
460 def items(self):
460 def items(self):
461 return self._map.items()
461 return self._map.items()
462
462
463 iteritems = items
463 iteritems = items
464
464
465 def parents(self):
465 def parents(self):
466 return [self._validate(p) for p in self._pl]
466 return [self._validate(p) for p in self._pl]
467
467
468 def p1(self):
468 def p1(self):
469 return self._validate(self._pl[0])
469 return self._validate(self._pl[0])
470
470
471 def p2(self):
471 def p2(self):
472 return self._validate(self._pl[1])
472 return self._validate(self._pl[1])
473
473
474 @property
474 @property
475 def in_merge(self):
475 def in_merge(self):
476 """True if a merge is in progress"""
476 """True if a merge is in progress"""
477 return self._pl[1] != self._nodeconstants.nullid
477 return self._pl[1] != self._nodeconstants.nullid
478
478
479 def branch(self):
479 def branch(self):
480 return encoding.tolocal(self._branch)
480 return encoding.tolocal(self._branch)
481
481
482 # XXX since this make the dirstate dirty, we should enforce that it is done
482 # XXX since this make the dirstate dirty, we should enforce that it is done
483 # withing an appropriate change-context that scope the change and ensure it
483 # withing an appropriate change-context that scope the change and ensure it
484 # eventually get written on disk (or rolled back)
484 # eventually get written on disk (or rolled back)
485 def setparents(self, p1, p2=None):
485 def setparents(self, p1, p2=None):
486 """Set dirstate parents to p1 and p2.
486 """Set dirstate parents to p1 and p2.
487
487
488 When moving from two parents to one, "merged" entries a
488 When moving from two parents to one, "merged" entries a
489 adjusted to normal and previous copy records discarded and
489 adjusted to normal and previous copy records discarded and
490 returned by the call.
490 returned by the call.
491
491
492 See localrepo.setparents()
492 See localrepo.setparents()
493 """
493 """
494 if p2 is None:
494 if p2 is None:
495 p2 = self._nodeconstants.nullid
495 p2 = self._nodeconstants.nullid
496 if self._changing_level == 0:
496 if self._changing_level == 0:
497 raise ValueError(
497 raise ValueError(
498 b"cannot set dirstate parent outside of "
498 b"cannot set dirstate parent outside of "
499 b"dirstate.changing_parents context manager"
499 b"dirstate.changing_parents context manager"
500 )
500 )
501
501
502 self._dirty = True
502 self._dirty = True
503 oldp2 = self._pl[1]
503 oldp2 = self._pl[1]
504 if self._origpl is None:
504 if self._origpl is None:
505 self._origpl = self._pl
505 self._origpl = self._pl
506 nullid = self._nodeconstants.nullid
506 nullid = self._nodeconstants.nullid
507 # True if we need to fold p2 related state back to a linear case
507 # True if we need to fold p2 related state back to a linear case
508 fold_p2 = oldp2 != nullid and p2 == nullid
508 fold_p2 = oldp2 != nullid and p2 == nullid
509 return self._map.setparents(p1, p2, fold_p2=fold_p2)
509 return self._map.setparents(p1, p2, fold_p2=fold_p2)
510
510
511 def setbranch(self, branch):
511 def setbranch(self, branch):
512 self.__class__._branch.set(self, encoding.fromlocal(branch))
512 self.__class__._branch.set(self, encoding.fromlocal(branch))
513 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
513 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
514 try:
514 try:
515 f.write(self._branch + b'\n')
515 f.write(self._branch + b'\n')
516 f.close()
516 f.close()
517
517
518 # make sure filecache has the correct stat info for _branch after
518 # make sure filecache has the correct stat info for _branch after
519 # replacing the underlying file
519 # replacing the underlying file
520 ce = self._filecache[b'_branch']
520 ce = self._filecache[b'_branch']
521 if ce:
521 if ce:
522 ce.refresh()
522 ce.refresh()
523 except: # re-raises
523 except: # re-raises
524 f.discard()
524 f.discard()
525 raise
525 raise
526
526
527 def invalidate(self):
527 def invalidate(self):
528 """Causes the next access to reread the dirstate.
528 """Causes the next access to reread the dirstate.
529
529
530 This is different from localrepo.invalidatedirstate() because it always
530 This is different from localrepo.invalidatedirstate() because it always
531 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
531 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
532 check whether the dirstate has changed before rereading it."""
532 check whether the dirstate has changed before rereading it."""
533
533
534 for a in ("_map", "_branch", "_ignore"):
534 for a in ("_map", "_branch", "_ignore"):
535 if a in self.__dict__:
535 if a in self.__dict__:
536 delattr(self, a)
536 delattr(self, a)
537 self._dirty = False
537 self._dirty = False
538 self._dirty_tracked_set = False
538 self._dirty_tracked_set = False
539 self._invalidated_context = self._changing_level > 0
539 self._invalidated_context = self._changing_level > 0
540 self._origpl = None
540 self._origpl = None
541
541
542 # XXX since this make the dirstate dirty, we should enforce that it is done
542 # XXX since this make the dirstate dirty, we should enforce that it is done
543 # withing an appropriate change-context that scope the change and ensure it
543 # withing an appropriate change-context that scope the change and ensure it
544 # eventually get written on disk (or rolled back)
544 # eventually get written on disk (or rolled back)
545 def copy(self, source, dest):
545 def copy(self, source, dest):
546 """Mark dest as a copy of source. Unmark dest if source is None."""
546 """Mark dest as a copy of source. Unmark dest if source is None."""
547 if source == dest:
547 if source == dest:
548 return
548 return
549 self._dirty = True
549 self._dirty = True
550 if source is not None:
550 if source is not None:
551 self._check_sparse(source)
551 self._check_sparse(source)
552 self._map.copymap[dest] = source
552 self._map.copymap[dest] = source
553 else:
553 else:
554 self._map.copymap.pop(dest, None)
554 self._map.copymap.pop(dest, None)
555
555
556 def copied(self, file):
556 def copied(self, file):
557 return self._map.copymap.get(file, None)
557 return self._map.copymap.get(file, None)
558
558
    def copies(self):
        """Return the mapping of copy destination -> copy source."""
        return self._map.copymap
561
561
562 @requires_changing_files
562 @requires_changing_files
563 def set_tracked(self, filename, reset_copy=False):
563 def set_tracked(self, filename, reset_copy=False):
564 """a "public" method for generic code to mark a file as tracked
564 """a "public" method for generic code to mark a file as tracked
565
565
566 This function is to be called outside of "update/merge" case. For
566 This function is to be called outside of "update/merge" case. For
567 example by a command like `hg add X`.
567 example by a command like `hg add X`.
568
568
569 if reset_copy is set, any existing copy information will be dropped.
569 if reset_copy is set, any existing copy information will be dropped.
570
570
571 return True the file was previously untracked, False otherwise.
571 return True the file was previously untracked, False otherwise.
572 """
572 """
573 self._dirty = True
573 self._dirty = True
574 entry = self._map.get(filename)
574 entry = self._map.get(filename)
575 if entry is None or not entry.tracked:
575 if entry is None or not entry.tracked:
576 self._check_new_tracked_filename(filename)
576 self._check_new_tracked_filename(filename)
577 pre_tracked = self._map.set_tracked(filename)
577 pre_tracked = self._map.set_tracked(filename)
578 if reset_copy:
578 if reset_copy:
579 self._map.copymap.pop(filename, None)
579 self._map.copymap.pop(filename, None)
580 if pre_tracked:
580 if pre_tracked:
581 self._dirty_tracked_set = True
581 self._dirty_tracked_set = True
582 return pre_tracked
582 return pre_tracked
583
583
584 @requires_changing_files
584 @requires_changing_files
585 def set_untracked(self, filename):
585 def set_untracked(self, filename):
586 """a "public" method for generic code to mark a file as untracked
586 """a "public" method for generic code to mark a file as untracked
587
587
588 This function is to be called outside of "update/merge" case. For
588 This function is to be called outside of "update/merge" case. For
589 example by a command like `hg remove X`.
589 example by a command like `hg remove X`.
590
590
591 return True the file was previously tracked, False otherwise.
591 return True the file was previously tracked, False otherwise.
592 """
592 """
593 ret = self._map.set_untracked(filename)
593 ret = self._map.set_untracked(filename)
594 if ret:
594 if ret:
595 self._dirty = True
595 self._dirty = True
596 self._dirty_tracked_set = True
596 self._dirty_tracked_set = True
597 return ret
597 return ret
598
598
599 @requires_not_changing_parents
599 @requires_not_changing_parents
600 def set_clean(self, filename, parentfiledata):
600 def set_clean(self, filename, parentfiledata):
601 """record that the current state of the file on disk is known to be clean"""
601 """record that the current state of the file on disk is known to be clean"""
602 self._dirty = True
602 self._dirty = True
603 if not self._map[filename].tracked:
603 if not self._map[filename].tracked:
604 self._check_new_tracked_filename(filename)
604 self._check_new_tracked_filename(filename)
605 (mode, size, mtime) = parentfiledata
605 (mode, size, mtime) = parentfiledata
606 self._map.set_clean(filename, mode, size, mtime)
606 self._map.set_clean(filename, mode, size, mtime)
607
607
    @requires_not_changing_parents
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        # the next status computation will have to re-examine this file
        self._dirty = True
        self._map.set_possibly_dirty(filename)
613
613
    @requires_changing_parents
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.changing_parents(repo):` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )
653
653
    @requires_changing_parents
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes, to keep track
        of what the file's situation is with regard to the working copy and
        its parent.

        This function must be called within a `dirstate.changing_parents`
        context.

        note: the API is at an early stage and we might need to adjust it
        depending on what information ends up being relevant and useful to
        other processing.
        """
        # thin public wrapper; the actual state change lives in _update_file
        self._update_file(
            filename=filename,
            wc_tracked=wc_tracked,
            p1_tracked=p1_tracked,
            p2_info=p2_info,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
683
683
    # XXX since this makes the dirstate dirty, we should enforce that it is
    # done within an appropriate change-context that scopes the change and
    # ensures it eventually gets written to disk (or rolled back)
    def hacky_extension_update_file(self, *args, **kwargs):
        """NEVER USE THIS, YOU DO NOT NEED IT

        This function is a variant of "update_file" to be called by a small set
        of extensions, it also adjust the internal state of file, but can be
        called outside an `changing_parents` context.

        A very small number of extension meddle with the working copy content
        in a way that requires to adjust the dirstate accordingly. At the time
        this command is written they are :
        - keyword,
        - largefile,
        PLEASE DO NOT GROW THIS LIST ANY FURTHER.

        This function could probably be replaced by more semantic one (like
        "adjust expected size" or "always revalidate file content", etc)
        however at the time where this is writen, this is too much of a detour
        to be considered.
        """
        # intentionally a bare pass-through: same signature and semantics as
        # _update_file, minus the changing-parents context requirement
        self._update_file(
            *args,
            **kwargs,
        )
710
710
    def _update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """shared implementation behind update_file and its "hacky" variant

        Records the new tracked/parent state for ``filename`` in the map and
        updates the dirty flags accordingly.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        old_entry = self._map.get(filename)
        if old_entry is None:
            prev_tracked = False
        else:
            prev_tracked = old_entry.tracked
        if prev_tracked != wc_tracked:
            # the tracked set changed; the tracked-hint file must be rewritten
            self._dirty_tracked_set = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            # a "possibly dirty" file must be re-examined on next status
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
742
742
743 def _check_new_tracked_filename(self, filename):
743 def _check_new_tracked_filename(self, filename):
744 scmutil.checkfilename(filename)
744 scmutil.checkfilename(filename)
745 if self._map.hastrackeddir(filename):
745 if self._map.hastrackeddir(filename):
746 msg = _(b'directory %r already in dirstate')
746 msg = _(b'directory %r already in dirstate')
747 msg %= pycompat.bytestr(filename)
747 msg %= pycompat.bytestr(filename)
748 raise error.Abort(msg)
748 raise error.Abort(msg)
749 # shadows
749 # shadows
750 for d in pathutil.finddirs(filename):
750 for d in pathutil.finddirs(filename):
751 if self._map.hastrackeddir(d):
751 if self._map.hastrackeddir(d):
752 break
752 break
753 entry = self._map.get(d)
753 entry = self._map.get(d)
754 if entry is not None and not entry.removed:
754 if entry is not None and not entry.removed:
755 msg = _(b'file %r in dirstate clashes with %r')
755 msg = _(b'file %r in dirstate clashes with %r')
756 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
756 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
757 raise error.Abort(msg)
757 raise error.Abort(msg)
758 self._check_sparse(filename)
758 self._check_sparse(filename)
759
759
760 def _check_sparse(self, filename):
760 def _check_sparse(self, filename):
761 """Check that a filename is inside the sparse profile"""
761 """Check that a filename is inside the sparse profile"""
762 sparsematch = self._sparsematcher
762 sparsematch = self._sparsematcher
763 if sparsematch is not None and not sparsematch.always():
763 if sparsematch is not None and not sparsematch.always():
764 if not sparsematch(filename):
764 if not sparsematch(filename):
765 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
765 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
766 hint = _(
766 hint = _(
767 b'include file with `hg debugsparse --include <pattern>` or use '
767 b'include file with `hg debugsparse --include <pattern>` or use '
768 b'`hg add -s <file>` to include file directory while adding'
768 b'`hg add -s <file>` to include file directory while adding'
769 )
769 )
770 raise error.Abort(msg % filename, hint=hint)
770 raise error.Abort(msg % filename, hint=hint)
771
771
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Determine the on-disk case folding for ``path``.

        ``normed`` is the case-normalized form of ``path``; ``storemap`` is
        the fold map (file or directory) to cache the discovered result in.
        Results for missing paths are not cached.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist on disk
            storemap[normed] = folded

        return folded
797
797
798 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
798 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
799 normed = util.normcase(path)
799 normed = util.normcase(path)
800 folded = self._map.filefoldmap.get(normed, None)
800 folded = self._map.filefoldmap.get(normed, None)
801 if folded is None:
801 if folded is None:
802 if isknown:
802 if isknown:
803 folded = path
803 folded = path
804 else:
804 else:
805 folded = self._discoverpath(
805 folded = self._discoverpath(
806 path, normed, ignoremissing, exists, self._map.filefoldmap
806 path, normed, ignoremissing, exists, self._map.filefoldmap
807 )
807 )
808 return folded
808 return folded
809
809
810 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
810 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
811 normed = util.normcase(path)
811 normed = util.normcase(path)
812 folded = self._map.filefoldmap.get(normed, None)
812 folded = self._map.filefoldmap.get(normed, None)
813 if folded is None:
813 if folded is None:
814 folded = self._map.dirfoldmap.get(normed, None)
814 folded = self._map.dirfoldmap.get(normed, None)
815 if folded is None:
815 if folded is None:
816 if isknown:
816 if isknown:
817 folded = path
817 folded = path
818 else:
818 else:
819 # store discovered result in dirfoldmap so that future
819 # store discovered result in dirfoldmap so that future
820 # normalizefile calls don't start matching directories
820 # normalizefile calls don't start matching directories
821 folded = self._discoverpath(
821 folded = self._discoverpath(
822 path, normed, ignoremissing, exists, self._map.dirfoldmap
822 path, normed, ignoremissing, exists, self._map.dirfoldmap
823 )
823 )
824 return folded
824 return folded
825
825
826 def normalize(self, path, isknown=False, ignoremissing=False):
826 def normalize(self, path, isknown=False, ignoremissing=False):
827 """
827 """
828 normalize the case of a pathname when on a casefolding filesystem
828 normalize the case of a pathname when on a casefolding filesystem
829
829
830 isknown specifies whether the filename came from walking the
830 isknown specifies whether the filename came from walking the
831 disk, to avoid extra filesystem access.
831 disk, to avoid extra filesystem access.
832
832
833 If ignoremissing is True, missing path are returned
833 If ignoremissing is True, missing path are returned
834 unchanged. Otherwise, we try harder to normalize possibly
834 unchanged. Otherwise, we try harder to normalize possibly
835 existing path components.
835 existing path components.
836
836
837 The normalized case is determined based on the following precedence:
837 The normalized case is determined based on the following precedence:
838
838
839 - version of name already stored in the dirstate
839 - version of name already stored in the dirstate
840 - version of name stored on disk
840 - version of name stored on disk
841 - version provided via command arguments
841 - version provided via command arguments
842 """
842 """
843
843
844 if self._checkcase:
844 if self._checkcase:
845 return self._normalize(path, isknown, ignoremissing)
845 return self._normalize(path, isknown, ignoremissing)
846 return path
846 return path
847
847
    # XXX since this makes the dirstate dirty, we should enforce that it is
    # done within an appropriate change-context that scopes the change and
    # ensures it eventually gets written to disk (or rolled back)
    def clear(self):
        """Drop every entry from the dirstate map and mark it dirty."""
        self._map.clear()
        self._dirty = True
854
854
    # XXX since this makes the dirstate dirty, we should enforce that it is
    # done within an appropriate change-context that scopes the change and
    # ensures it eventually gets written to disk (or rolled back)
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild the dirstate against ``parent``.

        ``allfiles`` is the full set of files in the new parent. When
        ``changedfiles`` is None the whole dirstate is rebuilt; otherwise
        only those files are refreshed or dropped.
        """
        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            # remember the pre-rebuild parents so parent-change callbacks fire
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
909
909
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
917
917
918 def write(self, tr):
918 def write(self, tr):
919 if not self._dirty:
919 if not self._dirty:
920 return
920 return
921
921
922 write_key = self._use_tracked_hint and self._dirty_tracked_set
922 write_key = self._use_tracked_hint and self._dirty_tracked_set
923 if tr:
923 if tr:
924 # make sure we invalidate the current change on abort
924 # make sure we invalidate the current change on abort
925 if tr is not None:
925 if tr is not None:
926 tr.addabort(
926 tr.addabort(
927 b'dirstate-invalidate',
927 b'dirstate-invalidate',
928 lambda tr: self.invalidate(),
928 lambda tr: self.invalidate(),
929 )
929 )
930 # delay writing in-memory changes out
930 # delay writing in-memory changes out
931 tr.addfilegenerator(
931 tr.addfilegenerator(
932 b'dirstate-1-main',
932 b'dirstate-1-main',
933 (self._filename,),
933 (self._filename,),
934 lambda f: self._writedirstate(tr, f),
934 lambda f: self._writedirstate(tr, f),
935 location=b'plain',
935 location=b'plain',
936 post_finalize=True,
936 post_finalize=True,
937 )
937 )
938 if write_key:
938 if write_key:
939 tr.addfilegenerator(
939 tr.addfilegenerator(
940 b'dirstate-2-key-post',
940 b'dirstate-2-key-post',
941 (self._filename_th,),
941 (self._filename_th,),
942 lambda f: self._write_tracked_hint(tr, f),
942 lambda f: self._write_tracked_hint(tr, f),
943 location=b'plain',
943 location=b'plain',
944 post_finalize=True,
944 post_finalize=True,
945 )
945 )
946 return
946 return
947
947
948 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
948 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
949 with file(self._filename) as f:
949 with file(self._filename) as f:
950 self._writedirstate(tr, f)
950 self._writedirstate(tr, f)
951 if write_key:
951 if write_key:
952 # we update the key-file after writing to make sure reader have a
952 # we update the key-file after writing to make sure reader have a
953 # key that match the newly written content
953 # key that match the newly written content
954 with file(self._filename_th) as f:
954 with file(self._filename_th) as f:
955 self._write_tracked_hint(tr, f)
955 self._write_tracked_hint(tr, f)
956
956
    def delete_tracked_hint(self):
        """remove the tracked_hint file

        To be used by format downgrades operation"""
        self._opener.unlink(self._filename_th)
        # stop maintaining the hint file from now on
        self._use_tracked_hint = False
963
963
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
974
974
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map to the open file ``st``.

        Fires parent-change callbacks first (in stable category order),
        then writes the map and clears the dirty flags.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.items()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        self._map.write(tr, st)
        self._dirty = False
        self._dirty_tracked_set = False
984
984
985 def _write_tracked_hint(self, tr, f):
985 def _write_tracked_hint(self, tr, f):
986 key = node.hex(uuid.uuid4().bytes)
986 key = node.hex(uuid.uuid4().bytes)
987 f.write(b"1\n%s\n" % key) # 1 is the format version
987 f.write(b"1\n%s\n" % key) # 1 is the format version
988
988
989 def _dirignore(self, f):
989 def _dirignore(self, f):
990 if self._ignore(f):
990 if self._ignore(f):
991 return True
991 return True
992 for p in pathutil.finddirs(f):
992 for p in pathutil.finddirs(f):
993 if self._ignore(p):
993 if self._ignore(p):
994 return True
994 return True
995 return False
995 return False
996
996
997 def _ignorefiles(self):
997 def _ignorefiles(self):
998 files = []
998 files = []
999 if os.path.exists(self._join(b'.hgignore')):
999 if os.path.exists(self._join(b'.hgignore')):
1000 files.append(self._join(b'.hgignore'))
1000 files.append(self._join(b'.hgignore'))
1001 for name, path in self._ui.configitems(b"ui"):
1001 for name, path in self._ui.configitems(b"ui"):
1002 if name == b'ignore' or name.startswith(b'ignore.'):
1002 if name == b'ignore' or name.startswith(b'ignore.'):
1003 # we need to use os.path.join here rather than self._join
1003 # we need to use os.path.join here rather than self._join
1004 # because path is arbitrary and user-specified
1004 # because path is arbitrary and user-specified
1005 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1005 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1006 return files
1006 return files
1007
1007
    def _ignorefileandline(self, f):
        """Return ``(ignorefile, lineno, line)`` for the first rule matching ``f``.

        Walks all configured ignore files breadth-first, following
        ``subinclude`` directives, and returns ``(None, -1, b"")`` when no
        pattern matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file, but only once
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
1029
1029
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # Build the human-readable message passed to match.bad() for
            # paths that are neither regular files, symlinks nor directories.
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # Bind frequently-used callables and constants to locals; the loop
        # below may run once per explicitly-named file.
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        # Only pay for case-folding normalization when the filesystem is
        # case-insensitive and the matcher is not an exact-path matcher.
        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # Drop any explicitly-named file that lives inside a subrepo; both
        # lists are sorted so a single merge-style pass suffices.
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # Sentinel None entries for subrepos and .hg prevent walking into them.
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    # device/fifo/socket etc.: report via match.bad()
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except (OSError) as inst:
                # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # Group the stat'ed paths by their case-folded form.
            for f, st in results.items():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # For ambiguous groups, keep the stat only for the spelling that
            # matches the on-disk case; others are downgraded to None.
            for norm, paths in normed.items():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1165
1165
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Select the ignore predicates: listing ignored files means nothing
        # is ignored; listing unknown files uses the real hgignore rules;
        # otherwise everything is "ignored" so steps below can be skipped.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Restrict the matcher to the sparse checkout, but keep explicitly
        # named files visible even outside the sparse profile.
        if self._sparsematchfn is not None:
            em = matchmod.exact(match.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            match = matchmod.intersectmatchers(match, sm)

        # Local aliases for attributes used inside the traversal loops.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Iterative depth-first traversal using `work` as an explicit
            # stack of (normalized) directory names to visit.
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except (PermissionError, FileNotFoundError) as inst:
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # Drop the subrepo/.hg sentinels installed by _walkexplicit.
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1356
1356
1357 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1357 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1358 if self._sparsematchfn is not None:
1358 if self._sparsematchfn is not None:
1359 em = matchmod.exact(matcher.files())
1359 em = matchmod.exact(matcher.files())
1360 sm = matchmod.unionmatcher([self._sparsematcher, em])
1360 sm = matchmod.unionmatcher([self._sparsematcher, em])
1361 matcher = matchmod.intersectmatchers(matcher, sm)
1361 matcher = matchmod.intersectmatchers(matcher, sm)
1362 # Force Rayon (Rust parallelism library) to respect the number of
1362 # Force Rayon (Rust parallelism library) to respect the number of
1363 # workers. This is a temporary workaround until Rust code knows
1363 # workers. This is a temporary workaround until Rust code knows
1364 # how to read the config file.
1364 # how to read the config file.
1365 numcpus = self._ui.configint(b"worker", b"numcpus")
1365 numcpus = self._ui.configint(b"worker", b"numcpus")
1366 if numcpus is not None:
1366 if numcpus is not None:
1367 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1367 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1368
1368
1369 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1369 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1370 if not workers_enabled:
1370 if not workers_enabled:
1371 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1371 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1372
1372
1373 (
1373 (
1374 lookup,
1374 lookup,
1375 modified,
1375 modified,
1376 added,
1376 added,
1377 removed,
1377 removed,
1378 deleted,
1378 deleted,
1379 clean,
1379 clean,
1380 ignored,
1380 ignored,
1381 unknown,
1381 unknown,
1382 warnings,
1382 warnings,
1383 bad,
1383 bad,
1384 traversed,
1384 traversed,
1385 dirty,
1385 dirty,
1386 ) = rustmod.status(
1386 ) = rustmod.status(
1387 self._map._map,
1387 self._map._map,
1388 matcher,
1388 matcher,
1389 self._rootdir,
1389 self._rootdir,
1390 self._ignorefiles(),
1390 self._ignorefiles(),
1391 self._checkexec,
1391 self._checkexec,
1392 bool(list_clean),
1392 bool(list_clean),
1393 bool(list_ignored),
1393 bool(list_ignored),
1394 bool(list_unknown),
1394 bool(list_unknown),
1395 bool(matcher.traversedir),
1395 bool(matcher.traversedir),
1396 )
1396 )
1397
1397
1398 self._dirty |= dirty
1398 self._dirty |= dirty
1399
1399
1400 if matcher.traversedir:
1400 if matcher.traversedir:
1401 for dir in traversed:
1401 for dir in traversed:
1402 matcher.traversedir(dir)
1402 matcher.traversedir(dir)
1403
1403
1404 if self._ui.warn:
1404 if self._ui.warn:
1405 for item in warnings:
1405 for item in warnings:
1406 if isinstance(item, tuple):
1406 if isinstance(item, tuple):
1407 file_path, syntax = item
1407 file_path, syntax = item
1408 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1408 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1409 file_path,
1409 file_path,
1410 syntax,
1410 syntax,
1411 )
1411 )
1412 self._ui.warn(msg)
1412 self._ui.warn(msg)
1413 else:
1413 else:
1414 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1414 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1415 self._ui.warn(
1415 self._ui.warn(
1416 msg
1416 msg
1417 % (
1417 % (
1418 pathutil.canonpath(
1418 pathutil.canonpath(
1419 self._rootdir, self._rootdir, item
1419 self._rootdir, self._rootdir, item
1420 ),
1420 ),
1421 b"No such file or directory",
1421 b"No such file or directory",
1422 )
1422 )
1423 )
1423 )
1424
1424
1425 for fn, message in bad:
1425 for fn, message in bad:
1426 matcher.bad(fn, encoding.strtolocal(message))
1426 matcher.bad(fn, encoding.strtolocal(message))
1427
1427
1428 status = scmutil.status(
1428 status = scmutil.status(
1429 modified=modified,
1429 modified=modified,
1430 added=added,
1430 added=added,
1431 removed=removed,
1431 removed=removed,
1432 deleted=deleted,
1432 deleted=deleted,
1433 unknown=unknown,
1433 unknown=unknown,
1434 ignored=ignored,
1434 ignored=ignored,
1435 clean=clean,
1435 clean=clean,
1436 )
1436 )
1437 return (lookup, status)
1437 return (lookup, status)
1438
1438
    # XXX since this can make the dirstate dirty (through rust), we should
    # enforce that it is done within an appropriate change-context that scopes
    # the change and ensures it eventually gets written on disk (or rolled
    # back)
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # Keep the caller's flags under distinct names; the parameter names
        # are reused below for the result lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        # Matcher types the Rust status implementation knows how to handle.
        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.differencematcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
            matchmod.intersectionmatcher,
            matchmod.nevermatcher,
            matchmod.unionmatcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        # Get the time from the filesystem so we can disambiguate files that
        # appear modified in the present or future.
        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None

        if use_rust:
            try:
                res = self._rust_status(
                    match, listclean, listignored, listunknown
                )
                return res + (mtime_boundary,)
            except rustmod.FallbackError:
                # fall through to the pure-Python implementation below
                pass

        def noop(f):
            # placeholder "append" used when a category is not being listed
            pass

        # Bound-method aliases for the per-file classification loop below.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._map.copymap

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(
            match, subrepos, listunknown, listignored, full=full
        ).items():
            if not dcontains(fn):
                # file on disk but not in the dirstate: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                # tracked but missing from disk
                dadd(fn)
            elif t.p2_info:
                # merge-related entries always report as modified
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if not checklink and t.has_fallback_symlink:
                    # If the file system does not support symlink, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif not checkexec and t.has_fallback_exec:
                    # If the file system does not support exec bits, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    # There might be a change in the future if for example the
                    # internal clock is off, but this is a case where the issues
                    # the user would face would be a lot worse and there is
                    # nothing we can really do.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status, mtime_boundary)
1589
1589
1590 def matches(self, match):
1590 def matches(self, match):
1591 """
1591 """
1592 return files in the dirstate (in whatever state) filtered by match
1592 return files in the dirstate (in whatever state) filtered by match
1593 """
1593 """
1594 dmap = self._map
1594 dmap = self._map
1595 if rustmod is not None:
1595 if rustmod is not None:
1596 dmap = self._map._map
1596 dmap = self._map._map
1597
1597
1598 if match.always():
1598 if match.always():
1599 return dmap.keys()
1599 return dmap.keys()
1600 files = match.files()
1600 files = match.files()
1601 if match.isexact():
1601 if match.isexact():
1602 # fast path -- filter the other way around, since typically files is
1602 # fast path -- filter the other way around, since typically files is
1603 # much smaller than dmap
1603 # much smaller than dmap
1604 return [f for f in files if f in dmap]
1604 return [f for f in files if f in dmap]
1605 if match.prefix() and all(fn in dmap for fn in files):
1605 if match.prefix() and all(fn in dmap for fn in files):
1606 # fast path -- all the values are known to be files, so just return
1606 # fast path -- all the values are known to be files, so just return
1607 # that
1607 # that
1608 return list(files)
1608 return list(files)
1609 return [f for f in dmap if match(f)]
1609 return [f for f in dmap if match(f)]
1610
1610
1611 def _actualfilename(self, tr):
1611 def _actualfilename(self, tr):
1612 if tr:
1612 if tr:
1613 return self._pendingfilename
1613 return self._pendingfilename
1614 else:
1614 else:
1615 return self._filename
1615 return self._filename
1616
1616
1617 def all_file_names(self):
1617 def all_file_names(self):
1618 """list all filename currently used by this dirstate
1618 """list all filename currently used by this dirstate
1619
1619
1620 This is only used to do `hg rollback` related backup in the transaction
1620 This is only used to do `hg rollback` related backup in the transaction
1621 """
1621 """
1622 if not self._opener.exists(self._filename):
1622 if not self._opener.exists(self._filename):
1623 # no data every written to disk yet
1623 # no data every written to disk yet
1624 return ()
1624 return ()
1625 elif self._use_dirstate_v2:
1625 elif self._use_dirstate_v2:
1626 return (
1626 return (
1627 self._filename,
1627 self._filename,
1628 self._map.docket.data_filename(),
1628 self._map.docket.data_filename(),
1629 )
1629 )
1630 else:
1630 else:
1631 return (self._filename,)
1631 return (self._filename,)
1632
1632
1633 def verify(self, m1, m2, p1, narrow_matcher=None):
1633 def verify(self, m1, m2, p1, narrow_matcher=None):
1634 """
1634 """
1635 check the dirstate contents against the parent manifest and yield errors
1635 check the dirstate contents against the parent manifest and yield errors
1636 """
1636 """
1637 missing_from_p1 = _(
1637 missing_from_p1 = _(
1638 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1638 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1639 )
1639 )
1640 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1640 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1641 missing_from_ps = _(
1641 missing_from_ps = _(
1642 b"%s marked as modified, but not in either manifest\n"
1642 b"%s marked as modified, but not in either manifest\n"
1643 )
1643 )
1644 missing_from_ds = _(
1644 missing_from_ds = _(
1645 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1645 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1646 )
1646 )
1647 for f, entry in self.items():
1647 for f, entry in self.items():
1648 if entry.p1_tracked:
1648 if entry.p1_tracked:
1649 if entry.modified and f not in m1 and f not in m2:
1649 if entry.modified and f not in m1 and f not in m2:
1650 yield missing_from_ps % f
1650 yield missing_from_ps % f
1651 elif f not in m1:
1651 elif f not in m1:
1652 yield missing_from_p1 % (f, node.short(p1))
1652 yield missing_from_p1 % (f, node.short(p1))
1653 if entry.added and f in m1:
1653 if entry.added and f in m1:
1654 yield unexpected_in_p1 % f
1654 yield unexpected_in_p1 % f
1655 for f in m1:
1655 for f in m1:
1656 if narrow_matcher is not None and not narrow_matcher(f):
1656 if narrow_matcher is not None and not narrow_matcher(f):
1657 continue
1657 continue
1658 entry = self.get_entry(f)
1658 entry = self.get_entry(f)
1659 if not entry.p1_tracked:
1659 if not entry.p1_tracked:
1660 yield missing_from_ds % (f, node.short(p1))
1660 yield missing_from_ds % (f, node.short(p1))
@@ -1,828 +1,842 b''
1 #require no-reposimplestore
1 #require no-reposimplestore
2
2
3 This file focuses mainly on updating largefiles in the working
3 This file focuses mainly on updating largefiles in the working
4 directory (and ".hg/largefiles/dirstate")
4 directory (and ".hg/largefiles/dirstate")
5
5
6 $ cat >> $HGRCPATH <<EOF
6 $ cat >> $HGRCPATH <<EOF
7 > [ui]
7 > [ui]
8 > merge = internal:merge
8 > merge = internal:merge
9 > [extensions]
9 > [extensions]
10 > largefiles =
10 > largefiles =
11 > [extdiff]
11 > [extdiff]
12 > # for portability:
12 > # for portability:
13 > pdiff = sh "$RUNTESTDIR/pdiff"
13 > pdiff = sh "$RUNTESTDIR/pdiff"
14 > EOF
14 > EOF
15
15
16 $ hg init repo
16 $ hg init repo
17 $ cd repo
17 $ cd repo
18
18
19 $ echo large1 > large1
19 $ echo large1 > large1
20 $ echo large2 > large2
20 $ echo large2 > large2
21 $ hg add --large large1 large2
21 $ hg add --large large1 large2
22 $ echo normal1 > normal1
22 $ echo normal1 > normal1
23 $ hg add normal1
23 $ hg add normal1
24 $ hg commit -m '#0'
24 $ hg commit -m '#0'
25 $ echo 'large1 in #1' > large1
25 $ echo 'large1 in #1' > large1
26 $ echo 'normal1 in #1' > normal1
26 $ echo 'normal1 in #1' > normal1
27 $ hg commit -m '#1'
27 $ hg commit -m '#1'
28 $ hg pdiff -r '.^' --config extensions.extdiff=
28 $ hg pdiff -r '.^' --config extensions.extdiff=
29 diff -Nru repo.0d9d9b8dc9a3/.hglf/large1 repo/.hglf/large1
29 diff -Nru repo.0d9d9b8dc9a3/.hglf/large1 repo/.hglf/large1
30 --- repo.0d9d9b8dc9a3/.hglf/large1 * (glob)
30 --- repo.0d9d9b8dc9a3/.hglf/large1 * (glob)
31 +++ repo/.hglf/large1 * (glob)
31 +++ repo/.hglf/large1 * (glob)
32 @@ -1* +1* @@ (glob)
32 @@ -1* +1* @@ (glob)
33 -4669e532d5b2c093a78eca010077e708a071bb64
33 -4669e532d5b2c093a78eca010077e708a071bb64
34 +58e24f733a964da346e2407a2bee99d9001184f5
34 +58e24f733a964da346e2407a2bee99d9001184f5
35 diff -Nru repo.0d9d9b8dc9a3/normal1 repo/normal1
35 diff -Nru repo.0d9d9b8dc9a3/normal1 repo/normal1
36 --- repo.0d9d9b8dc9a3/normal1 * (glob)
36 --- repo.0d9d9b8dc9a3/normal1 * (glob)
37 +++ repo/normal1 * (glob)
37 +++ repo/normal1 * (glob)
38 @@ -1* +1* @@ (glob)
38 @@ -1* +1* @@ (glob)
39 -normal1
39 -normal1
40 +normal1 in #1
40 +normal1 in #1
41 [1]
41 [1]
42 $ hg update -q -C 0
42 $ hg update -q -C 0
43 $ echo 'large2 in #2' > large2
43 $ echo 'large2 in #2' > large2
44 $ hg commit -m '#2'
44 $ hg commit -m '#2'
45 created new head
45 created new head
46
46
47 Test that update also updates the lfdirstate of 'unsure' largefiles after
47 Test that update also updates the lfdirstate of 'unsure' largefiles after
48 hashing them:
48 hashing them:
49
49
50 The previous operations will usually have left us with largefiles with a mtime
50 The previous operations will usually have left us with largefiles with a mtime
51 within the same second as the dirstate was written.
51 within the same second as the dirstate was written.
52 The lfdirstate entries will thus have been written with an invalidated/unset
52 The lfdirstate entries will thus have been written with an invalidated/unset
53 mtime to make sure further changes within the same second is detected.
53 mtime to make sure further changes within the same second is detected.
54 We will however occasionally be "lucky" and get a tick between writing
54 We will however occasionally be "lucky" and get a tick between writing
55 largefiles and writing dirstate so we get valid lfdirstate timestamps. The
55 largefiles and writing dirstate so we get valid lfdirstate timestamps. The
56 following verification is thus disabled but can be verified manually.
56 following verification is thus disabled but can be verified manually.
57
57
58 #if false
58 #if false
59 $ hg debugdirstate --large --nodate
59 $ hg debugdirstate --large --nodate
60 n 644 7 unset large1
60 n 644 7 unset large1
61 n 644 13 unset large2
61 n 644 13 unset large2
62 #endif
62 #endif
63
63
64 Wait to make sure we get a tick so the mtime of the largefiles become valid.
64 Wait to make sure we get a tick so the mtime of the largefiles become valid.
65
65
66 $ sleep 1
66 $ sleep 1
67
67
68 A linear merge will update standins before performing the actual merge. It will
68 A linear merge will update standins before performing the actual merge. It will
69 do a lfdirstate status walk and find 'unset'/'unsure' files, hash them, and
69 do a lfdirstate status walk and find 'unset'/'unsure' files, hash them, and
70 update the corresponding standins.
70 update the corresponding standins.
71
71
72 Verify that it actually marks the clean files as clean in lfdirstate so
72 Verify that it actually marks the clean files as clean in lfdirstate so
73 we don't have to hash them again next time we update.
73 we don't have to hash them again next time we update.
74
74
75 # note:
75 # note:
76 # We do this less agressively now, to avoid race condition, however the
76 # We do this less agressively now, to avoid race condition, however the
77 # cache
77 # cache
78 # is properly set after the next status
78 # is properly set after the next status
79 #
79 #
80 # The "changed" output is marked as missing-correct-output/known-bad-output
80 # The "changed" output is marked as missing-correct-output/known-bad-output
81 # for clarify
81 # for clarify
82
82
83 $ hg up
83 $ hg up
84 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
84 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
85 updated to "f74e50bd9e55: #2"
85 updated to "f74e50bd9e55: #2"
86 1 other heads for branch "default"
86 1 other heads for branch "default"
87 $ hg debugdirstate --large --nodate
87 $ hg debugdirstate --large --nodate
88 n 644 7 set large1 (missing-correct-output !)
88 n 644 7 set large1 (missing-correct-output !)
89 n 644 13 set large2 (missing-correct-output !)
89 n 644 13 set large2 (missing-correct-output !)
90 n 0 -1 unset large1 (known-bad-output !)
90 n 0 -1 unset large1 (known-bad-output !)
91 n 0 -1 unset large2 (known-bad-output !)
91 n 0 -1 unset large2 (known-bad-output !)
92 $ sleep 1 # so that mtime are not ambiguous
92 $ sleep 1 # so that mtime are not ambiguous
93 $ hg status
93 $ hg status
94 $ hg debugdirstate --large --nodate
94 $ hg debugdirstate --large --nodate
95 n 644 7 set large1
95 n 644 7 set large1
96 n 644 13 set large2
96 n 644 13 set large2
97
97
98 Test that lfdirstate keeps track of last modification of largefiles and
98 Test that lfdirstate keeps track of last modification of largefiles and
99 prevents unnecessary hashing of content - also after linear/noop update
99 prevents unnecessary hashing of content - also after linear/noop update
100
100
101 (XXX Since there is a possible race during update, we only do this after the next
101 (XXX Since there is a possible race during update, we only do this after the next
102 status call, this is slower, but more correct)
102 status call, this is slower, but more correct)
103
103
104 $ sleep 1
104 $ sleep 1
105 $ hg st
105 $ hg st
106 $ hg debugdirstate --large --nodate
106 $ hg debugdirstate --large --nodate
107 n 644 7 set large1
107 n 644 7 set large1
108 n 644 13 set large2
108 n 644 13 set large2
109 $ hg up
109 $ hg up
110 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
110 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
111 updated to "f74e50bd9e55: #2"
111 updated to "f74e50bd9e55: #2"
112 1 other heads for branch "default"
112 1 other heads for branch "default"
113 $ hg debugdirstate --large --nodate
113 $ hg debugdirstate --large --nodate
114 n 644 7 set large1 (missing-correct-output !)
114 n 644 7 set large1 (missing-correct-output !)
115 n 644 13 set large2 (missing-correct-output !)
115 n 644 13 set large2 (missing-correct-output !)
116 n 0 -1 unset large1 (known-bad-output !)
116 n 0 -1 unset large1 (known-bad-output !)
117 n 0 -1 unset large2 (known-bad-output !)
117 n 0 -1 unset large2 (known-bad-output !)
118 $ sleep 1 # so that mtime are not ambiguous
118 $ sleep 1 # so that mtime are not ambiguous
119 $ hg status
119 $ hg status
120 $ hg debugdirstate --large --nodate
120 $ hg debugdirstate --large --nodate
121 n 644 7 set large1
121 n 644 7 set large1
122 n 644 13 set large2
122 n 644 13 set large2
123
123
124 Test that "hg merge" updates largefiles from "other" correctly
124 Test that "hg merge" updates largefiles from "other" correctly
125
125
126 (getting largefiles from "other" normally)
126 (getting largefiles from "other" normally)
127
127
128 $ hg status -A large1
128 $ hg status -A large1
129 C large1
129 C large1
130 $ cat large1
130 $ cat large1
131 large1
131 large1
132 $ cat .hglf/large1
132 $ cat .hglf/large1
133 4669e532d5b2c093a78eca010077e708a071bb64
133 4669e532d5b2c093a78eca010077e708a071bb64
134 $ hg merge --config debug.dirstate.delaywrite=2
134 $ hg merge --config debug.dirstate.delaywrite=2
135 getting changed largefiles
135 getting changed largefiles
136 1 largefiles updated, 0 removed
136 1 largefiles updated, 0 removed
137 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
137 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
138 (branch merge, don't forget to commit)
138 (branch merge, don't forget to commit)
139 $ hg status -A large1
139 $ hg status -A large1
140 M large1
140 M large1
141 $ cat large1
141 $ cat large1
142 large1 in #1
142 large1 in #1
143 $ cat .hglf/large1
143 $ cat .hglf/large1
144 58e24f733a964da346e2407a2bee99d9001184f5
144 58e24f733a964da346e2407a2bee99d9001184f5
145 $ hg diff -c 1 --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
145 $ hg diff -c 1 --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
146 -4669e532d5b2c093a78eca010077e708a071bb64
146 -4669e532d5b2c093a78eca010077e708a071bb64
147 +58e24f733a964da346e2407a2bee99d9001184f5
147 +58e24f733a964da346e2407a2bee99d9001184f5
148
148
149 (getting largefiles from "other" via conflict prompt)
149 (getting largefiles from "other" via conflict prompt)
150
150
151 $ hg update -q -C 2
151 $ hg update -q -C 2
152 $ echo 'large1 in #3' > large1
152 $ echo 'large1 in #3' > large1
153 $ echo 'normal1 in #3' > normal1
153 $ echo 'normal1 in #3' > normal1
154 $ hg commit -m '#3'
154 $ hg commit -m '#3'
155 $ cat .hglf/large1
155 $ cat .hglf/large1
156 e5bb990443d6a92aaf7223813720f7566c9dd05b
156 e5bb990443d6a92aaf7223813720f7566c9dd05b
157 $ hg merge --config debug.dirstate.delaywrite=2 --config ui.interactive=True <<EOF
157 $ hg merge --config debug.dirstate.delaywrite=2 --config ui.interactive=True <<EOF
158 > o
158 > o
159 > EOF
159 > EOF
160 largefile large1 has a merge conflict
160 largefile large1 has a merge conflict
161 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
161 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
162 you can keep (l)ocal e5bb990443d6a92aaf7223813720f7566c9dd05b or take (o)ther 58e24f733a964da346e2407a2bee99d9001184f5.
162 you can keep (l)ocal e5bb990443d6a92aaf7223813720f7566c9dd05b or take (o)ther 58e24f733a964da346e2407a2bee99d9001184f5.
163 what do you want to do? o
163 what do you want to do? o
164 merging normal1
164 merging normal1
165 warning: conflicts while merging normal1! (edit, then use 'hg resolve --mark')
165 warning: conflicts while merging normal1! (edit, then use 'hg resolve --mark')
166 getting changed largefiles
166 getting changed largefiles
167 1 largefiles updated, 0 removed
167 1 largefiles updated, 0 removed
168 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
168 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
169 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
169 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
170 [1]
170 [1]
171 $ hg status -A large1
171 $ hg status -A large1
172 M large1
172 M large1
173 $ cat large1
173 $ cat large1
174 large1 in #1
174 large1 in #1
175 $ cat .hglf/large1
175 $ cat .hglf/large1
176 58e24f733a964da346e2407a2bee99d9001184f5
176 58e24f733a964da346e2407a2bee99d9001184f5
177 $ rm normal1.orig
177 $ rm normal1.orig
178
178
179 (merge non-existing largefiles from "other" via conflict prompt -
179 (merge non-existing largefiles from "other" via conflict prompt -
180 make sure the following commit doesn't abort in a confusing way when trying to
180 make sure the following commit doesn't abort in a confusing way when trying to
181 mark the non-existing file as normal in lfdirstate)
181 mark the non-existing file as normal in lfdirstate)
182
182
183 $ mv .hg/largefiles/58e24f733a964da346e2407a2bee99d9001184f5 .
183 $ mv .hg/largefiles/58e24f733a964da346e2407a2bee99d9001184f5 .
184 $ hg update -q -C 3
184 $ hg update -q -C 3
185 $ hg merge --config largefiles.usercache=not --config debug.dirstate.delaywrite=2 --tool :local --config ui.interactive=True <<EOF
185 $ hg merge --config largefiles.usercache=not --config debug.dirstate.delaywrite=2 --tool :local --config ui.interactive=True <<EOF
186 > o
186 > o
187 > EOF
187 > EOF
188 largefile large1 has a merge conflict
188 largefile large1 has a merge conflict
189 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
189 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
190 you can keep (l)ocal e5bb990443d6a92aaf7223813720f7566c9dd05b or take (o)ther 58e24f733a964da346e2407a2bee99d9001184f5.
190 you can keep (l)ocal e5bb990443d6a92aaf7223813720f7566c9dd05b or take (o)ther 58e24f733a964da346e2407a2bee99d9001184f5.
191 what do you want to do? o
191 what do you want to do? o
192 getting changed largefiles
192 getting changed largefiles
193 large1: largefile 58e24f733a964da346e2407a2bee99d9001184f5 not available from file:/*/$TESTTMP/repo (glob)
193 large1: largefile 58e24f733a964da346e2407a2bee99d9001184f5 not available from file:/*/$TESTTMP/repo (glob)
194 0 largefiles updated, 0 removed
194 0 largefiles updated, 0 removed
195 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
195 0 files updated, 2 files merged, 0 files removed, 0 files unresolved
196 (branch merge, don't forget to commit)
196 (branch merge, don't forget to commit)
197 $ hg commit -m '1-2-3 testing' --config largefiles.usercache=not
197 $ hg commit -m '1-2-3 testing' --config largefiles.usercache=not
198 large1: largefile 58e24f733a964da346e2407a2bee99d9001184f5 not available from local store
198 large1: largefile 58e24f733a964da346e2407a2bee99d9001184f5 not available from local store
199 $ hg up -C . --config largefiles.usercache=not
199 $ hg up -C . --config largefiles.usercache=not
200 getting changed largefiles
200 getting changed largefiles
201 large1: largefile 58e24f733a964da346e2407a2bee99d9001184f5 not available from file:/*/$TESTTMP/repo (glob)
201 large1: largefile 58e24f733a964da346e2407a2bee99d9001184f5 not available from file:/*/$TESTTMP/repo (glob)
202 0 largefiles updated, 0 removed
202 0 largefiles updated, 0 removed
203 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
203 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 $ hg st large1
204 $ hg st large1
205 ! large1
205 ! large1
206 $ hg rollback -q
206 $ hg rollback -q
207 $ mv 58e24f733a964da346e2407a2bee99d9001184f5 .hg/largefiles/
207 $ mv 58e24f733a964da346e2407a2bee99d9001184f5 .hg/largefiles/
208
208
209 Test that "hg revert -r REV" updates largefiles from "REV" correctly
209 Test that "hg revert -r REV" updates largefiles from "REV" correctly
210
210
211 $ hg update -q -C 3
211 $ hg update -q -C 3
212 $ hg status -A large1
212 $ hg status -A large1
213 C large1
213 C large1
214 $ cat large1
214 $ cat large1
215 large1 in #3
215 large1 in #3
216 $ cat .hglf/large1
216 $ cat .hglf/large1
217 e5bb990443d6a92aaf7223813720f7566c9dd05b
217 e5bb990443d6a92aaf7223813720f7566c9dd05b
218 $ hg diff -c 1 --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
218 $ hg diff -c 1 --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
219 -4669e532d5b2c093a78eca010077e708a071bb64
219 -4669e532d5b2c093a78eca010077e708a071bb64
220 +58e24f733a964da346e2407a2bee99d9001184f5
220 +58e24f733a964da346e2407a2bee99d9001184f5
221 $ hg revert --no-backup -r 1 --config debug.dirstate.delaywrite=2 large1
221 $ hg revert --no-backup -r 1 --config debug.dirstate.delaywrite=2 large1
222 $ hg status -A large1
222 $ hg status -A large1
223 M large1
223 M large1
224 $ cat large1
224 $ cat large1
225 large1 in #1
225 large1 in #1
226 $ cat .hglf/large1
226 $ cat .hglf/large1
227 58e24f733a964da346e2407a2bee99d9001184f5
227 58e24f733a964da346e2407a2bee99d9001184f5
228
228
229 Test that "hg rollback" restores status of largefiles correctly
229 Test that "hg rollback" restores status of largefiles correctly
230
230
231 $ hg update -C -q
231 $ hg update -C -q
232 $ hg remove large1
232 $ hg remove large1
233 $ test -f .hglf/large1
233 $ test -f .hglf/large1
234 [1]
234 [1]
235 $ hg forget large2
235 $ hg forget large2
236 $ test -f .hglf/large2
236 $ test -f .hglf/large2
237 [1]
237 [1]
238 $ echo largeX > largeX
238 $ echo largeX > largeX
239 $ hg add --large largeX
239 $ hg add --large largeX
240 $ cat .hglf/largeX
240 $ cat .hglf/largeX
241
241
242 $ hg commit -m 'will be rollback-ed soon'
242 $ hg commit -m 'will be rollback-ed soon'
243 $ echo largeY > largeY
243 $ echo largeY > largeY
244 $ hg add --large largeY
244 $ hg add --large largeY
245
245
246 $ hg status -A large1
246 $ hg status -A large1
247 large1: $ENOENT$
247 large1: $ENOENT$
248
248
249 $ hg status -A large2
249 $ hg status -A large2
250 ? large2
250 ? large2
251 $ hg status -A largeX
251 $ hg status -A largeX
252 C largeX
252 C largeX
253 $ hg status -A largeY
253 $ hg status -A largeY
254 A largeY
254 A largeY
255 $ hg rollback
255 $ hg rollback
256 repository tip rolled back to revision 3 (undo commit)
256 repository tip rolled back to revision 3 (undo commit)
257 working directory now based on revision 3
257 working directory now based on revision 3
258 $ hg status -A large1
258 $ hg status -A large1
259 R large1
259 R large1
260 $ test -f .hglf/large1
260 $ test -f .hglf/large1
261 [1]
261 [1]
262 $ hg status -A large2
262 $ hg status -A large2
263 R large2
263 R large2
264 $ test -f .hglf/large2
264 $ test -f .hglf/large2
265 [1]
265 [1]
266 $ hg status -A largeX
266 $ hg status -A largeX
267 A largeX
267 A largeX
268 $ cat .hglf/largeX
268 $ cat .hglf/largeX
269
269
270 $ hg status -A largeY
270 $ hg status -A largeY
271 ? largeY
271 ? largeY
272 $ test -f .hglf/largeY
272 $ test -f .hglf/largeY
273 [1]
273 [1]
274 $ rm largeY
274 $ rm largeY
275
275
276 Test that "hg rollback" restores standins correctly
276 Test that "hg rollback" restores standins correctly
277
277
278 $ hg commit -m 'will be rollback-ed soon'
278 $ hg commit -m 'will be rollback-ed soon'
279 $ hg update -q -C 2
279 $ hg update -q -C 2
280 $ cat large1
280 $ cat large1
281 large1
281 large1
282 $ cat .hglf/large1
282 $ cat .hglf/large1
283 4669e532d5b2c093a78eca010077e708a071bb64
283 4669e532d5b2c093a78eca010077e708a071bb64
284 $ cat large2
284 $ cat large2
285 large2 in #2
285 large2 in #2
286 $ cat .hglf/large2
286 $ cat .hglf/large2
287 3cfce6277e7668985707b6887ce56f9f62f6ccd9
287 3cfce6277e7668985707b6887ce56f9f62f6ccd9
288
288
289 $ hg rollback -q -f
289 $ hg rollback -q -f
290 $ cat large1
290 $ cat large1
291 large1
291 large1
292 $ cat .hglf/large1
292 $ cat .hglf/large1
293 4669e532d5b2c093a78eca010077e708a071bb64
293 4669e532d5b2c093a78eca010077e708a071bb64
294 $ cat large2
294 $ cat large2
295 large2 in #2
295 large2 in #2
296 $ cat .hglf/large2
296 $ cat .hglf/large2
297 3cfce6277e7668985707b6887ce56f9f62f6ccd9
297 3cfce6277e7668985707b6887ce56f9f62f6ccd9
298
298
299 (rollback the parent of the working directory, when the parent of it
299 (rollback the parent of the working directory, when the parent of it
300 is not branch-tip)
300 is not branch-tip)
301
301
302 $ hg update -q -C 1
302 $ hg update -q -C 1
303 $ cat .hglf/large1
303 $ cat .hglf/large1
304 58e24f733a964da346e2407a2bee99d9001184f5
304 58e24f733a964da346e2407a2bee99d9001184f5
305 $ cat .hglf/large2
305 $ cat .hglf/large2
306 1deebade43c8c498a3c8daddac0244dc55d1331d
306 1deebade43c8c498a3c8daddac0244dc55d1331d
307
307
308 $ echo normalX > normalX
308 $ echo normalX > normalX
309 $ hg add normalX
309 $ hg add normalX
310 $ hg commit -m 'will be rollback-ed soon'
310 $ hg commit -m 'will be rollback-ed soon'
311 $ hg rollback -q
311 $ hg rollback -q
312
312
313 $ cat .hglf/large1
313 $ cat .hglf/large1
314 58e24f733a964da346e2407a2bee99d9001184f5
314 58e24f733a964da346e2407a2bee99d9001184f5
315 $ cat .hglf/large2
315 $ cat .hglf/large2
316 1deebade43c8c498a3c8daddac0244dc55d1331d
316 1deebade43c8c498a3c8daddac0244dc55d1331d
317 $ rm normalX
317 $ rm normalX
318
318
319 Test that "hg status" shows status of largefiles correctly just after
319 Test that "hg status" shows status of largefiles correctly just after
320 automated commit like rebase/transplant
320 automated commit like rebase/transplant
321
321
322 $ cat >> .hg/hgrc <<EOF
322 $ cat >> .hg/hgrc <<EOF
323 > [extensions]
323 > [extensions]
324 > rebase =
324 > rebase =
325 > strip =
325 > strip =
326 > transplant =
326 > transplant =
327 > EOF
327 > EOF
328 $ hg update -q -C 1
328 $ hg update -q -C 1
329 $ hg remove large1
329 $ hg remove large1
330 $ echo largeX > largeX
330 $ echo largeX > largeX
331 $ hg add --large largeX
331 $ hg add --large largeX
332 $ hg commit -m '#4'
332 $ hg commit -m '#4'
333
333
334 $ hg rebase -s 1 -d 2 --keep
334 $ hg rebase -s 1 -d 2 --keep
335 rebasing 1:72518492caa6 "#1"
335 rebasing 1:72518492caa6 "#1"
336 rebasing 4:07d6153b5c04 tip "#4"
336 rebasing 4:07d6153b5c04 tip "#4"
337
337
338 $ hg status -A large1
338 $ hg status -A large1
339 large1: $ENOENT$
339 large1: $ENOENT$
340
340
341 $ hg status -A largeX
341 $ hg status -A largeX
342 C largeX
342 C largeX
343 $ hg strip -q 5
343 $ hg strip -q 5
344
344
345 $ hg update -q -C 2
345 $ hg update -q -C 2
346 $ hg transplant -q 1 4
346 $ hg transplant -q 1 4
347
347
348 $ hg status -A large1
348 $ hg status -A large1
349 large1: $ENOENT$
349 large1: $ENOENT$
350
350
351 $ hg status -A largeX
351 $ hg status -A largeX
352 C largeX
352 C largeX
353 $ hg strip -q 5
353 $ hg strip -q 5
354
354
355 $ hg update -q -C 2
355 $ hg update -q -C 2
356 $ hg transplant -q --merge 1 --merge 4
356 $ hg transplant -q --merge 1 --merge 4
357
357
358 $ hg status -A large1
358 $ hg status -A large1
359 large1: $ENOENT$
359 large1: $ENOENT$
360
360
361 $ hg status -A largeX
361 $ hg status -A largeX
362 C largeX
362 C largeX
363 $ hg strip -q 5
363 $ hg strip -q 5
364
364
365 Test that linear merge can detect modification (and conflict) correctly
365 Test that linear merge can detect modification (and conflict) correctly
366
366
367 (linear merge without conflict)
367 (linear merge without conflict)
368
368
369 $ echo 'large2 for linear merge (no conflict)' > large2
369 $ echo 'large2 for linear merge (no conflict)' > large2
370 $ hg update 3 --config debug.dirstate.delaywrite=2
370 $ hg update 3 --config debug.dirstate.delaywrite=2
371 getting changed largefiles
371 getting changed largefiles
372 1 largefiles updated, 0 removed
372 1 largefiles updated, 0 removed
373 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
373 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
374 $ hg status -A large2
374 $ hg status -A large2
375 M large2
375 M large2
376 $ cat large2
376 $ cat large2
377 large2 for linear merge (no conflict)
377 large2 for linear merge (no conflict)
378 $ cat .hglf/large2
378 $ cat .hglf/large2
379 9c4bf8f1b33536d6e5f89447e10620cfe52ea710
379 9c4bf8f1b33536d6e5f89447e10620cfe52ea710
380
380
381 (linear merge with conflict, choosing "other")
381 (linear merge with conflict, choosing "other")
382
382
383 $ hg update -q -C 2
383 $ hg update -q -C 2
384 $ echo 'large1 for linear merge (conflict)' > large1
384 $ echo 'large1 for linear merge (conflict)' > large1
385 $ hg update 3 --config ui.interactive=True <<EOF
385 $ hg update 3 --config ui.interactive=True <<EOF
386 > o
386 > o
387 > EOF
387 > EOF
388 largefile large1 has a merge conflict
388 largefile large1 has a merge conflict
389 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
389 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
390 you can keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b.
390 you can keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b.
391 what do you want to do? o
391 what do you want to do? o
392 getting changed largefiles
392 getting changed largefiles
393 1 largefiles updated, 0 removed
393 1 largefiles updated, 0 removed
394 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
394 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
395 $ hg status -A large1
395 $ hg status -A large1
396 C large1
396 C large1
397 $ cat large1
397 $ cat large1
398 large1 in #3
398 large1 in #3
399 $ cat .hglf/large1
399 $ cat .hglf/large1
400 e5bb990443d6a92aaf7223813720f7566c9dd05b
400 e5bb990443d6a92aaf7223813720f7566c9dd05b
401
401
402 (linear merge with conflict, choosing "local")
402 (linear merge with conflict, choosing "local")
403
403
404 $ hg update -q -C 2
404 $ hg update -q -C 2
405 $ echo 'large1 for linear merge (conflict)' > large1
405 $ echo 'large1 for linear merge (conflict)' > large1
406 $ hg update 3 --config debug.dirstate.delaywrite=2
406 $ hg update 3 --config debug.dirstate.delaywrite=2
407 largefile large1 has a merge conflict
407 largefile large1 has a merge conflict
408 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
408 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
409 you can keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b.
409 you can keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b.
410 what do you want to do? l
410 what do you want to do? l
411 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
411 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
412 $ hg status -A large1
412 $ hg status -A large1
413 M large1
413 M large1
414 $ cat large1
414 $ cat large1
415 large1 for linear merge (conflict)
415 large1 for linear merge (conflict)
416 $ cat .hglf/large1
416 $ cat .hglf/large1
417 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
417 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
418
418
419 Test a linear merge to a revision containing same-name normal file
419 Test a linear merge to a revision containing same-name normal file
420
420
421 $ hg update -q -C 3
421 $ hg update -q -C 3
422 $ hg remove large2
422 $ hg remove large2
423 $ echo 'large2 as normal file' > large2
423 $ echo 'large2 as normal file' > large2
424 $ hg add large2
424 $ hg add large2
425 $ echo 'large3 as normal file' > large3
425 $ echo 'large3 as normal file' > large3
426 $ hg add large3
426 $ hg add large3
427 $ hg commit -m '#5'
427 $ hg commit -m '#5'
428 $ hg manifest
428 $ hg manifest
429 .hglf/large1
429 .hglf/large1
430 large2
430 large2
431 large3
431 large3
432 normal1
432 normal1
433
433
434 (modified largefile is already switched to normal)
434 (modified largefile is already switched to normal)
435
435
436 $ hg update -q -C 2
436 $ hg update -q -C 2
437 $ echo 'modified large2 for linear merge' > large2
437 $ echo 'modified large2 for linear merge' > large2
438 $ hg update -q 5
438 $ hg update -q 5
439 remote turned local largefile large2 into a normal file
439 remote turned local largefile large2 into a normal file
440 keep (l)argefile or use (n)ormal file? l
440 keep (l)argefile or use (n)ormal file? l
441 $ hg debugdirstate --no-dates | grep large2
441 $ hg debugdirstate --no-dates | grep large2
442 a 0 -1 unset .hglf/large2
442 a 0 -1 unset .hglf/large2
443 r 0 0 set large2
443 r 0 0 set large2
444 $ hg status -A large2
444 $ hg status -A large2
445 A large2
445 A large2
446 $ cat large2
446 $ cat large2
447 modified large2 for linear merge
447 modified large2 for linear merge
448
448
449 (added largefile is already committed as normal)
449 (added largefile is already committed as normal)
450
450
451 $ hg update -q -C 2
451 $ hg update -q -C 2
452 $ echo 'large3 as large file for linear merge' > large3
452 $ echo 'large3 as large file for linear merge' > large3
453 $ hg add --large large3
453 $ hg add --large large3
454 $ hg update -q 5
454 $ hg update -q 5
455 remote turned local largefile large3 into a normal file
455 remote turned local largefile large3 into a normal file
456 keep (l)argefile or use (n)ormal file? l
456 keep (l)argefile or use (n)ormal file? l
457 $ hg debugdirstate --no-dates | grep large3
457 $ hg debugdirstate --no-dates | grep large3
458 a 0 -1 unset .hglf/large3
458 a 0 -1 unset .hglf/large3
459 r 0 0 set large3
459 r 0 0 set large3
460 $ hg status -A large3
460 $ hg status -A large3
461 A large3
461 A large3
462 $ cat large3
462 $ cat large3
463 large3 as large file for linear merge
463 large3 as large file for linear merge
464 $ rm -f large3 .hglf/large3
464 $ rm -f large3 .hglf/large3
465
465
466 Test that the internal linear merging works correctly
466 Test that the internal linear merging works correctly
467 (both heads are stripped to keep pairing of revision number and commit log)
467 (both heads are stripped to keep pairing of revision number and commit log)
468
468
469 $ hg update -q -C 2
469 $ hg update -q -C 2
470 $ hg strip 3 4
470 $ hg strip 3 4
471 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/9530e27857f7-2e7b195d-backup.hg
471 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/9530e27857f7-2e7b195d-backup.hg
472 $ mv .hg/strip-backup/9530e27857f7-2e7b195d-backup.hg $TESTTMP
472 $ mv .hg/strip-backup/9530e27857f7-2e7b195d-backup.hg $TESTTMP
473
473
474 (internal linear merging at "hg pull --update")
474 (internal linear merging at "hg pull --update")
475
475
476 $ echo 'large1 for linear merge (conflict)' > large1
476 $ echo 'large1 for linear merge (conflict)' > large1
477 $ echo 'large2 for linear merge (conflict with normal file)' > large2
477 $ echo 'large2 for linear merge (conflict with normal file)' > large2
478 $ hg pull --update --config debug.dirstate.delaywrite=2 $TESTTMP/9530e27857f7-2e7b195d-backup.hg
478 $ hg pull --update --config debug.dirstate.delaywrite=2 $TESTTMP/9530e27857f7-2e7b195d-backup.hg
479 pulling from $TESTTMP/9530e27857f7-2e7b195d-backup.hg
479 pulling from $TESTTMP/9530e27857f7-2e7b195d-backup.hg
480 searching for changes
480 searching for changes
481 adding changesets
481 adding changesets
482 adding manifests
482 adding manifests
483 adding file changes
483 adding file changes
484 added 3 changesets with 5 changes to 5 files
484 added 3 changesets with 5 changes to 5 files
485 new changesets 9530e27857f7:d65e59e952a9 (3 drafts)
485 new changesets 9530e27857f7:d65e59e952a9 (3 drafts)
486 remote turned local largefile large2 into a normal file
486 remote turned local largefile large2 into a normal file
487 keep (l)argefile or use (n)ormal file? l
487 keep (l)argefile or use (n)ormal file? l
488 largefile large1 has a merge conflict
488 largefile large1 has a merge conflict
489 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
489 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
490 you can keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b.
490 you can keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b.
491 what do you want to do? l
491 what do you want to do? l
492 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
492 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
493 updated to "d65e59e952a9: #5"
493 updated to "d65e59e952a9: #5"
494 1 other heads for branch "default"
494 1 other heads for branch "default"
495
495
496 $ hg status -A large1
496 $ hg status -A large1
497 M large1
497 M large1
498 $ cat large1
498 $ cat large1
499 large1 for linear merge (conflict)
499 large1 for linear merge (conflict)
500 $ cat .hglf/large1
500 $ cat .hglf/large1
501 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
501 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
502 $ hg status -A large2
502 $ hg status -A large2
503 A large2
503 A large2
504 $ cat large2
504 $ cat large2
505 large2 for linear merge (conflict with normal file)
505 large2 for linear merge (conflict with normal file)
506 $ cat .hglf/large2
506 $ cat .hglf/large2
507 d7591fe9be0f6227d90bddf3e4f52ff41fc1f544
507 d7591fe9be0f6227d90bddf3e4f52ff41fc1f544
508
508
509 (internal linear merging at "hg unbundle --update")
509 (internal linear merging at "hg unbundle --update")
510
510
511 $ hg update -q -C 2
511 $ hg update -q -C 2
512 $ hg rollback -q
512 $ hg rollback -q
513
513
514 $ echo 'large1 for linear merge (conflict)' > large1
514 $ echo 'large1 for linear merge (conflict)' > large1
515 $ echo 'large2 for linear merge (conflict with normal file)' > large2
515 $ echo 'large2 for linear merge (conflict with normal file)' > large2
516 $ hg unbundle --update --config debug.dirstate.delaywrite=2 $TESTTMP/9530e27857f7-2e7b195d-backup.hg
516 $ hg unbundle --update --config debug.dirstate.delaywrite=2 $TESTTMP/9530e27857f7-2e7b195d-backup.hg
517 adding changesets
517 adding changesets
518 adding manifests
518 adding manifests
519 adding file changes
519 adding file changes
520 added 3 changesets with 5 changes to 5 files
520 added 3 changesets with 5 changes to 5 files
521 new changesets 9530e27857f7:d65e59e952a9 (3 drafts)
521 new changesets 9530e27857f7:d65e59e952a9 (3 drafts)
522 remote turned local largefile large2 into a normal file
522 remote turned local largefile large2 into a normal file
523 keep (l)argefile or use (n)ormal file? l
523 keep (l)argefile or use (n)ormal file? l
524 largefile large1 has a merge conflict
524 largefile large1 has a merge conflict
525 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
525 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
526 you can keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b.
526 you can keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b.
527 what do you want to do? l
527 what do you want to do? l
528 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
528 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
529 updated to "d65e59e952a9: #5"
529 updated to "d65e59e952a9: #5"
530 1 other heads for branch "default"
530 1 other heads for branch "default"
531
531
532 $ hg status -A large1
532 $ hg status -A large1
533 M large1
533 M large1
534 $ cat large1
534 $ cat large1
535 large1 for linear merge (conflict)
535 large1 for linear merge (conflict)
536 $ cat .hglf/large1
536 $ cat .hglf/large1
537 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
537 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
538 $ hg status -A large2
538 $ hg status -A large2
539 A large2
539 A large2
540 $ cat large2
540 $ cat large2
541 large2 for linear merge (conflict with normal file)
541 large2 for linear merge (conflict with normal file)
542 $ cat .hglf/large2
542 $ cat .hglf/large2
543 d7591fe9be0f6227d90bddf3e4f52ff41fc1f544
543 d7591fe9be0f6227d90bddf3e4f52ff41fc1f544
544
544
545 (internal linear merging in subrepo at "hg update")
545 (internal linear merging in subrepo at "hg update")
546
546
547 $ cd ..
547 $ cd ..
548 $ hg init subparent
548 $ hg init subparent
549 $ cd subparent
549 $ cd subparent
550
550
551 $ hg clone -q -u 2 ../repo sub
551 $ hg clone -q -u 2 ../repo sub
552 $ cat > .hgsub <<EOF
552 $ cat > .hgsub <<EOF
553 > sub = sub
553 > sub = sub
554 > EOF
554 > EOF
555 $ hg add .hgsub
555 $ hg add .hgsub
556 $ hg commit -m '#0@parent'
556 $ hg commit -m '#0@parent'
557 $ cat .hgsubstate
557 $ cat .hgsubstate
558 f74e50bd9e5594b7cf1e6c5cbab86ddd25f3ca2f sub
558 f74e50bd9e5594b7cf1e6c5cbab86ddd25f3ca2f sub
559 $ hg -R sub update -q
559 $ hg -R sub update -q
560 $ hg commit -m '#1@parent'
560 $ hg commit -m '#1@parent'
561 $ cat .hgsubstate
561 $ cat .hgsubstate
562 d65e59e952a9638e2ce863b41a420ca723dd3e8d sub
562 d65e59e952a9638e2ce863b41a420ca723dd3e8d sub
563 $ hg update -q 0
563 $ hg update -q 0
564
564
565 $ echo 'large1 for linear merge (conflict)' > sub/large1
565 $ echo 'large1 for linear merge (conflict)' > sub/large1
566 $ echo 'large2 for linear merge (conflict with normal file)' > sub/large2
566 $ echo 'large2 for linear merge (conflict with normal file)' > sub/large2
567 $ hg update --config ui.interactive=True --config debug.dirstate.delaywrite=2 <<EOF
567 $ hg update --config ui.interactive=True --config debug.dirstate.delaywrite=2 <<EOF
568 > m
568 > m
569 > r
569 > r
570 > l
570 > l
571 > l
571 > l
572 > EOF
572 > EOF
573 subrepository sub diverged (local revision: f74e50bd9e55, remote revision: d65e59e952a9)
573 subrepository sub diverged (local revision: f74e50bd9e55, remote revision: d65e59e952a9)
574 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
574 you can (m)erge, keep (l)ocal [working copy] or keep (r)emote [destination].
575 what do you want to do? m
575 what do you want to do? m
576 subrepository sources for sub differ (in checked out version)
576 subrepository sources for sub differ (in checked out version)
577 you can use (l)ocal source (f74e50bd9e55) or (r)emote source (d65e59e952a9).
577 you can use (l)ocal source (f74e50bd9e55) or (r)emote source (d65e59e952a9).
578 what do you want to do? r
578 what do you want to do? r
579 remote turned local largefile large2 into a normal file
579 remote turned local largefile large2 into a normal file
580 keep (l)argefile or use (n)ormal file? l
580 keep (l)argefile or use (n)ormal file? l
581 largefile large1 has a merge conflict
581 largefile large1 has a merge conflict
582 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
582 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
583 you can keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b.
583 you can keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b.
584 what do you want to do? l
584 what do you want to do? l
585 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
585 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
586 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
586 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
587
587
588 $ hg -R sub status -A sub/large1
588 $ hg -R sub status -A sub/large1
589 M sub/large1
589 M sub/large1
590 $ cat sub/large1
590 $ cat sub/large1
591 large1 for linear merge (conflict)
591 large1 for linear merge (conflict)
592 $ cat sub/.hglf/large1
592 $ cat sub/.hglf/large1
593 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
593 ba94c2efe5b7c5e0af8d189295ce00553b0612b7
594 $ hg -R sub status -A sub/large2
594 $ hg -R sub status -A sub/large2
595 A sub/large2
595 A sub/large2
596 $ cat sub/large2
596 $ cat sub/large2
597 large2 for linear merge (conflict with normal file)
597 large2 for linear merge (conflict with normal file)
598 $ cat sub/.hglf/large2
598 $ cat sub/.hglf/large2
599 d7591fe9be0f6227d90bddf3e4f52ff41fc1f544
599 d7591fe9be0f6227d90bddf3e4f52ff41fc1f544
600
600
601 $ cd ..
601 $ cd ..
602 $ cd repo
602 $ cd repo
603
603
604 Test that rebase updates largefiles in the working directory even if
604 Test that rebase updates largefiles in the working directory even if
605 it is aborted by conflict.
605 it is aborted by conflict.
606
606
607 $ hg update -q -C 3
607 $ hg update -q -C 3
608 $ cat .hglf/large1
608 $ cat .hglf/large1
609 e5bb990443d6a92aaf7223813720f7566c9dd05b
609 e5bb990443d6a92aaf7223813720f7566c9dd05b
610 $ cat large1
610 $ cat large1
611 large1 in #3
611 large1 in #3
612 $ hg rebase -s 1 -d 3 --keep --config ui.interactive=True <<EOF
612 $ hg rebase -s 1 -d 3 --keep --config ui.interactive=True <<EOF
613 > o
613 > o
614 > EOF
614 > EOF
615 rebasing 1:72518492caa6 "#1"
615 rebasing 1:72518492caa6 "#1"
616 largefile large1 has a merge conflict
616 largefile large1 has a merge conflict
617 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
617 ancestor was 4669e532d5b2c093a78eca010077e708a071bb64
618 you can keep (l)ocal e5bb990443d6a92aaf7223813720f7566c9dd05b or take (o)ther 58e24f733a964da346e2407a2bee99d9001184f5.
618 you can keep (l)ocal e5bb990443d6a92aaf7223813720f7566c9dd05b or take (o)ther 58e24f733a964da346e2407a2bee99d9001184f5.
619 what do you want to do? o
619 what do you want to do? o
620 merging normal1
620 merging normal1
621 warning: conflicts while merging normal1! (edit, then use 'hg resolve --mark')
621 warning: conflicts while merging normal1! (edit, then use 'hg resolve --mark')
622 unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
622 unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
623 [240]
623 [240]
624 $ cat .hglf/large1
624 $ cat .hglf/large1
625 58e24f733a964da346e2407a2bee99d9001184f5
625 58e24f733a964da346e2407a2bee99d9001184f5
626 $ cat large1
626 $ cat large1
627 large1 in #1
627 large1 in #1
628 $ rm normal1.orig
628 $ rm normal1.orig
629
629
630 Test that rebase updates standins for manually modified largefiles at
630 Test that rebase updates standins for manually modified largefiles at
631 the 1st commit of resuming.
631 the 1st commit of resuming.
632
632
633 $ echo "manually modified before 'hg rebase --continue'" > large1
633 $ echo "manually modified before 'hg rebase --continue'" > large1
634 $ hg resolve -m normal1
634 $ hg resolve -m normal1
635 (no more unresolved files)
635 (no more unresolved files)
636 continue: hg rebase --continue
636 continue: hg rebase --continue
637 $ hg rebase --continue --config ui.interactive=True <<EOF
637 $ hg rebase --continue --config ui.interactive=True <<EOF
638 > c
638 > c
639 > EOF
639 > EOF
640 rebasing 1:72518492caa6 "#1"
640 rebasing 1:72518492caa6 "#1"
641 rebasing 4:07d6153b5c04 "#4"
641 rebasing 4:07d6153b5c04 "#4"
642 file '.hglf/large1' was deleted in other [source] but was modified in local [dest].
642 file '.hglf/large1' was deleted in other [source] but was modified in local [dest].
643 You can use (c)hanged version, (d)elete, or leave (u)nresolved.
643 You can use (c)hanged version, (d)elete, or leave (u)nresolved.
644 What do you want to do? c
644 What do you want to do? c
645
645
646 $ hg diff -c "tip~1" --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
646 $ hg diff -c "tip~1" --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
647 -e5bb990443d6a92aaf7223813720f7566c9dd05b
647 -e5bb990443d6a92aaf7223813720f7566c9dd05b
648 +8a4f783556e7dea21139ca0466eafce954c75c13
648 +8a4f783556e7dea21139ca0466eafce954c75c13
649 $ rm -f large1
649 $ rm -f large1
650 $ hg update -q -C tip
650 $ hg update -q -C tip
651 $ cat large1
651 $ cat large1
652 manually modified before 'hg rebase --continue'
652 manually modified before 'hg rebase --continue'
653
653
654 Test that transplant updates largefiles, of which standins are safely
654 Test that transplant updates largefiles, of which standins are safely
655 changed, even if it is aborted by conflict of other.
655 changed, even if it is aborted by conflict of other.
656
656
657 $ hg update -q -C 5
657 $ hg update -q -C 5
658 $ cat .hglf/large1
658 $ cat .hglf/large1
659 e5bb990443d6a92aaf7223813720f7566c9dd05b
659 e5bb990443d6a92aaf7223813720f7566c9dd05b
660 $ cat large1
660 $ cat large1
661 large1 in #3
661 large1 in #3
662 $ hg diff -c 4 .hglf/largeX | grep '^[+-][0-9a-z]'
662 $ hg diff -c 4 .hglf/largeX | grep '^[+-][0-9a-z]'
663 +fa44618ea25181aff4f48b70428294790cec9f61
663 +fa44618ea25181aff4f48b70428294790cec9f61
664 $ hg transplant 4
664 $ hg transplant 4
665 applying 07d6153b5c04
665 applying 07d6153b5c04
666 patching file .hglf/large1
666 patching file .hglf/large1
667 Hunk #1 FAILED at 0
667 Hunk #1 FAILED at 0
668 1 out of 1 hunks FAILED -- saving rejects to file .hglf/large1.rej
668 1 out of 1 hunks FAILED -- saving rejects to file .hglf/large1.rej
669 patch failed to apply
669 patch failed to apply
670 abort: fix up the working directory and run hg transplant --continue
670 abort: fix up the working directory and run hg transplant --continue
671 [255]
671 [255]
672 $ hg status -A large1
672 $ hg status -A large1
673 C large1
673 C large1
674 $ cat .hglf/large1
674 $ cat .hglf/large1
675 e5bb990443d6a92aaf7223813720f7566c9dd05b
675 e5bb990443d6a92aaf7223813720f7566c9dd05b
676 $ cat large1
676 $ cat large1
677 large1 in #3
677 large1 in #3
678 $ hg status -A largeX
678 $ hg status -A largeX
679 A largeX
679 A largeX
680 $ cat .hglf/largeX
680 $ cat .hglf/largeX
681 fa44618ea25181aff4f48b70428294790cec9f61
681 fa44618ea25181aff4f48b70428294790cec9f61
682 $ cat largeX
682 $ cat largeX
683 largeX
683 largeX
684
684
685 Test that transplant updates standins for manually modified largefiles
685 Test that transplant updates standins for manually modified largefiles
686 at the 1st commit of resuming.
686 at the 1st commit of resuming.
687
687
688 $ echo "manually modified before 'hg transplant --continue'" > large1
688 $ echo "manually modified before 'hg transplant --continue'" > large1
689 $ hg transplant --continue
689 $ hg transplant --continue
690 07d6153b5c04 transplanted as f1bf30eb88cc
690 07d6153b5c04 transplanted as f1bf30eb88cc
691 $ hg diff -c tip .hglf/large1 | grep '^[+-][0-9a-z]'
691 $ hg diff -c tip .hglf/large1 | grep '^[+-][0-9a-z]'
692 -e5bb990443d6a92aaf7223813720f7566c9dd05b
692 -e5bb990443d6a92aaf7223813720f7566c9dd05b
693 +6a4f36d4075fbe0f30ec1d26ca44e63c05903671
693 +6a4f36d4075fbe0f30ec1d26ca44e63c05903671
694 $ rm -f large1
694 $ rm -f large1
695 $ hg update -q -C tip
695 $ hg update -q -C tip
696 $ cat large1
696 $ cat large1
697 manually modified before 'hg transplant --continue'
697 manually modified before 'hg transplant --continue'
698
698
699 Test that "hg status" doesn't show removal of largefiles not managed
699 Test that "hg status" doesn't show removal of largefiles not managed
700 in the target context.
700 in the target context.
701
701
702 $ hg update -q -C 4
702 $ hg update -q -C 4
703 $ hg remove largeX
703 $ hg remove largeX
704 $ hg status -A largeX
704 $ hg status -A largeX
705 R largeX
705 R largeX
706 $ hg status -A --rev '.^1' largeX
706 $ hg status -A --rev '.^1' largeX
707
707
708 #if execbit
708 #if execbit
709
709
710 Test that "hg status" against revisions other than parent notices exec
710 Test that "hg status" against revisions other than parent notices exec
711 bit changes of largefiles.
711 bit changes of largefiles.
712
712
713 $ hg update -q -C 4
713 $ hg update -q -C 4
714
714
715 (the case that large2 doesn't have exec bit in the target context but
715 (the case that large2 doesn't have exec bit in the target context but
716 in the working context)
716 in the working context)
717
717
718 $ chmod +x large2
718 $ chmod +x large2
719 $ hg status -A --rev 0 large2
719 $ hg status -A --rev 0 large2
720 M large2
720 M large2
721 $ hg commit -m 'chmod +x large2'
721 $ hg commit -m 'chmod +x large2'
722
722
723 (the case that large2 has exec bit in the target context but not in
723 (the case that large2 has exec bit in the target context but not in
724 the working context)
724 the working context)
725
725
726 $ echo dummy > dummy
726 $ echo dummy > dummy
727 $ hg add dummy
727 $ hg add dummy
728 $ hg commit -m 'revision for separation'
728 $ hg commit -m 'revision for separation'
729 $ chmod -x large2
729 $ chmod -x large2
730 $ hg status -A --rev '.^1' large2
730 $ hg status -A --rev '.^1' large2
731 M large2
731 M large2
732
732
733 #else
733 #else
734
734
735 Test that "hg status" against revisions other than parent ignores exec
735 Test that "hg status" against revisions other than parent ignores exec
736 bit correctly on the platform being unaware of it.
736 bit correctly on the platform being unaware of it.
737
737
738 $ hg update -q -C 4
738 $ hg update -q -C 4
739
739
740 $ cat > ../exec-bit.patch <<EOF
740 $ cat > ../exec-bit.patch <<EOF
741 > # HG changeset patch
741 > # HG changeset patch
742 > # User test
742 > # User test
743 > # Date 0 0
743 > # Date 0 0
744 > # Thu Jan 01 00:00:00 1970 +0000
744 > # Thu Jan 01 00:00:00 1970 +0000
745 > # Node ID be1b433a65b12b27b5519d92213e14f7e1769b90
745 > # Node ID be1b433a65b12b27b5519d92213e14f7e1769b90
746 > # Parent 07d6153b5c04313efb75deec9ba577de7faeb727
746 > # Parent 07d6153b5c04313efb75deec9ba577de7faeb727
747 > chmod +x large2
747 > chmod +x large2
748 >
748 >
749 > diff --git a/.hglf/large2 b/.hglf/large2
749 > diff --git a/.hglf/large2 b/.hglf/large2
750 > old mode 100644
750 > old mode 100644
751 > new mode 100755
751 > new mode 100755
752 > EOF
752 > EOF
753 $ hg import --exact --bypass ../exec-bit.patch
753 $ hg import --exact --bypass ../exec-bit.patch
754 applying ../exec-bit.patch
754 applying ../exec-bit.patch
755 $ hg status -A --rev tip large2
755 $ hg status -A --rev tip large2
756 C large2
756 C large2
757
757
758 #endif
758 #endif
759
759
760 The fileset revset is evaluated for each revision, instead of once on wdir(),
760 The fileset revset is evaluated for each revision, instead of once on wdir(),
761 and then patterns matched on each revision. Here, no exec bits are set in
761 and then patterns matched on each revision. Here, no exec bits are set in
762 wdir(), but a matching revision is detected.
762 wdir(), but a matching revision is detected.
763
763
764 (Teach large2 is not an executable. Maybe this is a bug of largefiles.)
764 (Teach large2 is not an executable. Maybe this is a bug of largefiles.)
765 #if execbit
765 #if execbit
766 $ chmod -x .hglf/large2
766 $ chmod -x .hglf/large2
767 #endif
767 #endif
768
768
769 $ hg files 'set:exec()'
769 $ hg files 'set:exec()'
770 [1]
770 [1]
771 $ hg log -qr 'file("set:exec()")'
771 $ hg log -qr 'file("set:exec()")'
772 9:be1b433a65b1
772 9:be1b433a65b1
773
773
774 Test a fatal error interrupting an update. Verify that status report dirty
774 Test a fatal error interrupting an update
775 files correctly after an interrupted update. Also verify that checking all
775 -----------------------------------------
776 hashes reveals it isn't clean.
776
777 In a previous version this test was tasked to:
778 | verify that status report dirty files correctly after an interrupted
779 | update. Also verify that checking all hashes reveals it isn't clean.
780
781 In the meantime, improvements to the update logic mean it is much harder to get the dirstate file written too early. So the original intent seems "fine".
782
783 However, it shows another error where the standin file for large1 seems to be
784 silently updated, confusing the general logic. This seems to have been broken
785 before our updates and the test is marked as such.
777
786
778 Start with clean dirstates:
787 Start with clean dirstates:
779 $ hg up --quiet --clean --rev "8^"
788 $ hg up --quiet --clean --rev "8^"
780 $ sleep 1
789 $ sleep 1
781 $ cat large1
790 $ cat large1
782 large1 in #3
791 large1 in #3
783 $ hg st
792 $ hg st
784
793
785 Update standins without updating largefiles - large1 is modified and largeX is
794 Update standins without updating largefiles - large1 is modified and largeX is
786 added:
795 added:
787 $ cat << EOF > ../crashupdatelfiles.py
796 $ cat << EOF > ../crashupdatelfiles.py
788 > import hgext.largefiles.lfutil
797 > import hgext.largefiles.lfutil
789 > def getlfilestoupdate(oldstandins, newstandins):
798 > def getlfilestoupdate(oldstandins, newstandins):
790 > raise SystemExit(7)
799 > raise SystemExit(7)
791 > hgext.largefiles.lfutil.getlfilestoupdate = getlfilestoupdate
800 > hgext.largefiles.lfutil.getlfilestoupdate = getlfilestoupdate
792 > EOF
801 > EOF
793 $ hg up -Cr "8" --config extensions.crashupdatelfiles=../crashupdatelfiles.py
802 $ hg up -Cr "8" --config extensions.crashupdatelfiles=../crashupdatelfiles.py
794 [254]
803 [254]
795 Check large1 content and status ... and that update will undo modifications:
804 Check large1 content and status ... and that update will undo modifications:
796 $ hg id
805 $ hg id
797 f1bf30eb88cc
806 d65e59e952a9+ (known-bad-output !)
807 d65e59e952a9 (missing-correct-output !)
798 $ cat large1
808 $ cat large1
799 large1 in #3
809 large1 in #3
800 $ hg st
810 $ hg st
801 M large1
802 ! largeX
803 $ hg up -Cr 8
811 $ hg up -Cr 8
804 getting changed largefiles
812 getting changed largefiles
805 2 largefiles updated, 0 removed
813 1 largefiles updated, 0 removed (known-bad-output !)
814 2 largefiles updated, 0 removed (missing-correct-output !)
806 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
815 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
807 $ cat large1
816 $ cat large1
808 manually modified before 'hg transplant --continue'
817 large1 in #3 (known-bad-output !)
818 manually modified before 'hg transplant --continue' (missing-correct-output !)
809 $ hg st
819 $ hg st
820 M large1 (known-bad-output !)
821
822 $ hg revert --all --no-backup
823 reverting .hglf/large1 (known-bad-output !)
810 Force largefiles rehashing and check that all changes have been caught by
824 Force largefiles rehashing and check that all changes have been caught by
811 status and update:
825 status and update:
812 $ rm .hg/largefiles/dirstate
826 $ rm .hg/largefiles/dirstate
813 $ hg st
827 $ hg st
814
828
815 $ cd ..
829 $ cd ..
816
830
817 Test that "hg convert" avoids copying largefiles from the working
831 Test that "hg convert" avoids copying largefiles from the working
818 directory into store, because "hg convert" doesn't update largefiles
832 directory into store, because "hg convert" doesn't update largefiles
819 in the working directory (removing files under ".cache/largefiles"
833 in the working directory (removing files under ".cache/largefiles"
820 forces "hg convert" to copy corresponding largefiles)
834 forces "hg convert" to copy corresponding largefiles)
821
835
822 $ cat >> $HGRCPATH <<EOF
836 $ cat >> $HGRCPATH <<EOF
823 > [extensions]
837 > [extensions]
824 > convert =
838 > convert =
825 > EOF
839 > EOF
826
840
827 $ rm $TESTTMP/.cache/largefiles/6a4f36d4075fbe0f30ec1d26ca44e63c05903671
841 $ rm $TESTTMP/.cache/largefiles/6a4f36d4075fbe0f30ec1d26ca44e63c05903671
828 $ hg convert -q repo repo.converted
842 $ hg convert -q repo repo.converted
General Comments 0
You need to be logged in to leave comments. Login now