##// END OF EJS Templates
dirstate: make the `transaction` argument of `setbranch` mandatory...
marmoute -
r52166:79cd29d5 default
parent child Browse files
Show More
@@ -1,395 +1,395 b''
1 1 import contextlib
2 2 import os
3 3
4 4 from mercurial.node import sha1nodeconstants
5 5 from mercurial import (
6 6 dirstatemap,
7 7 error,
8 8 extensions,
9 9 match as matchmod,
10 10 pycompat,
11 11 scmutil,
12 12 util,
13 13 )
14 14 from mercurial.dirstateutils import (
15 15 timestamp,
16 16 )
17 17 from mercurial.interfaces import (
18 18 dirstate as intdirstate,
19 19 util as interfaceutil,
20 20 )
21 21
22 22 from . import gitutil
23 23
24 24
25 25 DirstateItem = dirstatemap.DirstateItem
26 26 propertycache = util.propertycache
27 27 pygit2 = gitutil.get_pygit2()
28 28
29 29
def readpatternfile(orig, filepath, warn, sourceinfo=False):
    """Wrapper for matchmod.readpatternfile that understands gitignore files.

    Files named ``.gitignore`` (or git's ``info/exclude``) are parsed with
    gitignore semantics; everything else is delegated to ``orig``.

    For gitignore files this returns a ``(patterns, warnings)`` pair.
    Negated patterns (``!...``) are not supported and are reported as
    warnings instead of being applied.
    """
    if not (b'info/exclude' in filepath or filepath.endswith(b'.gitignore')):
        # BUG FIX: forward the caller's `sourceinfo` instead of silently
        # forcing it to False when delegating to the original reader.
        return orig(filepath, warn, sourceinfo=sourceinfo)
    result = []
    warnings = []
    # NOTE: the gitignore branch ignores `sourceinfo`; pattern origins are
    # not tracked for these files.
    with open(filepath, 'rb') as fp:
        for line in fp:
            line = line.strip()
            if not line or line.startswith(b'#'):
                continue  # blank line or comment
            if line.startswith(b'!'):
                warnings.append(b'unsupported ignore pattern %s' % line)
                continue
            if line.startswith(b'/'):
                # leading '/' anchors the pattern at the repository root
                result.append(b'rootglob:' + line[1:])
            else:
                result.append(b'relglob:' + line)
    return result, warnings
48 48
49 49
# Route pattern-file reads through our gitignore-aware parser; non-git
# files still use the stock Mercurial reader.
extensions.wrapfunction(matchmod, 'readpatternfile', readpatternfile)


# Translation table from pygit2 status flags to the single-letter states
# used by this module ('m', 'n', 'a', 'r', '?').  Left empty when pygit2
# could not be imported so the module is still importable.
_STATUS_MAP = {}
if pygit2:
    _STATUS_MAP = {
        pygit2.GIT_STATUS_CONFLICTED: b'm',
        pygit2.GIT_STATUS_CURRENT: b'n',
        pygit2.GIT_STATUS_IGNORED: b'?',
        pygit2.GIT_STATUS_INDEX_DELETED: b'r',
        pygit2.GIT_STATUS_INDEX_MODIFIED: b'n',
        pygit2.GIT_STATUS_INDEX_NEW: b'a',
        pygit2.GIT_STATUS_INDEX_RENAMED: b'a',
        pygit2.GIT_STATUS_INDEX_TYPECHANGE: b'n',
        pygit2.GIT_STATUS_WT_DELETED: b'r',
        pygit2.GIT_STATUS_WT_MODIFIED: b'n',
        pygit2.GIT_STATUS_WT_NEW: b'?',
        pygit2.GIT_STATUS_WT_RENAMED: b'a',
        pygit2.GIT_STATUS_WT_TYPECHANGE: b'n',
        pygit2.GIT_STATUS_WT_UNREADABLE: b'?',
        pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: b'm',
    }
72 72
73 73
@interfaceutil.implementer(intdirstate.idirstate)
class gitdirstate:
    """A Mercurial dirstate backed by a git index (via pygit2).

    Only a subset of the dirstate contract is implemented; several methods
    are stubs kept for interface compatibility (see the TODO notes).
    """

    def __init__(self, ui, vfs, gitrepo, use_dirstate_v2):
        self._ui = ui
        self._root = os.path.dirname(vfs.base)
        self._opener = vfs
        self.git = gitrepo
        self._plchangecallbacks = {}
        # TODO: context.poststatusfixup is bad and uses this attribute
        self._dirty = False
        self._mapcls = dirstatemap.dirstatemap
        self._use_dirstate_v2 = use_dirstate_v2

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            sha1nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    def p1(self):
        """Return the raw sha1 of the commit HEAD peels to (first parent)."""
        try:
            return self.git.head.peel().id.raw
        except pygit2.GitError:
            # Typically happens when peeling HEAD fails, as in an
            # empty repository.
            return sha1nodeconstants.nullid

    def p2(self):
        # TODO: MERGE_HEAD? something like that, right?
        return sha1nodeconstants.nullid

    def setparents(self, p1, p2=None):
        """Point HEAD at ``p1``; merge parents are not supported yet."""
        if p2 is None:
            p2 = sha1nodeconstants.nullid
        assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
        self.git.head.set_target(gitutil.togitnode(p1))

    @util.propertycache
    def identity(self):
        """Stat data of the git index file, to detect out-of-band changes."""
        return util.filestat.frompath(
            os.path.join(self._root, b'.git', b'index')
        )

    def branch(self):
        # git repositories do not have Mercurial named branches
        return b'default'

    def parents(self):
        # TODO how on earth do we find p2 if a merge is in flight?
        return self.p1(), sha1nodeconstants.nullid

    def __iter__(self):
        return (pycompat.fsencode(f.path) for f in self.git.index)

    def items(self):
        for ie in self.git.index:
            yield ie.path, None  # value should be a DirstateItem

    # py2,3 compat forward
    iteritems = items

    def __getitem__(self, filename):
        """Return the single-letter state for ``filename`` (b'?' if unknown)."""
        try:
            gs = self.git.status_file(filename)
        except KeyError:
            return b'?'
        return _STATUS_MAP[gs]

    def __contains__(self, filename):
        try:
            gs = self.git.status_file(filename)
            return _STATUS_MAP[gs] != b'?'
        except KeyError:
            return False

    def status(self, match, subrepos, ignored, clean, unknown):
        """Translate ``git status`` output into a scmutil.status tuple.

        Returns ``(lookup, status, mtime_boundary)`` where ``lookup`` is
        always False for the git backend.
        """
        listclean = clean
        # TODO handling of clean files - can we get that from git.status()?
        modified, added, removed, deleted, unknown, ignored, clean = (
            [],
            [],
            [],
            [],
            [],
            [],
            [],
        )

        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None

        gstatus = self.git.status()
        for path, status in gstatus.items():
            path = pycompat.fsencode(path)
            if not match(path):
                continue
            if status == pygit2.GIT_STATUS_IGNORED:
                if path.endswith(b'/'):
                    continue
                ignored.append(path)
            elif status in (
                pygit2.GIT_STATUS_WT_MODIFIED,
                pygit2.GIT_STATUS_INDEX_MODIFIED,
                pygit2.GIT_STATUS_WT_MODIFIED
                | pygit2.GIT_STATUS_INDEX_MODIFIED,
            ):
                modified.append(path)
            elif status == pygit2.GIT_STATUS_INDEX_NEW:
                added.append(path)
            elif status == pygit2.GIT_STATUS_WT_NEW:
                unknown.append(path)
            elif status == pygit2.GIT_STATUS_WT_DELETED:
                deleted.append(path)
            elif status == pygit2.GIT_STATUS_INDEX_DELETED:
                removed.append(path)
            else:
                raise error.Abort(
                    b'unhandled case: status for %r is %r' % (path, status)
                )

        if listclean:
            observed = set(
                modified + added + removed + deleted + unknown + ignored
            )
            index = self.git.index
            index.read()
            for entry in index:
                path = pycompat.fsencode(entry.path)
                if not match(path):
                    continue
                if path in observed:
                    continue  # already in some other set
                # BUG FIX: indexing bytes yields an int on Python 3, so the
                # previous ``path[-1] == b'/'`` test was always False and
                # directory entries were never skipped.  Use endswith, as
                # the ignored-path check above already does.
                if path.endswith(b'/'):
                    continue  # directory
                clean.append(path)

        # TODO are we really always sure of status here?
        return (
            False,
            scmutil.status(
                modified, added, removed, deleted, unknown, ignored, clean
            ),
            mtime_boundary,
        )

    def flagfunc(self, buildfallback):
        # TODO we can do better
        return buildfallback()

    def getcwd(self):
        # TODO is this a good way to do this?
        return os.path.dirname(
            os.path.dirname(pycompat.fsencode(self.git.path))
        )

    def get_entry(self, path):
        """return a DirstateItem for the associated path"""
        entry = self._map.get(path)
        if entry is None:
            return DirstateItem()
        return entry

    def normalize(self, path):
        """Check ``path`` for case-folding surprises; returns it unchanged."""
        normed = util.normcase(path)
        assert normed == path, b"TODO handling of case folding: %s != %s" % (
            normed,
            path,
        )
        return path

    @property
    def _checklink(self):
        # whether the filesystem holding the git repo supports symlinks
        return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path)))

    def copies(self):
        # TODO support copies?
        return {}

    # # TODO what the heck is this
    _filecache = set()

    def is_changing_parents(self):
        # TODO: we need to implement the context manager bits and
        # correctly stage/revert index edits.
        return False

    def is_changing_any(self):
        # TODO: we need to implement the context manager bits and
        # correctly stage/revert index edits.
        return False

    def write(self, tr):
        """Write the git index, deferred to transaction close when ``tr``
        is provided."""
        # TODO: call parent change callbacks

        if tr:

            def writeinner(category):
                self.git.index.write()

            tr.addpending(b'gitdirstate', writeinner)
        else:
            self.git.index.write()

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        # TODO core dirstate does something about slashes here
        assert isinstance(f, bytes)
        r = util.pathto(self._root, cwd, f)
        return r

    def matches(self, match):
        """Yield index paths matching ``match``."""
        for x in self.git.index:
            p = pycompat.fsencode(x.path)
            if match(p):
                yield p

    def set_clean(self, f, parentfiledata):
        """Mark a file normal and clean."""
        # TODO: for now we just let libgit2 re-stat the file. We can
        # clearly do better.

    def set_possibly_dirty(self, f):
        """Mark a file normal, but possibly dirty."""
        # TODO: for now we just let libgit2 re-stat the file. We can
        # clearly do better.

    def walk(self, match, subrepos, unknown, ignored, full=True):
        """Return a {path: stat} mapping for matched files on disk."""
        # TODO: we need to use .status() and not iterate the index,
        # because the index doesn't force a re-walk and so `hg add` of
        # a new file without an intervening call to status will
        # silently do nothing.
        r = {}
        cwd = self.getcwd()
        for path, status in self.git.status().items():
            if path.startswith('.hg/'):
                continue
            path = pycompat.fsencode(path)
            if not match(path):
                continue
            # TODO construct the stat info from the status object?
            try:
                s = os.stat(os.path.join(cwd, path))
            except FileNotFoundError:
                continue
            r[path] = s
        return r

    def set_tracked(self, f, reset_copy=False):
        """Start tracking ``f``; returns True if it was untracked before."""
        # TODO: support copies and reset_copy=True
        uf = pycompat.fsdecode(f)
        if uf in self.git.index:
            return False
        index = self.git.index
        index.read()
        index.add(uf)
        index.write()
        return True

    def add(self, f):
        index = self.git.index
        index.read()
        index.add(pycompat.fsdecode(f))
        index.write()

    def drop(self, f):
        index = self.git.index
        index.read()
        fs = pycompat.fsdecode(f)
        if fs in index:
            index.remove(fs)
            index.write()

    def set_untracked(self, f):
        """Stop tracking ``f``; returns True if it was tracked."""
        index = self.git.index
        index.read()
        fs = pycompat.fsdecode(f)
        if fs in index:
            index.remove(fs)
            index.write()
            return True
        return False

    def remove(self, f):
        index = self.git.index
        index.read()
        index.remove(pycompat.fsdecode(f))
        index.write()

    def copied(self, path):
        # TODO: track copies?
        return None

    def prefetch_parents(self):
        # TODO
        pass

    def update_file(self, *args, **kwargs):
        # TODO
        pass

    @contextlib.contextmanager
    def changing_parents(self, repo):
        # TODO: track this maybe?
        yield

    def addparentchangecallback(self, category, callback):
        # TODO: should this be added to the dirstate interface?
        self._plchangecallbacks[category] = callback

    def setbranch(self, branch, transaction):
        raise error.Abort(
            b'git repos do not support branches. try using bookmarks'
        )
@@ -1,1832 +1,1825 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import contextlib
11 11 import os
12 12 import stat
13 13 import uuid
14 14
15 15 from .i18n import _
16 16
17 17 from hgdemandimport import tracing
18 18
19 19 from . import (
20 20 dirstatemap,
21 21 encoding,
22 22 error,
23 23 match as matchmod,
24 24 node,
25 25 pathutil,
26 26 policy,
27 27 pycompat,
28 28 scmutil,
29 29 txnutil,
30 30 util,
31 31 )
32 32
33 33 from .dirstateutils import (
34 34 timestamp,
35 35 )
36 36
37 37 from .interfaces import (
38 38 dirstate as intdirstate,
39 39 util as interfaceutil,
40 40 )
41 41
42 42 parsers = policy.importmod('parsers')
43 43 rustmod = policy.importrust('dirstate')
44 44
45 # use to detect lack of a parameter
46 SENTINEL = object()
47
# True when the Rust implementation of dirstate-v2 is available.
HAS_FAST_DIRSTATE_V2 = rustmod is not None

# local aliases for frequently used helpers
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem
55 52
56 53
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg/ opener
        return obj._opener.join(fname)
62 59
63 60
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
69 66
70 67
def check_invalidated(func):
    """Decorator enforcing that ``func`` runs on a non-invalidated dirstate.

    The dirstate enters an "invalidated state" after an error occurred while
    modifying it, and stays there until the top level scope that framed the
    change is exited.  Calling a decorated method in that state raises
    ProgrammingError.
    """

    def wrap(self, *args, **kwargs):
        if not self._invalidated_context:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` after the dirstate was invalidated'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
87 84
88 85
def requires_changing_parents(func):
    """Decorator: ``func`` may only run inside a `changing_parents` context."""

    def wrap(self, *args, **kwargs):
        if self.is_changing_parents:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a changing_parents context'
        raise error.ProgrammingError(msg % func.__name__)

    return check_invalidated(wrap)
98 95
99 96
def requires_changing_files(func):
    """Decorator: ``func`` may only run inside a `changing_files` context."""

    def wrap(self, *args, **kwargs):
        if self.is_changing_files:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a `changing_files`'
        raise error.ProgrammingError(msg % func.__name__)

    return check_invalidated(wrap)
109 106
110 107
def requires_changing_any(func):
    """Decorator: ``func`` may only run inside any `changing_*` context."""

    def wrap(self, *args, **kwargs):
        if self.is_changing_any:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a changing context'
        raise error.ProgrammingError(msg % func.__name__)

    return check_invalidated(wrap)
120 117
121 118
def requires_changing_files_or_status(func):
    """Decorator: ``func`` needs a `changing_files` or `running_status`
    context to be open."""

    def wrap(self, *args, **kwargs):
        if self.is_changing_files or self._running_status > 0:
            return func(self, *args, **kwargs)
        msg = (
            'calling `%s` outside of a changing_files '
            'or running_status context'
        )
        raise error.ProgrammingError(msg % func.__name__)

    return check_invalidated(wrap)
134 131
135 132
# Mutually exclusive kinds of "changing" context (see `dirstate._changing`).
CHANGE_TYPE_PARENTS = "parents"
CHANGE_TYPE_FILES = "files"
138 135
139 136
140 137 @interfaceutil.implementer(intdirstate.idirstate)
141 138 class dirstate:
142 139
143 140 # used by largefile to avoid overwritting transaction callback
144 141 _tr_key_suffix = b''
145 142
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
        use_tracked_hint=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.

        `validate` is a callable applied to parent nodes before returning
        them; `sparsematchfn` builds the sparse matcher (or is None when
        sparse is disabled); `nodeconstants` supplies nullid and friends.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._use_tracked_hint = use_tracked_hint
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        # Either build a sparse-matcher or None if sparse is disabled
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True if any internal state may be different
        self._dirty = False
        # True if the set of tracked file may be different
        self._dirty_tracked_set = False
        self._ui = ui
        self._filecache = {}
        # nesting level of `changing_parents` context
        self._changing_level = 0
        # the change currently underway
        self._change_type = None
        # number of open _running_status context
        self._running_status = 0
        # True if the current dirstate changing operations have been
        # invalidated (used to make sure all nested contexts have been exited)
        self._invalidated_context = False
        self._attached_to_a_transaction = False
        self._filename = b'dirstate'
        self._filename_th = b'dirstate-tracked-hint'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
200 197
    def refresh(self):
        """Drop cached state (branch, map) that may be stale on disk."""
        # XXX if this happens, you likely did not enter the `changing_xxx`
        # using `repo.dirstate`, so a later `repo.dirstate` access might call
        # `refresh`.
        if self.is_changing_any:
            msg = "refreshing the dirstate in the middle of a change"
            raise error.ProgrammingError(msg)
        if '_branch' in vars(self):
            del self._branch
        if '_map' in vars(self) and self._map.may_need_refresh():
            self.invalidate()
212 209
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching the property is enough to populate the cache
        self._pl
219 216
    @contextlib.contextmanager
    @check_invalidated
    def running_status(self, repo):
        """Wrap a status operation

        This context is not mutually exclusive with the `changing_*` context.
        It also does not warrant for the `wlock` to be taken.

        If the wlock is taken, this context will behave in a simple way, and
        ensure the data are scheduled for write when leaving the top level
        context.

        If the lock is not taken, it will only warrant that the data are either
        committed (written) and rolled back (invalidated) when exiting the top
        level context. The write/invalidate action must be performed by the
        wrapped code.


        The expected logic is:

        A: read the dirstate
        B: run status
           This might make the dirstate dirty by updating cache,
           especially in Rust.
        C: do more "post status fixup if relevant
        D: try to take the w-lock (this will invalidate the changes if they were raced)
        E0: if dirstate changed on disk -> discard change (done by dirstate internal)
        E1: elif lock was acquired -> write the changes
        E2: else -> discard the changes
        """
        has_lock = repo.currentwlock() is not None
        is_changing = self.is_changing_any
        tr = repo.currenttransaction()
        has_tr = tr is not None
        nested = bool(self._running_status)

        # top-level status with no surrounding change or transaction
        first_and_alone = not (is_changing or has_tr or nested)

        # enforce no change happened outside of a proper context.
        if first_and_alone and self._dirty:
            has_tr = repo.currenttransaction() is not None
            if not has_tr and self._changing_level == 0 and self._dirty:
                msg = "entering a status context, but dirstate is already dirty"
                raise error.ProgrammingError(msg)

        # only the outermost locked status run writes on exit
        should_write = has_lock and not (nested or is_changing)

        self._running_status += 1
        try:
            yield
        except Exception:
            self.invalidate()
            raise
        finally:
            self._running_status -= 1
            if self._invalidated_context:
                should_write = False
                self.invalidate()

        if should_write:
            assert repo.currenttransaction() is tr
            self.write(tr)
        elif not has_lock:
            if self._dirty:
                msg = b'dirstate dirty while exiting an isolated status context'
                repo.ui.develwarn(msg)
                self.invalidate()
287 284
    @contextlib.contextmanager
    @check_invalidated
    def _changing(self, repo, change_type):
        """Core context manager behind `changing_parents`/`changing_files`.

        Tracks nesting, enforces mutual exclusion between change types, and
        writes or invalidates the dirstate when the outermost level exits.
        """
        if repo.currentwlock() is None:
            msg = b"trying to change the dirstate without holding the wlock"
            raise error.ProgrammingError(msg)

        has_tr = repo.currenttransaction() is not None
        if not has_tr and self._changing_level == 0 and self._dirty:
            msg = b"entering a changing context, but dirstate is already dirty"
            repo.ui.develwarn(msg)

        assert self._changing_level >= 0
        # different type of change are mutually exclusive
        if self._change_type is None:
            assert self._changing_level == 0
            self._change_type = change_type
        elif self._change_type != change_type:
            msg = (
                'trying to open "%s" dirstate-changing context while a "%s" is'
                ' already open'
            )
            msg %= (change_type, self._change_type)
            raise error.ProgrammingError(msg)
        should_write = False
        self._changing_level += 1
        try:
            yield
        except:  # re-raises
            self.invalidate()  # this will set `_invalidated_context`
            raise
        finally:
            assert self._changing_level > 0
            self._changing_level -= 1
            # If the dirstate is being invalidated, call invalidate again.
            # This will throw away anything added by a upper context and
            # reset the `_invalidated_context` flag when relevant
            if self._changing_level <= 0:
                self._change_type = None
                assert self._changing_level == 0
            if self._invalidated_context:
                # make sure we invalidate anything an upper context might
                # have changed.
                self.invalidate()
            else:
                should_write = self._changing_level <= 0
        tr = repo.currenttransaction()
        if has_tr != (tr is not None):
            if has_tr:
                m = "transaction vanished while changing dirstate"
            else:
                m = "transaction appeared while changing dirstate"
            raise error.ProgrammingError(m)
        if should_write:
            self.write(tr)
343 340
    @contextlib.contextmanager
    def changing_parents(self, repo):
        """Wrap a dirstate change related to a change of working copy parents

        This context scopes a series of dirstate modifications that match an
        update of the working copy parents (typically `hg update`, `hg merge`
        etc).

        The dirstate's methods that perform this kind of modifications require
        this context to be present before being called.
        Such methods are decorated with `@requires_changing_parents`.

        The new dirstate contents will be written to disk when the top-most
        `changing_parents` context exits successfully. If an exception is
        raised during a `changing_parents` context of any level, all changes
        are invalidated. If this context is open within an open transaction,
        the dirstate writing is delayed until that transaction is successfully
        committed (and the dirstate is invalidated on transaction abort).

        The `changing_parents` operation is mutually exclusive with the
        `changing_files` one.
        """
        # delegate the heavy lifting to the shared `_changing` machinery
        with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
            yield c
368 365
    @contextlib.contextmanager
    def changing_files(self, repo):
        """Wrap a dirstate change related to the set of tracked files

        This context scopes a series of dirstate modifications that change the
        set of tracked files. (typically `hg add`, `hg remove` etc) or some
        dirstate stored information (like `hg rename --after`) but preserve
        the working copy parents.

        The dirstate's methods that perform this kind of modifications require
        this context to be present before being called.
        Such methods are decorated with `@requires_changing_files`.

        The new dirstate contents will be written to disk when the top-most
        `changing_files` context exits successfully. If an exception is raised
        during a `changing_files` context of any level, all changes are
        invalidated. If this context is open within an open transaction, the
        dirstate writing is delayed until that transaction is successfully
        committed (and the dirstate is invalidated on transaction abort).

        The `changing_files` operation is mutually exclusive with the
        `changing_parents` one.
        """
        # delegate the heavy lifting to the shared `_changing` machinery
        with self._changing(repo, CHANGE_TYPE_FILES) as c:
            yield c
394 391
395 392 # here to help migration to the new code
396 393 def parentchange(self):
397 394 msg = (
398 395 "Mercurial 6.4 and later requires call to "
399 396 "`dirstate.changing_parents(repo)`"
400 397 )
401 398 raise error.ProgrammingError(msg)
402 399
403 400 @property
404 401 def is_changing_any(self):
405 402 """Returns true if the dirstate is in the middle of a set of changes.
406 403
407 404 This returns True for any kind of change.
408 405 """
409 406 return self._changing_level > 0
410 407
411 408 @property
412 409 def is_changing_parents(self):
413 410 """Returns true if the dirstate is in the middle of a set of changes
414 411 that modify the dirstate parent.
415 412 """
416 413 if self._changing_level <= 0:
417 414 return False
418 415 return self._change_type == CHANGE_TYPE_PARENTS
419 416
420 417 @property
421 418 def is_changing_files(self):
422 419 """Returns true if the dirstate is in the middle of a set of changes
423 420 that modify the files tracked or their sources.
424 421 """
425 422 if self._changing_level <= 0:
426 423 return False
427 424 return self._change_type == CHANGE_TYPE_FILES
428 425
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # built lazily on first access and cached by @propertycache
        return self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
439 436
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.

        When sparse is disabled, return None.
        """
        if self._sparsematchfn is None:
            return None
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
456 453
    @repocache(b'branch')
    def _branch(self):
        """Raw branch name read from `.hg/branch` (b"default" if unset).

        Reads the transaction-pending version of the file when one exists.
        """
        f = None
        data = b''
        try:
            f, mode = txnutil.trypending(self._root, self._opener, b'branch')
            data = f.read().strip()
        except FileNotFoundError:
            pass
        finally:
            if f is not None:
                f.close()
        if not data:
            return b"default"
        return data
472 469
    @property
    def _pl(self):
        # raw (unvalidated) working-copy parents as stored in the map
        return self._map.parents()
476 473
    def hasdir(self, d):
        """True if ``d`` is a directory containing tracked files."""
        return self._map.hastrackeddir(d)
479 476
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher for ignored files, combining all configured ignore files."""
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
488 485
    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' instead of the OS
        # separator (ui.slash on platforms whose separator is not '/')
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
492 489
    @propertycache
    def _checklink(self):
        # whether the filesystem at the repo root supports symlinks
        return util.checklink(self._root)
496 493
    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repo root supports the exec bit
        return bool(util.checkexec(self._root))
500 497
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed via the .hg directory)
        return not util.fscasesensitive(self._join(b'.hg'))
504 501
505 502 def _join(self, f):
506 503 # much faster than os.path.join()
507 504 # it's safe because f is always a relative path
508 505 return self._rootdir + f
509 506
    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            """Return b'l', b'x', b'lx' or b'' for path ``x``."""
            entry = None
            fallback_value = None
            try:
                st = os.lstat(self._join(x))
            except OSError:
                # missing file: no flags
                return b''

            if self._checklink:
                if util.statislink(st):
                    return b'l'
            else:
                # filesystem can't express symlinks: consult stored fallback
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                if util.statisexec(st):
                    return b'x'
            else:
                # filesystem can't express exec bit: consult stored fallback
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags
564 561
    @propertycache
    def _cwd(self):
        """Current working directory, honoring the ui.forcecwd override."""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
572 569
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.

        Returns b'' when the cwd is the repo root, a root-relative path when
        inside the repo, and an absolute path when outside it.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
592 589
593 590 def pathto(self, f, cwd=None):
594 591 if cwd is None:
595 592 cwd = self.getcwd()
596 593 path = util.pathto(self._root, cwd, f)
597 594 if self._slash:
598 595 return util.pconvert(path)
599 596 return path
600 597
601 598 def get_entry(self, path):
602 599 """return a DirstateItem for the associated path"""
603 600 entry = self._map.get(path)
604 601 if entry is None:
605 602 return DirstateItem()
606 603 return entry
607 604
608 605 def __contains__(self, key):
609 606 return key in self._map
610 607
611 608 def __iter__(self):
612 609 return iter(sorted(self._map))
613 610
    def items(self):
        """Return an iterable of (filename, DirstateItem) pairs."""
        return self._map.items()

    # historical alias kept for backward compatibility
    iteritems = items
618 615
619 616 def parents(self):
620 617 return [self._validate(p) for p in self._pl]
621 618
622 619 def p1(self):
623 620 return self._validate(self._pl[0])
624 621
625 622 def p2(self):
626 623 return self._validate(self._pl[1])
627 624
628 625 @property
629 626 def in_merge(self):
630 627 """True if a merge is in progress"""
631 628 return self._pl[1] != self._nodeconstants.nullid
632 629
633 630 def branch(self):
634 631 return encoding.tolocal(self._branch)
635 632
    @requires_changing_parents
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are adjusted
        to normal and previous copy records are discarded and returned by
        the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._changing_level == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.changing_parents context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        # remember the pre-change parents once, so the write path can notify
        # the registered parent-change callbacks
        if self._origpl is None:
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
662 659
663 def setbranch(self, branch, transaction=SENTINEL):
660 def setbranch(self, branch, transaction):
664 661 self.__class__._branch.set(self, encoding.fromlocal(branch))
665 if transaction is SENTINEL:
666 msg = b"setbranch needs a `transaction` argument"
667 self._ui.deprecwarn(msg, b'6.5')
668 transaction = None
669 662 if transaction is not None:
670 663 self._setup_tr_abort(transaction)
671 664 transaction.addfilegenerator(
672 665 b'dirstate-3-branch%s' % self._tr_key_suffix,
673 666 (b'branch',),
674 667 self._write_branch,
675 668 location=b'plain',
676 669 post_finalize=True,
677 670 )
678 671 return
679 672
680 673 vfs = self._opener
681 674 with vfs(b'branch', b'w', atomictemp=True, checkambig=True) as f:
682 675 self._write_branch(f)
683 676 # make sure filecache has the correct stat info for _branch after
684 677 # replacing the underlying file
685 678 #
686 679 # XXX do we actually need this,
687 680 # refreshing the attribute is quite cheap
688 681 ce = self._filecache[b'_branch']
689 682 if ce:
690 683 ce.refresh()
691 684
692 685 def _write_branch(self, file_obj):
693 686 file_obj.write(self._branch + b'\n')
694 687
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop every cached attribute so it gets lazily re-read on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False
        self._dirty_tracked_set = False
        # if we are inside a change scope, attached to a transaction, or
        # running status, the surrounding context now holds stale state
        self._invalidated_context = bool(
            self._changing_level > 0
            or self._attached_to_a_transaction
            or self._running_status
        )
        self._origpl = None
713 706
714 707 @requires_changing_any
715 708 def copy(self, source, dest):
716 709 """Mark dest as a copy of source. Unmark dest if source is None."""
717 710 if source == dest:
718 711 return
719 712 self._dirty = True
720 713 if source is not None:
721 714 self._check_sparse(source)
722 715 self._map.copymap[dest] = source
723 716 else:
724 717 self._map.copymap.pop(dest, None)
725 718
726 719 def copied(self, file):
727 720 return self._map.copymap.get(file, None)
728 721
729 722 def copies(self):
730 723 return self._map.copymap
731 724
    @requires_changing_files
    def set_tracked(self, filename, reset_copy=False):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        if reset_copy is set, any existing copy information will be dropped.

        return True if the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # newly tracked name: validate it (clashes, sparse profile);
            # may raise error.Abort
            self._check_new_tracked_filename(filename)
        pre_tracked = self._map.set_tracked(filename)
        if reset_copy:
            self._map.copymap.pop(filename, None)
        if pre_tracked:
            # the tracked set changed; the tracked-hint key must be rewritten
            self._dirty_tracked_set = True
        return pre_tracked
753 746
754 747 @requires_changing_files
755 748 def set_untracked(self, filename):
756 749 """a "public" method for generic code to mark a file as untracked
757 750
758 751 This function is to be called outside of "update/merge" case. For
759 752 example by a command like `hg remove X`.
760 753
761 754 return True the file was previously tracked, False otherwise.
762 755 """
763 756 ret = self._map.set_untracked(filename)
764 757 if ret:
765 758 self._dirty = True
766 759 self._dirty_tracked_set = True
767 760 return ret
768 761
769 762 @requires_changing_files_or_status
770 763 def set_clean(self, filename, parentfiledata):
771 764 """record that the current state of the file on disk is known to be clean"""
772 765 self._dirty = True
773 766 if not self._map[filename].tracked:
774 767 self._check_new_tracked_filename(filename)
775 768 (mode, size, mtime) = parentfiledata
776 769 self._map.set_clean(filename, mode, size, mtime)
777 770
778 771 @requires_changing_files_or_status
779 772 def set_possibly_dirty(self, filename):
780 773 """record that the current state of the file on disk is unknown"""
781 774 self._dirty = True
782 775 self._map.set_possibly_dirty(filename)
783 776
    @requires_changing_parents
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.changing_parents(repo):` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        # note: fall-through from the branches above (except the early
        # return) intentionally reaches this reset
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )
823 816
824 817 @requires_changing_parents
825 818 def update_file(
826 819 self,
827 820 filename,
828 821 wc_tracked,
829 822 p1_tracked,
830 823 p2_info=False,
831 824 possibly_dirty=False,
832 825 parentfiledata=None,
833 826 ):
834 827 """update the information about a file in the dirstate
835 828
836 829 This is to be called when the direstates parent changes to keep track
837 830 of what is the file situation in regards to the working copy and its parent.
838 831
839 832 This function must be called within a `dirstate.changing_parents` context.
840 833
841 834 note: the API is at an early stage and we might need to adjust it
842 835 depending of what information ends up being relevant and useful to
843 836 other processing.
844 837 """
845 838 self._update_file(
846 839 filename=filename,
847 840 wc_tracked=wc_tracked,
848 841 p1_tracked=p1_tracked,
849 842 p2_info=p2_info,
850 843 possibly_dirty=possibly_dirty,
851 844 parentfiledata=parentfiledata,
852 845 )
853 846
    def hacky_extension_update_file(self, *args, **kwargs):
        """NEVER USE THIS, YOU DO NOT NEED IT

        This function is a variant of "update_file" to be called by a small set
        of extensions, it also adjust the internal state of file, but can be
        called outside an `changing_parents` context.

        A very small number of extension meddle with the working copy content
        in a way that requires to adjust the dirstate accordingly. At the time
        this command is written they are :
        - keyword,
        - largefile,
        PLEASE DO NOT GROW THIS LIST ANY FURTHER.

        This function could probably be replaced by more semantic one (like
        "adjust expected size" or "always revalidate file content", etc)
        however at the time where this is writen, this is too much of a detour
        to be considered.
        """
        # still requires *some* active change/status context
        if not (self._changing_level > 0 or self._running_status > 0):
            msg = "requires a changes context"
            raise error.ProgrammingError(msg)
        # same arguments and behavior as update_file, minus the
        # changing_parents decorator check
        self._update_file(
            *args,
            **kwargs,
        )
880 873
    def _update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """Shared implementation behind update_file and its variants.

        Resets the map entry for *filename* and maintains the dirty flags.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        old_entry = self._map.get(filename)
        if old_entry is None:
            prev_tracked = False
        else:
            prev_tracked = old_entry.tracked
        if prev_tracked != wc_tracked:
            # tracked set changed: the tracked-hint key file must be
            # rewritten on the next write
            self._dirty_tracked_set = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
912 905
    def _check_new_tracked_filename(self, filename):
        """Validate *filename* before it becomes newly tracked.

        Raises error.Abort when the name is invalid, clashes with an
        already-tracked directory or file, or is outside the sparse
        checkout.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                # once a prefix is a tracked directory, stop checking
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                # a tracked *file* exists where a parent directory is needed
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)
929 922
930 923 def _check_sparse(self, filename):
931 924 """Check that a filename is inside the sparse profile"""
932 925 sparsematch = self._sparsematcher
933 926 if sparsematch is not None and not sparsematch.always():
934 927 if not sparsematch(filename):
935 928 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
936 929 hint = _(
937 930 b'include file with `hg debugsparse --include <pattern>` or use '
938 931 b'`hg add -s <file>` to include file directory while adding'
939 932 )
940 933 raise error.Abort(msg % filename, hint=hint)
941 934
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Discover the on-disk case of *path* and cache it in *storemap*.

        *normed* is the case-normalized form of *path*.  *exists*, when not
        None, says whether the path exists on disk (saves an lstat).
        Returns the case-folded path.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
967 960
968 961 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
969 962 normed = util.normcase(path)
970 963 folded = self._map.filefoldmap.get(normed, None)
971 964 if folded is None:
972 965 if isknown:
973 966 folded = path
974 967 else:
975 968 folded = self._discoverpath(
976 969 path, normed, ignoremissing, exists, self._map.filefoldmap
977 970 )
978 971 return folded
979 972
980 973 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
981 974 normed = util.normcase(path)
982 975 folded = self._map.filefoldmap.get(normed, None)
983 976 if folded is None:
984 977 folded = self._map.dirfoldmap.get(normed, None)
985 978 if folded is None:
986 979 if isknown:
987 980 folded = path
988 981 else:
989 982 # store discovered result in dirfoldmap so that future
990 983 # normalizefile calls don't start matching directories
991 984 folded = self._discoverpath(
992 985 path, normed, ignoremissing, exists, self._map.dirfoldmap
993 986 )
994 987 return folded
995 988
996 989 def normalize(self, path, isknown=False, ignoremissing=False):
997 990 """
998 991 normalize the case of a pathname when on a casefolding filesystem
999 992
1000 993 isknown specifies whether the filename came from walking the
1001 994 disk, to avoid extra filesystem access.
1002 995
1003 996 If ignoremissing is True, missing path are returned
1004 997 unchanged. Otherwise, we try harder to normalize possibly
1005 998 existing path components.
1006 999
1007 1000 The normalized case is determined based on the following precedence:
1008 1001
1009 1002 - version of name already stored in the dirstate
1010 1003 - version of name stored on disk
1011 1004 - version provided via command arguments
1012 1005 """
1013 1006
1014 1007 if self._checkcase:
1015 1008 return self._normalize(path, isknown, ignoremissing)
1016 1009 return path
1017 1010
    # XXX this method is barely used; as a result:
    # - its semantics are unclear
    # - do we really need it?
1021 1014 @requires_changing_parents
1022 1015 def clear(self):
1023 1016 self._map.clear()
1024 1017 self._dirty = True
1025 1018
    @requires_changing_parents
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild the dirstate for *parent* from *allfiles*.

        With changedfiles=None the entire dirstate is reset; otherwise only
        the listed files are re-examined (and dropped when they are absent
        from *allfiles*).
        """
        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        # remember the pre-change parents so write() can notify callbacks
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
1078 1071
    def _setup_tr_abort(self, tr):
        """make sure we invalidate the current change on abort"""
        if tr is None:
            return

        def on_abort(tr):
            # the transaction went away: detach and drop pending in-memory
            # state so it is re-read from disk
            self._attached_to_a_transaction = False
            self.invalidate()

        tr.addabort(
            b'dirstate-invalidate%s' % self._tr_key_suffix,
            on_abort,
        )
1092 1085
    def write(self, tr):
        """Write pending dirstate changes, through transaction *tr* if given.

        With a transaction the writes are registered as file generators and
        deferred to finalization; without one the files are written
        immediately (atomically).
        """
        if not self._dirty:
            return
        # make sure we don't request a write of invalidated content
        # XXX move before the dirty check once `unlock` stop calling `write`
        assert not self._invalidated_context

        # the tracked-hint key only needs rewriting when the tracked set moved
        write_key = self._use_tracked_hint and self._dirty_tracked_set
        if tr:

            self._setup_tr_abort(tr)
            self._attached_to_a_transaction = True

            def on_success(f):
                self._attached_to_a_transaction = False
                # NOTE: the trailing comma builds a discarded 1-tuple; harmless
                self._writedirstate(tr, f),

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate-1-main%s' % self._tr_key_suffix,
                (self._filename,),
                on_success,
                location=b'plain',
                post_finalize=True,
            )
            if write_key:
                tr.addfilegenerator(
                    b'dirstate-2-key-post%s' % self._tr_key_suffix,
                    (self._filename_th,),
                    lambda f: self._write_tracked_hint(tr, f),
                    location=b'plain',
                    post_finalize=True,
                )
            return

        file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
        with file(self._filename) as f:
            self._writedirstate(tr, f)
        if write_key:
            # we update the key-file after writing to make sure reader have a
            # key that match the newly written content
            with file(self._filename_th) as f:
                self._write_tracked_hint(tr, f)
1136 1129
1137 1130 def delete_tracked_hint(self):
1138 1131 """remove the tracked_hint file
1139 1132
1140 1133 To be used by format downgrades operation"""
1141 1134 self._opener.unlink(self._filename_th)
1142 1135 self._use_tracked_hint = False
1143 1136
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # callbacks are fired from _writedirstate, in sorted(category) order
        self._plchangecallbacks[category] = callback
1154 1147
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map to file object *st*.

        Also fires the registered parent-change callbacks when the working
        directory parents changed since they were first recorded.
        """
        # make sure we don't write invalidated content
        assert not self._invalidated_context
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.items()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        self._map.write(tr, st)
        self._dirty = False
        self._dirty_tracked_set = False
1166 1159
1167 1160 def _write_tracked_hint(self, tr, f):
1168 1161 key = node.hex(uuid.uuid4().bytes)
1169 1162 f.write(b"1\n%s\n" % key) # 1 is the format version
1170 1163
1171 1164 def _dirignore(self, f):
1172 1165 if self._ignore(f):
1173 1166 return True
1174 1167 for p in pathutil.finddirs(f):
1175 1168 if self._ignore(p):
1176 1169 return True
1177 1170 return False
1178 1171
1179 1172 def _ignorefiles(self):
1180 1173 files = []
1181 1174 if os.path.exists(self._join(b'.hgignore')):
1182 1175 files.append(self._join(b'.hgignore'))
1183 1176 for name, path in self._ui.configitems(b"ui"):
1184 1177 if name == b'ignore' or name.startswith(b'ignore.'):
1185 1178 # we need to use os.path.join here rather than self._join
1186 1179 # because path is arbitrary and user-specified
1187 1180 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1188 1181 return files
1189 1182
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) for the rule that ignores *f*.

        Scans every ignore file, queueing files referenced through
        `subinclude:` patterns as well.  Returns (None, -1, b"") when no
        pattern matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced ignore file unless already scanned
                    if p not in visited:
                        files.append(p)
                    continue
                # build a single-pattern matcher and test f against it
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
1211 1204
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description of an unsupported on-disk file type
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        # hoist hot callables/constants into locals for the loops below
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop files living inside subrepos: both lists are sorted, so one
        # merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except (OSError) as inst:
                # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group result paths by their case-normalized form
            for f, st in results.items():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.items():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1347 1340
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # pick the ignore predicates for this invocation
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        if self._sparsematchfn is not None:
            # restrict the walk to the sparse profile, but keep explicitly
            # named files visible
            em = matchmod.exact(match.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            match = matchmod.intersectmatchers(match, sm)

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        # hoist hot callables/constants into locals for the traversal loops
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        # decide which walk phases can be skipped for this matcher
        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except (PermissionError, FileNotFoundError) as inst:
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # the sentinels protected the walk; drop them from the result
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1538 1531
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status through the Rust implementation.

        Returns a ``(lookup, status)`` pair where ``lookup`` lists files
        whose state could not be decided from cached metadata alone and
        ``status`` is a ``scmutil.status`` tuple.  May raise
        ``rustmod.FallbackError`` (the caller in ``status()`` catches it and
        falls back to the pure-Python path).
        """
        if self._sparsematchfn is not None:
            # Restrict the matcher to the sparse checkout, while still
            # letting explicitly named files through via an exact matcher.
            em = matchmod.exact(matcher.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            matcher = matchmod.intersectmatchers(matcher, sm)
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            # Single-threaded when workers are disabled in the config.
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # The Rust call may have refreshed entries; keep our dirty flag in
        # sync so the updated map gets written out.
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            # Relay ignore-file warnings collected on the Rust side: tuples
            # are (file, bad_syntax); bare items are unreadable pattern files.
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for fn, message in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1620 1613
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

        unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
        status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
        status.clean:
            files that have definitely not been modified since the
            dirstate was written

        Actually returns a 3-tuple ``(unsure, status, mtime_boundary)``;
        ``mtime_boundary`` is the filesystem "now" used to disambiguate
        files modified in the present/future, or None when unavailable.
        Must be called inside a ``running_status`` context.
        """
        if not self._running_status:
            msg = "Calling `status` outside a `running_status` context"
            raise error.ProgrammingError(msg)
        # The incoming booleans say which categories to list; rename them so
        # the original names can be reused below as the result lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        # Matcher classes the Rust implementation knows how to handle.
        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.differencematcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
            matchmod.intersectionmatcher,
            matchmod.nevermatcher,
            matchmod.unionmatcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        # Get the time from the filesystem so we can disambiguate files that
        # appear modified in the present or future.
        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None

        if use_rust:
            try:
                res = self._rust_status(
                    match, listclean, listignored, listunknown
                )
                return res + (mtime_boundary,)
            except rustmod.FallbackError:
                # Rust could not handle this request; fall through to the
                # pure-Python implementation below.
                pass

        # Sink for categories the caller did not ask to list.
        def noop(f):
            pass

        # Bind hot lookups/appends to locals for the walk loop below.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._map.copymap

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(
            match, subrepos, listunknown, listignored, full=full
        ).items():
            if not dcontains(fn):
                # Untracked file: classify as ignored or unknown.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                # Tracked but gone from disk.
                dadd(fn)
            elif t.p2_info:
                # Entry carries second-parent (merge) info.
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if not checklink and t.has_fallback_symlink:
                    # If the file system does not support symlink, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif not checkexec and t.has_fallback_exec:
                    # If the file system does not support exec bits, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    # There might be a change in the future if for example the
                    # internal clock is off, but this is a case where the issues
                    # the user would face would be a lot worse and there is
                    # nothing we can really do.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status, mtime_boundary)
1771 1764
1772 1765 def matches(self, match):
1773 1766 """
1774 1767 return files in the dirstate (in whatever state) filtered by match
1775 1768 """
1776 1769 dmap = self._map
1777 1770 if rustmod is not None:
1778 1771 dmap = self._map._map
1779 1772
1780 1773 if match.always():
1781 1774 return dmap.keys()
1782 1775 files = match.files()
1783 1776 if match.isexact():
1784 1777 # fast path -- filter the other way around, since typically files is
1785 1778 # much smaller than dmap
1786 1779 return [f for f in files if f in dmap]
1787 1780 if match.prefix() and all(fn in dmap for fn in files):
1788 1781 # fast path -- all the values are known to be files, so just return
1789 1782 # that
1790 1783 return list(files)
1791 1784 return [f for f in dmap if match(f)]
1792 1785
1793 1786 def all_file_names(self):
1794 1787 """list all filename currently used by this dirstate
1795 1788
1796 1789 This is only used to do `hg rollback` related backup in the transaction
1797 1790 """
1798 1791 files = [b'branch']
1799 1792 if self._opener.exists(self._filename):
1800 1793 files.append(self._filename)
1801 1794 if self._use_dirstate_v2:
1802 1795 files.append(self._map.docket.data_filename())
1803 1796 return tuple(files)
1804 1797
    def verify(self, m1, m2, p1, narrow_matcher=None):
        """
        check the dirstate contents against the parent manifest and yield errors

        m1 and m2 are the first and second parent manifests, p1 the first
        parent's node (used only for display).  narrow_matcher, when given,
        limits which manifest files are checked in the reverse pass.
        Yields one pre-formatted bytes error message per inconsistency.
        """
        # Error message templates, filled in below.
        missing_from_p1 = _(
            b"%s marked as tracked in p1 (%s) but not in manifest1\n"
        )
        unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
        missing_from_ps = _(
            b"%s marked as modified, but not in either manifest\n"
        )
        missing_from_ds = _(
            b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
        )
        # Forward pass: every dirstate entry must be consistent with the
        # manifests.
        for f, entry in self.items():
            if entry.p1_tracked:
                if entry.modified and f not in m1 and f not in m2:
                    # a modified entry must exist in at least one manifest
                    yield missing_from_ps % f
                elif f not in m1:
                    # a p1-tracked entry must exist in manifest1
                    yield missing_from_p1 % (f, node.short(p1))
            if entry.added and f in m1:
                # "added" means new in the working copy, so it cannot
                # already be in manifest1
                yield unexpected_in_p1 % f
        # Reverse pass: everything in manifest1 must be tracked in p1.
        for f in m1:
            if narrow_matcher is not None and not narrow_matcher(f):
                continue
            entry = self.get_entry(f)
            if not entry.p1_tracked:
                yield missing_from_ds % (f, node.short(p1))
@@ -1,219 +1,219 b''
1 1 import contextlib
2 2
3 3 from . import util as interfaceutil
4 4
5 5
class idirstate(interfaceutil.Interface):
    def __init__(
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
        use_tracked_hint=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """

    # TODO: all these private methods and attributes should be made
    # public or removed from the interface.
    _ignore = interfaceutil.Attribute("""Matcher for ignored files.""")
    is_changing_any = interfaceutil.Attribute(
        """True if any changes in progress."""
    )
    is_changing_parents = interfaceutil.Attribute(
        """True if parents changes in progress."""
    )
    is_changing_files = interfaceutil.Attribute(
        """True if file tracking changes in progress."""
    )

    def _ignorefiles():
        """Return a list of files containing patterns to ignore."""

    def _ignorefileandline(f):
        """Given a file `f`, return the ignore file and line that ignores it."""

    _checklink = interfaceutil.Attribute("""Callable for checking symlinks.""")
    _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""")

    @contextlib.contextmanager
    def changing_parents(repo):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """

    @contextlib.contextmanager
    def changing_files(repo):
        """Context manager for handling dirstate files.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """

    def hasdir(d):
        # NOTE(review): undocumented stub -- presumably a directory
        # membership test; confirm against the concrete implementations.
        pass

    def flagfunc(buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.
        """

    def getcwd():
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """

    def pathto(f, cwd=None):
        # NOTE(review): presumably returns `f` relative to `cwd` for
        # display (cf. getcwd above) -- confirm against implementation.
        pass

    def get_entry(path):
        """return a DirstateItem for the associated path"""

    def __contains__(key):
        """Check if bytestring `key` is known to the dirstate."""

    def __iter__():
        """Iterate the dirstate's contained filenames as bytestrings."""

    def items():
        """Iterate the dirstate's entries as (filename, DirstateItem.

        As usual, filename is a bytestring.
        """

    iteritems = items

    def parents():
        """Return the dirstate parents (see `setparents`)."""
        pass

    def p1():
        """Return the first dirstate parent."""
        pass

    def p2():
        """Return the second dirstate parent."""
        pass

    def branch():
        """Return the current branch (as recorded via `setbranch`)."""
        pass

    def setparents(p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """

    def setbranch(branch, transaction):
        """Set the branch the working copy is on.

        The `transaction` argument is mandatory (it no longer defaults
        to None): callers must pass the active transaction so the branch
        change participates in it.
        """
        pass

    def invalidate():
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

    def copy(source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""

    def copied(file):
        """Return the copy source recorded for `file`, if any (see `copy`)."""
        pass

    def copies():
        """Return the recorded copy information (see `copy`)."""
        pass

    def normalize(path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

    def clear():
        """Empty the dirstate's content."""
        pass

    def rebuild(parent, allfiles, changedfiles=None):
        # NOTE(review): undocumented stub -- presumably rebuilds the
        # dirstate from `parent` and the given file lists; confirm
        # against the concrete implementations.
        pass

    def write(tr):
        """Write the dirstate as part of transaction `tr`."""
        pass

    def addparentchangecallback(category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
        dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """

    def walk(match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """

    def status(match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

        unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
        status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
        status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """

    def matches(match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """

    def verify(m1, m2, p1, narrow_matcher=None):
        """
        check the dirstate contents against the parent manifest and yield errors
        """
General Comments 0
You need to be logged in to leave comments. Login now