##// END OF EJS Templates
dirstate: remove the dedicated backup logic...
marmoute -
r50981:76d44983 default
parent child Browse files
Show More
@@ -1,407 +1,395 b''
1 1 import contextlib
2 2 import os
3 3
4 4 from mercurial.node import sha1nodeconstants
5 5 from mercurial import (
6 6 dirstatemap,
7 7 error,
8 8 extensions,
9 9 match as matchmod,
10 10 pycompat,
11 11 scmutil,
12 12 util,
13 13 )
14 14 from mercurial.dirstateutils import (
15 15 timestamp,
16 16 )
17 17 from mercurial.interfaces import (
18 18 dirstate as intdirstate,
19 19 util as interfaceutil,
20 20 )
21 21
22 22 from . import gitutil
23 23
24 24
25 25 DirstateItem = dirstatemap.DirstateItem
26 26 propertycache = util.propertycache
27 27 pygit2 = gitutil.get_pygit2()
28 28
29 29
def readpatternfile(orig, filepath, warn, sourceinfo=False):
    """Parse an ignore file, understanding gitignore syntax.

    Wraps ``match.readpatternfile``: files that are not git ignore files
    (``info/exclude`` or ``*.gitignore``) are delegated to the original
    implementation; git ignore files are translated line by line into
    Mercurial ``rootglob:``/``relglob:`` patterns.

    Returns a ``(patterns, warnings)`` pair.  When ``sourceinfo`` is true,
    each pattern is a ``(pattern, lineno, originalline)`` tuple, matching
    the contract of the wrapped function.
    """
    if not (b'info/exclude' in filepath or filepath.endswith(b'.gitignore')):
        # BUG FIX: the caller's sourceinfo flag was previously discarded
        # (hard-coded to False), so callers asking for source information
        # silently got bare patterns.  Forward it instead.
        return orig(filepath, warn, sourceinfo=sourceinfo)
    result = []
    warnings = []
    with open(filepath, 'rb') as fp:
        for lineno, line in enumerate(fp, start=1):
            line = line.strip()
            if not line or line.startswith(b'#'):
                continue  # blank line or comment
            if line.startswith(b'!'):
                # gitignore negation has no Mercurial equivalent
                warnings.append(b'unsupported ignore pattern %s' % line)
                continue
            if line.startswith(b'/'):
                # leading slash anchors the pattern at the repo root
                pat = b'rootglob:' + line[1:]
            else:
                pat = b'relglob:' + line
            if sourceinfo:
                result.append((pat, lineno, line))
            else:
                result.append(pat)
    return result, warnings
48 48
49 49
50 50 extensions.wrapfunction(matchmod, b'readpatternfile', readpatternfile)
51 51
52 52
# Translation table from pygit2 status flags to Mercurial's one-letter
# dirstate codes: b'n' normal, b'a' added, b'r' removed, b'm' modified
# (used here for conflicted / doubly-modified files), b'?' unknown.
# Left empty when pygit2 is not importable so the module still loads.
_STATUS_MAP = {}
if pygit2:
    _STATUS_MAP = {
        pygit2.GIT_STATUS_CONFLICTED: b'm',
        pygit2.GIT_STATUS_CURRENT: b'n',
        # NOTE(review): ignored files are reported as b'?' (unknown)
        # rather than b'i' — confirm this is intentional.
        pygit2.GIT_STATUS_IGNORED: b'?',
        pygit2.GIT_STATUS_INDEX_DELETED: b'r',
        pygit2.GIT_STATUS_INDEX_MODIFIED: b'n',
        pygit2.GIT_STATUS_INDEX_NEW: b'a',
        pygit2.GIT_STATUS_INDEX_RENAMED: b'a',
        pygit2.GIT_STATUS_INDEX_TYPECHANGE: b'n',
        pygit2.GIT_STATUS_WT_DELETED: b'r',
        pygit2.GIT_STATUS_WT_MODIFIED: b'n',
        pygit2.GIT_STATUS_WT_NEW: b'?',
        pygit2.GIT_STATUS_WT_RENAMED: b'a',
        pygit2.GIT_STATUS_WT_TYPECHANGE: b'n',
        pygit2.GIT_STATUS_WT_UNREADABLE: b'?',
        # modified both in the index and the working tree
        pygit2.GIT_STATUS_INDEX_MODIFIED | pygit2.GIT_STATUS_WT_MODIFIED: b'm',
    }
72 72
73 73
@interfaceutil.implementer(intdirstate.idirstate)
class gitdirstate:
    """A dirstate implementation backed by a git index via pygit2.

    Implements enough of the ``idirstate`` interface for Mercurial to
    operate on a git-backed working copy.  Several operations are
    best-effort or unimplemented (see the TODO comments throughout).
    """

    def __init__(self, ui, vfs, gitrepo, use_dirstate_v2):
        self._ui = ui
        # vfs is rooted at .hg; the working-copy root is its parent
        self._root = os.path.dirname(vfs.base)
        self._opener = vfs
        self.git = gitrepo
        # category -> callback, fired on parent changes (see
        # addparentchangecallback)
        self._plchangecallbacks = {}
        # TODO: context.poststatusfixup is bad and uses this attribute
        self._dirty = False
        self._mapcls = dirstatemap.dirstatemap
        self._use_dirstate_v2 = use_dirstate_v2

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            sha1nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    def p1(self):
        # first parent: the commit HEAD peels to; nullid in an empty repo
        try:
            return self.git.head.peel().id.raw
        except pygit2.GitError:
            # Typically happens when peeling HEAD fails, as in an
            # empty repository.
            return sha1nodeconstants.nullid

    def p2(self):
        # TODO: MERGE_HEAD? something like that, right?
        return sha1nodeconstants.nullid

    def setparents(self, p1, p2=None):
        # only linear history is supported: p2 must be null
        if p2 is None:
            p2 = sha1nodeconstants.nullid
        assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
        self.git.head.set_target(gitutil.togitnode(p1))

    @util.propertycache
    def identity(self):
        # stat info for .git/index, used to detect out-of-band changes
        return util.filestat.frompath(
            os.path.join(self._root, b'.git', b'index')
        )

    def branch(self):
        # git branches are not Mercurial named branches
        return b'default'

    def parents(self):
        # TODO how on earth do we find p2 if a merge is in flight?
        return self.p1(), sha1nodeconstants.nullid

    def __iter__(self):
        # iterate tracked paths (as bytes) straight from the git index
        return (pycompat.fsencode(f.path) for f in self.git.index)

    def items(self):
        for ie in self.git.index:
            # NOTE(review): ie.path is yielded as str here while
            # __iter__ fsencodes — confirm callers expect that.
            yield ie.path, None  # value should be a DirstateItem

    # py2,3 compat forward
    iteritems = items

    def __getitem__(self, filename):
        # map a single file's git status to a dirstate code; unknown
        # files (KeyError from libgit2) report b'?'
        try:
            gs = self.git.status_file(filename)
        except KeyError:
            return b'?'
        return _STATUS_MAP[gs]

    def __contains__(self, filename):
        # a file is "in" the dirstate when git knows it and it is not
        # merely unknown/ignored
        try:
            gs = self.git.status_file(filename)
            return _STATUS_MAP[gs] != b'?'
        except KeyError:
            return False

    def status(self, match, subrepos, ignored, clean, unknown):
        """Compute working-copy status from ``git status``.

        Returns ``(lookup, scmutil.status, mtime_boundary)``; the
        ``ignored``/``clean``/``unknown`` parameters are only read as
        booleans before being shadowed by the result lists below.
        """
        listclean = clean
        # TODO handling of clean files - can we get that from git.status()?
        modified, added, removed, deleted, unknown, ignored, clean = (
            [],
            [],
            [],
            [],
            [],
            [],
            [],
        )

        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None

        gstatus = self.git.status()
        for path, status in gstatus.items():
            path = pycompat.fsencode(path)
            if not match(path):
                continue
            if status == pygit2.GIT_STATUS_IGNORED:
                if path.endswith(b'/'):
                    continue
                ignored.append(path)
            elif status in (
                pygit2.GIT_STATUS_WT_MODIFIED,
                pygit2.GIT_STATUS_INDEX_MODIFIED,
                pygit2.GIT_STATUS_WT_MODIFIED
                | pygit2.GIT_STATUS_INDEX_MODIFIED,
            ):
                modified.append(path)
            elif status == pygit2.GIT_STATUS_INDEX_NEW:
                added.append(path)
            elif status == pygit2.GIT_STATUS_WT_NEW:
                unknown.append(path)
            elif status == pygit2.GIT_STATUS_WT_DELETED:
                deleted.append(path)
            elif status == pygit2.GIT_STATUS_INDEX_DELETED:
                removed.append(path)
            else:
                raise error.Abort(
                    b'unhandled case: status for %r is %r' % (path, status)
                )

        if listclean:
            # anything git reported is not clean; everything else in the
            # index that matches is considered clean
            observed = set(
                modified + added + removed + deleted + unknown + ignored
            )
            index = self.git.index
            index.read()
            for entry in index:
                path = pycompat.fsencode(entry.path)
                if not match(path):
                    continue
                if path in observed:
                    continue  # already in some other set
                # NOTE(review): indexing bytes yields an int on Python 3,
                # so `path[-1] == b'/'` is always False; was
                # `path.endswith(b'/')` intended?  Confirm upstream.
                if path[-1] == b'/':
                    continue  # directory
                clean.append(path)

        # TODO are we really always sure of status here?
        return (
            False,
            scmutil.status(
                modified, added, removed, deleted, unknown, ignored, clean
            ),
            mtime_boundary,
        )

    def flagfunc(self, buildfallback):
        # TODO we can do better
        return buildfallback()

    def getcwd(self):
        # TODO is this a good way to do this?
        # .git/ lives inside the working copy, so strip two components
        return os.path.dirname(
            os.path.dirname(pycompat.fsencode(self.git.path))
        )

    def get_entry(self, path):
        """return a DirstateItem for the associated path"""
        entry = self._map.get(path)
        if entry is None:
            return DirstateItem()
        return entry

    def normalize(self, path):
        # case folding is not handled yet; insist the path is already
        # in normalized form
        normed = util.normcase(path)
        assert normed == path, b"TODO handling of case folding: %s != %s" % (
            normed,
            path,
        )
        return path

    @property
    def _checklink(self):
        # whether the filesystem under the working copy supports symlinks
        return util.checklink(os.path.dirname(pycompat.fsencode(self.git.path)))

    def copies(self):
        # TODO support copies?
        return {}

    # # TODO what the heck is this
    _filecache = set()

    def is_changing_parents(self):
        # TODO: we need to implement the context manager bits and
        # correctly stage/revert index edits.
        return False

    def is_changing_any(self):
        # TODO: we need to implement the context manager bits and
        # correctly stage/revert index edits.
        return False

    def write(self, tr):
        """Flush the git index, deferred to transaction close when tr given."""
        # TODO: call parent change callbacks

        if tr:

            def writeinner(category):
                self.git.index.write()

            tr.addpending(b'gitdirstate', writeinner)
        else:
            self.git.index.write()

    def pathto(self, f, cwd=None):
        """Return repo-relative path ``f`` expressed relative to ``cwd``."""
        if cwd is None:
            cwd = self.getcwd()
        # TODO core dirstate does something about slashes here
        assert isinstance(f, bytes)
        r = util.pathto(self._root, cwd, f)
        return r

    def matches(self, match):
        """Yield index paths (bytes) selected by ``match``."""
        for x in self.git.index:
            p = pycompat.fsencode(x.path)
            if match(p):
                yield p

    def set_clean(self, f, parentfiledata):
        """Mark a file normal and clean."""
        # TODO: for now we just let libgit2 re-stat the file. We can
        # clearly do better.

    def set_possibly_dirty(self, f):
        """Mark a file normal, but possibly dirty."""
        # TODO: for now we just let libgit2 re-stat the file. We can
        # clearly do better.

    def walk(self, match, subrepos, unknown, ignored, full=True):
        """Return {path: stat} for files reported by ``git status``."""
        # TODO: we need to use .status() and not iterate the index,
        # because the index doesn't force a re-walk and so `hg add` of
        # a new file without an intervening call to status will
        # silently do nothing.
        r = {}
        cwd = self.getcwd()
        for path, status in self.git.status().items():
            if path.startswith('.hg/'):
                continue
            path = pycompat.fsencode(path)
            if not match(path):
                continue
            # TODO construct the stat info from the status object?
            try:
                s = os.stat(os.path.join(cwd, path))
            except FileNotFoundError:
                continue
            r[path] = s
        return r

    # NOTE(review): savebackup/restorebackup/clearbackup are removed
    # upstream by r50981 (dedicated backup logic dropped).
    def savebackup(self, tr, backupname):
        # TODO: figure out a strategy for saving index backups.
        pass

    def restorebackup(self, tr, backupname):
        # TODO: figure out a strategy for saving index backups.
        pass

    def set_tracked(self, f, reset_copy=False):
        """Start tracking ``f``; returns True if it was untracked before."""
        # TODO: support copies and reset_copy=True
        uf = pycompat.fsdecode(f)
        if uf in self.git.index:
            return False
        index = self.git.index
        index.read()
        index.add(uf)
        index.write()
        return True

    def add(self, f):
        index = self.git.index
        index.read()
        index.add(pycompat.fsdecode(f))
        index.write()

    def drop(self, f):
        index = self.git.index
        index.read()
        fs = pycompat.fsdecode(f)
        if fs in index:
            index.remove(fs)
            index.write()

    def set_untracked(self, f):
        """Stop tracking ``f``; returns True if it was tracked before."""
        index = self.git.index
        index.read()
        fs = pycompat.fsdecode(f)
        if fs in index:
            index.remove(fs)
            index.write()
            return True
        return False

    def remove(self, f):
        index = self.git.index
        index.read()
        index.remove(pycompat.fsdecode(f))
        index.write()

    def copied(self, path):
        # TODO: track copies?
        return None

    def prefetch_parents(self):
        # TODO
        pass

    def update_file(self, *args, **kwargs):
        # TODO
        pass

    @contextlib.contextmanager
    def changing_parents(self, repo):
        # TODO: track this maybe?
        yield

    def addparentchangecallback(self, category, callback):
        # TODO: should this be added to the dirstate interface?
        self._plchangecallbacks[category] = callback

    def clearbackup(self, tr, backupname):
        # TODO
        pass

    def setbranch(self, branch):
        raise error.Abort(
            b'git repos do not support branches. try using bookmarks'
        )
@@ -1,1778 +1,1660 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import contextlib
11 11 import os
12 12 import stat
13 13 import uuid
14 14
15 15 from .i18n import _
16 16 from .pycompat import delattr
17 17
18 18 from hgdemandimport import tracing
19 19
20 20 from . import (
21 21 dirstatemap,
22 22 encoding,
23 23 error,
24 24 match as matchmod,
25 25 node,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 util,
31 31 )
32 32
33 33 from .dirstateutils import (
34 docket as docketmod,
35 34 timestamp,
36 35 )
37 36
38 37 from .interfaces import (
39 38 dirstate as intdirstate,
40 39 util as interfaceutil,
41 40 )
42 41
43 42 parsers = policy.importmod('parsers')
44 43 rustmod = policy.importrust('dirstate')
45 44
46 45 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47 46
48 47 propertycache = util.propertycache
49 48 filecache = scmutil.filecache
50 49 _rangemask = dirstatemap.rangemask
51 50
52 51 DirstateItem = dirstatemap.DirstateItem
53 52
54 53
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname against the dirstate's opener, a vfs rooted at .hg
        return obj._opener.join(fname)
60 59
61 60
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname against the working-copy root (see dirstate._join)
        return obj._join(fname)
67 66
68 67
def requires_changing_parents(func):
    """Decorator: require an active ``changing_parents`` context.

    Raises ProgrammingError when the wrapped method is called outside a
    changing-parents context or after the dirstate has been invalidated.
    """

    def wrap(self, *args, **kwargs):
        if not self.is_changing_parents:
            msg = 'calling `%s` outside of a changing_parents context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        if self._invalidated_context:
            msg = 'calling `%s` after the dirstate was invalidated'
            # BUG FIX: the function name was never interpolated, so the
            # error message carried a literal `%s` placeholder (compare
            # the sibling decorators below).
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
81 80
82 81
def requires_changing_files(func):
    """Decorator: require an active ``changing_files`` context."""

    def wrap(self, *args, **kwargs):
        # guard clause: proceed only inside a changing_files context
        if self.is_changing_files:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a `changing_files`' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
92 91
93 92
def requires_not_changing_parents(func):
    """Decorator: forbid calling inside a ``changing_parents`` context."""

    def wrap(self, *args, **kwargs):
        if self.is_changing_parents:
            msg = 'calling `%s` inside of a changing_parents context'
            raise error.ProgrammingError(msg % func.__name__)
        return func(self, *args, **kwargs)

    return wrap
103 102
104 103
105 104 CHANGE_TYPE_PARENTS = "parents"
106 105 CHANGE_TYPE_FILES = "files"
107 106
108 107
109 108 @interfaceutil.implementer(intdirstate.idirstate)
110 109 class dirstate:
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
        use_tracked_hint=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._use_tracked_hint = use_tracked_hint
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        # Either build a sparse-matcher or None if sparse is disabled
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True is any internal state may be different
        self._dirty = False
        # True if the set of tracked file may be different
        self._dirty_tracked_set = False
        self._ui = ui
        self._filecache = {}
        # nesting level of `changing_parents` context
        self._changing_level = 0
        # the change currently underway
        self._change_type = None
        # True if the current dirstate changing operations have been
        # invalidated (used to make sure all nested contexts have been exited)
        self._invalidated_context = False
        # on-disk file names used by this dirstate
        self._filename = b'dirstate'
        self._filename_th = b'dirstate-tracked-hint'
        self._pendingfilename = b'%s.pending' % self._filename
        # category -> callback, fired when the parents change
        self._plchangecallbacks = {}
        # parents before the current change, for callback reporting
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
162 161
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching _pl forces the dirstate map (and parents) to load now
        self._pl
169 168
    @contextlib.contextmanager
    def _changing(self, repo, change_type):
        """Scope a dirstate change of kind ``change_type``.

        Requires the wlock; supports nesting of the *same* change type
        only.  On exception the dirstate is invalidated; on clean exit of
        the outermost level the dirstate is written (possibly via the
        current transaction).
        """
        if repo.currentwlock() is None:
            msg = b"trying to change the dirstate without holding the wlock"
            raise error.ProgrammingError(msg)
        if self._invalidated_context:
            msg = "trying to use an invalidated dirstate before it has reset"
            raise error.ProgrammingError(msg)

        # remembered to detect a transaction appearing/vanishing mid-change
        has_tr = repo.currenttransaction() is not None

        # different type of change are mutually exclusive
        if self._change_type is None:
            assert self._changing_level == 0
            self._change_type = change_type
        elif self._change_type != change_type:
            msg = (
                'trying to open "%s" dirstate-changing context while a "%s" is'
                ' already open'
            )
            msg %= (change_type, self._change_type)
            raise error.ProgrammingError(msg)
        self._changing_level += 1
        try:
            yield
        except Exception:
            self.invalidate()
            raise
        finally:
            tr = repo.currenttransaction()
            if self._changing_level > 0:
                if self._invalidated_context:
                    # make sure we invalidate anything an upper context might
                    # have changed.
                    self.invalidate()
                self._changing_level -= 1
                # The invalidation is complete once we exit the final context
                # manager
                if self._changing_level <= 0:
                    self._change_type = None
                    assert self._changing_level == 0
                    if self._invalidated_context:
                        self._invalidated_context = False
                    else:
                        # When an exception occured, `_invalidated_context`
                        # would have been set to True by the `invalidate`
                        # call earlier.
                        #
                        # We don't have more straightforward code, because the
                        # Exception catching (and the associated `invalidate`
                        # calling) might have been called by a nested context
                        # instead of the top level one.
                        self.write(tr)
            if has_tr != (tr is not None):
                if has_tr:
                    m = "transaction vanished while changing dirstate"
                else:
                    m = "transaction appeared while changing dirstate"
                raise error.ProgrammingError(m)
229 228
    @contextlib.contextmanager
    def changing_parents(self, repo):
        """Context for changes that move the dirstate parents."""
        with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
            yield c
234 233
    @contextlib.contextmanager
    def changing_files(self, repo):
        """Context for changes to the set of tracked files."""
        with self._changing(repo, CHANGE_TYPE_FILES) as c:
            yield c
239 238
240 239 # here to help migration to the new code
241 240 def parentchange(self):
242 241 msg = (
243 242 "Mercurial 6.4 and later requires call to "
244 243 "`dirstate.changing_parents(repo)`"
245 244 )
246 245 raise error.ProgrammingError(msg)
247 246
    @property
    def is_changing_any(self):
        """Returns true if the dirstate is in the middle of a set of changes.

        This returns True for any kind of change.
        """
        # any open `_changing` context keeps the nesting level positive
        return self._changing_level > 0
255 254
    def pendingparentchange(self):
        # legacy alias; delegates to the (deprecated) is_changing_parent
        return self.is_changing_parent()
258 257
    def is_changing_parent(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # deprecated spelling; warn and forward to the property
        self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
        return self.is_changing_parents
265 264
266 265 @property
267 266 def is_changing_parents(self):
268 267 """Returns true if the dirstate is in the middle of a set of changes
269 268 that modify the dirstate parent.
270 269 """
271 270 if self._changing_level <= 0:
272 271 return False
273 272 return self._change_type == CHANGE_TYPE_PARENTS
274 273
275 274 @property
276 275 def is_changing_files(self):
277 276 """Returns true if the dirstate is in the middle of a set of changes
278 277 that modify the files tracked or their sources.
279 278 """
280 279 if self._changing_level <= 0:
281 280 return False
282 281 return self._change_type == CHANGE_TYPE_FILES
283 282
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # assignment shadows the propertycache so the map is built once
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
295 294
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.

        When sparse if disabled, return None.
        """
        if self._sparsematchfn is None:
            return None
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
312 311
313 312 @repocache(b'branch')
314 313 def _branch(self):
315 314 try:
316 315 return self._opener.read(b"branch").strip() or b"default"
317 316 except FileNotFoundError:
318 317 return b"default"
319 318
    @property
    def _pl(self):
        # raw (p1, p2) parent pair straight from the dirstate map
        return self._map.parents()
323 322
    def hasdir(self, d):
        # True when d is a directory containing tracked files
        return self._map.hastrackeddir(d)
326 325
327 326 @rootcache(b'.hgignore')
328 327 def _ignore(self):
329 328 files = self._ignorefiles()
330 329 if not files:
331 330 return matchmod.never()
332 331
333 332 pats = [b'include:%s' % f for f in files]
334 333 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
335 334
    @propertycache
    def _slash(self):
        # True when paths should be displayed with forward slashes
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
339 338
    @propertycache
    def _checklink(self):
        # whether the repo's filesystem supports symlinks
        return util.checklink(self._root)
343 342
    @propertycache
    def _checkexec(self):
        # whether the repo's filesystem honors the executable bit
        return bool(util.checkexec(self._root))
347 346
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probe on the .hg dir)
        return not util.fscasesensitive(self._join(b'.hg'))
351 350
    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
356 355
    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            entry = None
            fallback_value = None
            try:
                st = os.lstat(self._join(x))
            except OSError:
                # missing/unreadable file: no flags
                return b''

            if self._checklink:
                if util.statislink(st):
                    return b'l'
            else:
                # filesystem can't represent symlinks: consult the
                # fallback recorded in the dirstate, then layer 3
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                if util.statisexec(st):
                    return b'x'
            else:
                # same layering for the executable bit
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags
411 410
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
419 418
420 419 def getcwd(self):
421 420 """Return the path from which a canonical path is calculated.
422 421
423 422 This path should be used to resolve file patterns or to convert
424 423 canonical paths back to file paths for display. It shouldn't be
425 424 used to get real file paths. Use vfs functions instead.
426 425 """
427 426 cwd = self._cwd
428 427 if cwd == self._root:
429 428 return b''
430 429 # self._root ends with a path separator if self._root is '/' or 'C:\'
431 430 rootsep = self._root
432 431 if not util.endswithsep(rootsep):
433 432 rootsep += pycompat.ossep
434 433 if cwd.startswith(rootsep):
435 434 return cwd[len(rootsep) :]
436 435 else:
437 436 # we're outside the repo. return an absolute path.
438 437 return cwd
439 438
440 439 def pathto(self, f, cwd=None):
441 440 if cwd is None:
442 441 cwd = self.getcwd()
443 442 path = util.pathto(self._root, cwd, f)
444 443 if self._slash:
445 444 return util.pconvert(path)
446 445 return path
447 446
448 447 def get_entry(self, path):
449 448 """return a DirstateItem for the associated path"""
450 449 entry = self._map.get(path)
451 450 if entry is None:
452 451 return DirstateItem()
453 452 return entry
454 453
    def __contains__(self, key):
        # membership == presence in the dirstate map
        return key in self._map
457 456
    def __iter__(self):
        # deterministic (sorted) iteration over tracked paths
        return iter(sorted(self._map))
460 459
    def items(self):
        # (path, DirstateItem) pairs straight from the map
        return self._map.items()

    # py2/3 compat alias
    iteritems = items
465 464
    def parents(self):
        # both parents, run through the validate callback
        return [self._validate(p) for p in self._pl]
468 467
    def p1(self):
        # validated first parent
        return self._validate(self._pl[0])
471 470
    def p2(self):
        # validated second parent (nullid outside a merge)
        return self._validate(self._pl[1])
474 473
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid
479 478
    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
482 481
    # XXX since this make the dirstate dirty, we should enforce that it is done
    # withing an appropriate change-context that scope the change and ensure it
    # eventually get written on disk (or rolled back)
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._changing_level == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.changing_parents context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        # remember the pre-change parents for the parent-change callbacks
        if self._origpl is None:
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
511 510
    def setbranch(self, branch):
        """Persist ``branch`` to .hg/branch and refresh the filecache."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the atomictemp file so a partial write never lands
            f.discard()
            raise
527 526
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are rebuilt on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False
        self._dirty_tracked_set = False
        # inside a change context, flag it as invalidated so the context
        # machinery knows not to write this state out
        self._invalidated_context = self._changing_level > 0
        self._origpl = None
542 541
543 542 # XXX since this make the dirstate dirty, we should enforce that it is done
544 543 # withing an appropriate change-context that scope the change and ensure it
545 544 # eventually get written on disk (or rolled back)
546 545 def copy(self, source, dest):
547 546 """Mark dest as a copy of source. Unmark dest if source is None."""
548 547 if source == dest:
549 548 return
550 549 self._dirty = True
551 550 if source is not None:
552 551 self._check_sparse(source)
553 552 self._map.copymap[dest] = source
554 553 else:
555 554 self._map.copymap.pop(dest, None)
556 555
    def copied(self, file):
        # copy source for file, or None if not recorded as a copy
        return self._map.copymap.get(file, None)
559 558
    def copies(self):
        # full dest -> source copy map
        return self._map.copymap
562 561
    @requires_changing_files
    def set_tracked(self, filename, reset_copy=False):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        if reset_copy is set, any existing copy information will be dropped.

        return True the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # newly tracked: validate the name (case/path collisions)
            self._check_new_tracked_filename(filename)
        pre_tracked = self._map.set_tracked(filename)
        if reset_copy:
            self._map.copymap.pop(filename, None)
        if pre_tracked:
            self._dirty_tracked_set = True
        return pre_tracked
584 583
    @requires_changing_files
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        if ret:
            # only dirty the dirstate when something actually changed
            self._dirty = True
            self._dirty_tracked_set = True
        return ret
599 598
    @requires_not_changing_parents
    def set_clean(self, filename, parentfiledata):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        # parentfiledata carries the stat data to record as clean
        (mode, size, mtime) = parentfiledata
        self._map.set_clean(filename, mode, size, mtime)
608 607
609 608 @requires_not_changing_parents
610 609 def set_possibly_dirty(self, filename):
611 610 """record that the current state of the file on disk is unknown"""
612 611 self._dirty = True
613 612 self._map.set_possibly_dirty(filename)
614 613
    @requires_changing_parents
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.changing_parents(repo):` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        # NOTE(review): both remaining cases (including the fall-through of
        # the elif above when the entry is not "added") re-record the state.
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )
654 653
655 654 @requires_changing_parents
656 655 def update_file(
657 656 self,
658 657 filename,
659 658 wc_tracked,
660 659 p1_tracked,
661 660 p2_info=False,
662 661 possibly_dirty=False,
663 662 parentfiledata=None,
664 663 ):
665 664 """update the information about a file in the dirstate
666 665
667 666 This is to be called when the direstates parent changes to keep track
668 667 of what is the file situation in regards to the working copy and its parent.
669 668
670 669 This function must be called within a `dirstate.changing_parents` context.
671 670
672 671 note: the API is at an early stage and we might need to adjust it
673 672 depending of what information ends up being relevant and useful to
674 673 other processing.
675 674 """
676 675 self._update_file(
677 676 filename=filename,
678 677 wc_tracked=wc_tracked,
679 678 p1_tracked=p1_tracked,
680 679 p2_info=p2_info,
681 680 possibly_dirty=possibly_dirty,
682 681 parentfiledata=parentfiledata,
683 682 )
684 683
685 684 # XXX since this make the dirstate dirty, we should enforce that it is done
686 685 # withing an appropriate change-context that scope the change and ensure it
687 686 # eventually get written on disk (or rolled back)
688 687 def hacky_extension_update_file(self, *args, **kwargs):
689 688 """NEVER USE THIS, YOU DO NOT NEED IT
690 689
691 690 This function is a variant of "update_file" to be called by a small set
692 691 of extensions, it also adjust the internal state of file, but can be
693 692 called outside an `changing_parents` context.
694 693
695 694 A very small number of extension meddle with the working copy content
696 695 in a way that requires to adjust the dirstate accordingly. At the time
697 696 this command is written they are :
698 697 - keyword,
699 698 - largefile,
700 699 PLEASE DO NOT GROW THIS LIST ANY FURTHER.
701 700
702 701 This function could probably be replaced by more semantic one (like
703 702 "adjust expected size" or "always revalidate file content", etc)
704 703 however at the time where this is writen, this is too much of a detour
705 704 to be considered.
706 705 """
707 706 self._update_file(
708 707 *args,
709 708 **kwargs,
710 709 )
711 710
712 711 def _update_file(
713 712 self,
714 713 filename,
715 714 wc_tracked,
716 715 p1_tracked,
717 716 p2_info=False,
718 717 possibly_dirty=False,
719 718 parentfiledata=None,
720 719 ):
721 720
722 721 # note: I do not think we need to double check name clash here since we
723 722 # are in a update/merge case that should already have taken care of
724 723 # this. The test agrees
725 724
726 725 self._dirty = True
727 726 old_entry = self._map.get(filename)
728 727 if old_entry is None:
729 728 prev_tracked = False
730 729 else:
731 730 prev_tracked = old_entry.tracked
732 731 if prev_tracked != wc_tracked:
733 732 self._dirty_tracked_set = True
734 733
735 734 self._map.reset_state(
736 735 filename,
737 736 wc_tracked,
738 737 p1_tracked,
739 738 p2_info=p2_info,
740 739 has_meaningful_mtime=not possibly_dirty,
741 740 parentfiledata=parentfiledata,
742 741 )
743 742
    def _check_new_tracked_filename(self, filename):
        """Abort if ``filename`` cannot become a newly tracked file.

        Validates the name itself, rejects a name that is already a tracked
        directory, and rejects a name shadowed by a tracked file acting as
        one of its parent directories.  Finally checks the sparse profile.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                # NOTE(review): an ancestor is already a tracked directory,
                # so scanning further ancestors appears unnecessary — confirm
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)
760 759
761 760 def _check_sparse(self, filename):
762 761 """Check that a filename is inside the sparse profile"""
763 762 sparsematch = self._sparsematcher
764 763 if sparsematch is not None and not sparsematch.always():
765 764 if not sparsematch(filename):
766 765 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
767 766 hint = _(
768 767 b'include file with `hg debugsparse --include <pattern>` or use '
769 768 b'`hg add -s <file>` to include file directory while adding'
770 769 )
771 770 raise error.Abort(msg % filename, hint=hint)
772 771
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Determine the filesystem case folding of ``path``.

        ``normed`` is the case-normalized form of ``path``; ``storemap`` is
        the fold cache (file or directory map) that receives the result when
        the path exists on disk.  ``exists`` may be None (stat it here),
        True or False.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist
            storemap[normed] = folded

        return folded
798 797
799 798 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
800 799 normed = util.normcase(path)
801 800 folded = self._map.filefoldmap.get(normed, None)
802 801 if folded is None:
803 802 if isknown:
804 803 folded = path
805 804 else:
806 805 folded = self._discoverpath(
807 806 path, normed, ignoremissing, exists, self._map.filefoldmap
808 807 )
809 808 return folded
810 809
811 810 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
812 811 normed = util.normcase(path)
813 812 folded = self._map.filefoldmap.get(normed, None)
814 813 if folded is None:
815 814 folded = self._map.dirfoldmap.get(normed, None)
816 815 if folded is None:
817 816 if isknown:
818 817 folded = path
819 818 else:
820 819 # store discovered result in dirfoldmap so that future
821 820 # normalizefile calls don't start matching directories
822 821 folded = self._discoverpath(
823 822 path, normed, ignoremissing, exists, self._map.dirfoldmap
824 823 )
825 824 return folded
826 825
827 826 def normalize(self, path, isknown=False, ignoremissing=False):
828 827 """
829 828 normalize the case of a pathname when on a casefolding filesystem
830 829
831 830 isknown specifies whether the filename came from walking the
832 831 disk, to avoid extra filesystem access.
833 832
834 833 If ignoremissing is True, missing path are returned
835 834 unchanged. Otherwise, we try harder to normalize possibly
836 835 existing path components.
837 836
838 837 The normalized case is determined based on the following precedence:
839 838
840 839 - version of name already stored in the dirstate
841 840 - version of name stored on disk
842 841 - version provided via command arguments
843 842 """
844 843
845 844 if self._checkcase:
846 845 return self._normalize(path, isknown, ignoremissing)
847 846 return path
848 847
849 848 # XXX since this make the dirstate dirty, we should enforce that it is done
850 849 # withing an appropriate change-context that scope the change and ensure it
851 850 # eventually get written on disk (or rolled back)
852 851 def clear(self):
853 852 self._map.clear()
854 853 self._dirty = True
855 854
    # XXX since this make the dirstate dirty, we should enforce that it is done
    # withing an appropriate change-context that scope the change and ensure it
    # eventually get written on disk (or rolled back)
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to track ``allfiles`` under revision ``parent``.

        When ``changedfiles`` is given, only those entries are adjusted
        (looked up if present in ``allfiles``, dropped otherwise); otherwise
        the whole dirstate is rebuilt from scratch.
        """
        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            # remember the previous parents so callbacks can be notified later
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
910 909
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        # identity is delegated to the map; presumably captured when the
        # backing file was read — confirm in dirstatemap
        return self._map.identity
918 917
919 918 def write(self, tr):
920 919 if not self._dirty:
921 920 return
922 921
923 922 write_key = self._use_tracked_hint and self._dirty_tracked_set
924 923 if tr:
925 924 # make sure we invalidate the current change on abort
926 925 if tr is not None:
927 926 tr.addabort(
928 927 b'dirstate-invalidate',
929 928 lambda tr: self.invalidate(),
930 929 )
931 930 # delay writing in-memory changes out
932 931 tr.addfilegenerator(
933 932 b'dirstate-1-main',
934 933 (self._filename,),
935 934 lambda f: self._writedirstate(tr, f),
936 935 location=b'plain',
937 936 post_finalize=True,
938 937 )
939 938 if write_key:
940 939 tr.addfilegenerator(
941 940 b'dirstate-2-key-post',
942 941 (self._filename_th,),
943 942 lambda f: self._write_tracked_hint(tr, f),
944 943 location=b'plain',
945 944 post_finalize=True,
946 945 )
947 946 return
948 947
949 948 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
950 949 with file(self._filename) as f:
951 950 self._writedirstate(tr, f)
952 951 if write_key:
953 952 # we update the key-file after writing to make sure reader have a
954 953 # key that match the newly written content
955 954 with file(self._filename_th) as f:
956 955 self._write_tracked_hint(tr, f)
957 956
    def delete_tracked_hint(self):
        """remove the tracked_hint file

        To be used by format downgrades operation"""
        # unlink first, then disable the feature flag
        self._opener.unlink(self._filename_th)
        self._use_tracked_hint = False
964 963
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # later registrations under the same category replace earlier ones
        self._plchangecallbacks[category] = callback
975 974
976 975 def _writedirstate(self, tr, st):
977 976 # notify callbacks about parents change
978 977 if self._origpl is not None and self._origpl != self._pl:
979 978 for c, callback in sorted(self._plchangecallbacks.items()):
980 979 callback(self, self._origpl, self._pl)
981 980 self._origpl = None
982 981 self._map.write(tr, st)
983 982 self._dirty = False
984 983 self._dirty_tracked_set = False
985 984
986 985 def _write_tracked_hint(self, tr, f):
987 986 key = node.hex(uuid.uuid4().bytes)
988 987 f.write(b"1\n%s\n" % key) # 1 is the format version
989 988
990 989 def _dirignore(self, f):
991 990 if self._ignore(f):
992 991 return True
993 992 for p in pathutil.finddirs(f):
994 993 if self._ignore(p):
995 994 return True
996 995 return False
997 996
998 997 def _ignorefiles(self):
999 998 files = []
1000 999 if os.path.exists(self._join(b'.hgignore')):
1001 1000 files.append(self._join(b'.hgignore'))
1002 1001 for name, path in self._ui.configitems(b"ui"):
1003 1002 if name == b'ignore' or name.startswith(b'ignore.'):
1004 1003 # we need to use os.path.join here rather than self._join
1005 1004 # because path is arbitrary and user-specified
1006 1005 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1007 1006 return files
1008 1007
    def _ignorefileandline(self, f):
        """Return ``(file, lineno, line)`` for the first ignore pattern
        matching ``f``, walking subincluded pattern files breadth-first.

        Returns ``(None, -1, b"")`` when no pattern matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # defer subincluded files; ``visited`` guards cycles
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
1030 1029
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # map an st_mode we cannot track to a human-readable message
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist attribute lookups out of the loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except (OSError) as inst:
                # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.items():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.items():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1166 1165
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        if self._sparsematchfn is not None:
            # restrict the walk to the sparse profile, but keep explicitly
            # requested files visible via an exact matcher
            em = matchmod.exact(match.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            match = matchmod.intersectmatchers(match, sm)

        # hoist attribute lookups for the traversal loop
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except (PermissionError, FileNotFoundError) as inst:
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels inserted by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1357 1356
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status through the Rust extension.

        Returns ``(lookup, scmutil.status)``.  May raise
        ``rustmod.FallbackError`` from ``rustmod.status`` — the caller is
        expected to fall back to the pure-Python path in that case.
        """
        if self._sparsematchfn is not None:
            # restrict to the sparse profile, keeping explicit files visible
            em = matchmod.exact(matcher.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            matcher = matchmod.intersectmatchers(matcher, sm)
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust side may have refreshed cached data; propagate dirtiness
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file, syntax) pair: bad pattern syntax in that file
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the pattern file could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for fn, message in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1439 1438
1440 1439 # XXX since this can make the dirstate dirty (through rust), we should
1441 1440 # enforce that it is done withing an appropriate change-context that scope
1442 1441 # the change and ensure it eventually get written on disk (or rolled back)
1443 1442 def status(self, match, subrepos, ignored, clean, unknown):
1444 1443 """Determine the status of the working copy relative to the
1445 1444 dirstate and return a pair of (unsure, status), where status is of type
1446 1445 scmutil.status and:
1447 1446
1448 1447 unsure:
1449 1448 files that might have been modified since the dirstate was
1450 1449 written, but need to be read to be sure (size is the same
1451 1450 but mtime differs)
1452 1451 status.modified:
1453 1452 files that have definitely been modified since the dirstate
1454 1453 was written (different size or mode)
1455 1454 status.clean:
1456 1455 files that have definitely not been modified since the
1457 1456 dirstate was written
1458 1457 """
1459 1458 listignored, listclean, listunknown = ignored, clean, unknown
1460 1459 lookup, modified, added, unknown, ignored = [], [], [], [], []
1461 1460 removed, deleted, clean = [], [], []
1462 1461
1463 1462 dmap = self._map
1464 1463 dmap.preload()
1465 1464
1466 1465 use_rust = True
1467 1466
1468 1467 allowed_matchers = (
1469 1468 matchmod.alwaysmatcher,
1470 1469 matchmod.differencematcher,
1471 1470 matchmod.exactmatcher,
1472 1471 matchmod.includematcher,
1473 1472 matchmod.intersectionmatcher,
1474 1473 matchmod.nevermatcher,
1475 1474 matchmod.unionmatcher,
1476 1475 )
1477 1476
1478 1477 if rustmod is None:
1479 1478 use_rust = False
1480 1479 elif self._checkcase:
1481 1480 # Case-insensitive filesystems are not handled yet
1482 1481 use_rust = False
1483 1482 elif subrepos:
1484 1483 use_rust = False
1485 1484 elif not isinstance(match, allowed_matchers):
1486 1485 # Some matchers have yet to be implemented
1487 1486 use_rust = False
1488 1487
1489 1488 # Get the time from the filesystem so we can disambiguate files that
1490 1489 # appear modified in the present or future.
1491 1490 try:
1492 1491 mtime_boundary = timestamp.get_fs_now(self._opener)
1493 1492 except OSError:
1494 1493 # In largefiles or readonly context
1495 1494 mtime_boundary = None
1496 1495
1497 1496 if use_rust:
1498 1497 try:
1499 1498 res = self._rust_status(
1500 1499 match, listclean, listignored, listunknown
1501 1500 )
1502 1501 return res + (mtime_boundary,)
1503 1502 except rustmod.FallbackError:
1504 1503 pass
1505 1504
1506 1505 def noop(f):
1507 1506 pass
1508 1507
1509 1508 dcontains = dmap.__contains__
1510 1509 dget = dmap.__getitem__
1511 1510 ladd = lookup.append # aka "unsure"
1512 1511 madd = modified.append
1513 1512 aadd = added.append
1514 1513 uadd = unknown.append if listunknown else noop
1515 1514 iadd = ignored.append if listignored else noop
1516 1515 radd = removed.append
1517 1516 dadd = deleted.append
1518 1517 cadd = clean.append if listclean else noop
1519 1518 mexact = match.exact
1520 1519 dirignore = self._dirignore
1521 1520 checkexec = self._checkexec
1522 1521 checklink = self._checklink
1523 1522 copymap = self._map.copymap
1524 1523
1525 1524 # We need to do full walks when either
1526 1525 # - we're listing all clean files, or
1527 1526 # - match.traversedir does something, because match.traversedir should
1528 1527 # be called for every dir in the working dir
1529 1528 full = listclean or match.traversedir is not None
1530 1529 for fn, st in self.walk(
1531 1530 match, subrepos, listunknown, listignored, full=full
1532 1531 ).items():
1533 1532 if not dcontains(fn):
1534 1533 if (listignored or mexact(fn)) and dirignore(fn):
1535 1534 if listignored:
1536 1535 iadd(fn)
1537 1536 else:
1538 1537 uadd(fn)
1539 1538 continue
1540 1539
1541 1540 t = dget(fn)
1542 1541 mode = t.mode
1543 1542 size = t.size
1544 1543
1545 1544 if not st and t.tracked:
1546 1545 dadd(fn)
1547 1546 elif t.p2_info:
1548 1547 madd(fn)
1549 1548 elif t.added:
1550 1549 aadd(fn)
1551 1550 elif t.removed:
1552 1551 radd(fn)
1553 1552 elif t.tracked:
1554 1553 if not checklink and t.has_fallback_symlink:
1555 1554 # If the file system does not support symlink, the mode
1556 1555 # might not be correctly stored in the dirstate, so do not
1557 1556 # trust it.
1558 1557 ladd(fn)
1559 1558 elif not checkexec and t.has_fallback_exec:
1560 1559 # If the file system does not support exec bits, the mode
1561 1560 # might not be correctly stored in the dirstate, so do not
1562 1561 # trust it.
1563 1562 ladd(fn)
1564 1563 elif (
1565 1564 size >= 0
1566 1565 and (
1567 1566 (size != st.st_size and size != st.st_size & _rangemask)
1568 1567 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1569 1568 )
1570 1569 or fn in copymap
1571 1570 ):
1572 1571 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1573 1572 # issue6456: Size returned may be longer due to
1574 1573 # encryption on EXT-4 fscrypt, undecided.
1575 1574 ladd(fn)
1576 1575 else:
1577 1576 madd(fn)
1578 1577 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1579 1578 # There might be a change in the future if for example the
1580 1579 # internal clock is off, but this is a case where the issues
1581 1580 # the user would face would be a lot worse and there is
1582 1581 # nothing we can really do.
1583 1582 ladd(fn)
1584 1583 elif listclean:
1585 1584 cadd(fn)
1586 1585 status = scmutil.status(
1587 1586 modified, added, removed, deleted, unknown, ignored, clean
1588 1587 )
1589 1588 return (lookup, status, mtime_boundary)
1590 1589
1591 1590 def matches(self, match):
1592 1591 """
1593 1592 return files in the dirstate (in whatever state) filtered by match
1594 1593 """
1595 1594 dmap = self._map
1596 1595 if rustmod is not None:
1597 1596 dmap = self._map._map
1598 1597
1599 1598 if match.always():
1600 1599 return dmap.keys()
1601 1600 files = match.files()
1602 1601 if match.isexact():
1603 1602 # fast path -- filter the other way around, since typically files is
1604 1603 # much smaller than dmap
1605 1604 return [f for f in files if f in dmap]
1606 1605 if match.prefix() and all(fn in dmap for fn in files):
1607 1606 # fast path -- all the values are known to be files, so just return
1608 1607 # that
1609 1608 return list(files)
1610 1609 return [f for f in dmap if match(f)]
1611 1610
1612 1611 def _actualfilename(self, tr):
1613 1612 if tr:
1614 1613 return self._pendingfilename
1615 1614 else:
1616 1615 return self._filename
1617 1616
1618 1617 def all_file_names(self):
1619 1618 """list all filename currently used by this dirstate
1620 1619
1621 1620 This is only used to do `hg rollback` related backup in the transaction
1622 1621 """
1623 1622 if not self._opener.exists(self._filename):
1624 1623 # no data every written to disk yet
1625 1624 return ()
1626 1625 elif self._use_dirstate_v2:
1627 1626 return (
1628 1627 self._filename,
1629 1628 self._map.docket.data_filename(),
1630 1629 )
1631 1630 else:
1632 1631 return (self._filename,)
1633 1632
1634 def data_backup_filename(self, backupname):
1635 if not self._use_dirstate_v2:
1636 return None
1637 return backupname + b'.v2-data'
1638
1639 def _new_backup_data_filename(self, backupname):
1640 """return a filename to backup a data-file or None"""
1641 if not self._use_dirstate_v2:
1642 return None
1643 if self._map.docket.uuid is None:
1644 # not created yet, nothing to backup
1645 return None
1646 data_filename = self._map.docket.data_filename()
1647 return data_filename, self.data_backup_filename(backupname)
1648
1649 def backup_data_file(self, backupname):
1650 if not self._use_dirstate_v2:
1651 return None
1652 docket = docketmod.DirstateDocket.parse(
1653 self._opener.read(backupname),
1654 self._nodeconstants,
1655 )
1656 return self.data_backup_filename(backupname), docket.data_filename()
1657
1658 def savebackup(self, tr, backupname):
1659 '''Save current dirstate into backup file'''
1660 filename = self._actualfilename(tr)
1661 assert backupname != filename
1662
1663 # use '_writedirstate' instead of 'write' to write changes certainly,
1664 # because the latter omits writing out if transaction is running.
1665 # output file will be used to create backup of dirstate at this point.
1666 if self._dirty:
1667 self._writedirstate(
1668 tr,
1669 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1670 )
1671
1672 if tr:
1673 # ensure that subsequent tr.writepending returns True for
1674 # changes written out above, even if dirstate is never
1675 # changed after this
1676 tr.addfilegenerator(
1677 b'dirstate-1-main',
1678 (self._filename,),
1679 lambda f: self._writedirstate(tr, f),
1680 location=b'plain',
1681 post_finalize=True,
1682 )
1683
1684 self._opener.tryunlink(backupname)
1685 if self._opener.exists(filename):
1686 # hardlink backup is okay because _writedirstate is always called
1687 # with an "atomictemp=True" file.
1688 util.copyfile(
1689 self._opener.join(filename),
1690 self._opener.join(backupname),
1691 hardlink=True,
1692 )
1693 data_pair = self._new_backup_data_filename(backupname)
1694 if data_pair is not None:
1695 data_filename, bck_data_filename = data_pair
1696 util.copyfile(
1697 self._opener.join(data_filename),
1698 self._opener.join(bck_data_filename),
1699 hardlink=True,
1700 )
1701 if tr is not None:
1702 # ensure that pending file written above is unlinked at
1703 # failure, even if tr.writepending isn't invoked until the
1704 # end of this transaction
1705 tr.registertmp(bck_data_filename, location=b'plain')
1706
1707 def restorebackup(self, tr, backupname):
1708 '''Restore dirstate by backup file'''
1709 # this "invalidate()" prevents "wlock.release()" from writing
1710 # changes of dirstate out after restoring from backup file
1711 self.invalidate()
1712 o = self._opener
1713 if not o.exists(backupname):
1714 # there was no file backup, delete existing files
1715 filename = self._actualfilename(tr)
1716 data_file = None
1717 if self._use_dirstate_v2 and self._map.docket.uuid is not None:
1718 data_file = self._map.docket.data_filename()
1719 if o.exists(filename):
1720 o.unlink(filename)
1721 if data_file is not None and o.exists(data_file):
1722 o.unlink(data_file)
1723 return
1724 filename = self._actualfilename(tr)
1725 data_pair = self.backup_data_file(backupname)
1726 if o.exists(filename) and util.samefile(
1727 o.join(backupname), o.join(filename)
1728 ):
1729 o.unlink(backupname)
1730 else:
1731 o.rename(backupname, filename, checkambig=True)
1732
1733 if data_pair is not None:
1734 data_backup, target = data_pair
1735 if o.exists(target) and util.samefile(
1736 o.join(data_backup), o.join(target)
1737 ):
1738 o.unlink(data_backup)
1739 else:
1740 o.rename(data_backup, target, checkambig=True)
1741
1742 def clearbackup(self, tr, backupname):
1743 '''Clear backup file'''
1744 o = self._opener
1745 if o.exists(backupname):
1746 data_backup = self.backup_data_file(backupname)
1747 o.unlink(backupname)
1748 if data_backup is not None:
1749 o.unlink(data_backup[0])
1750
1751 1633 def verify(self, m1, m2, p1, narrow_matcher=None):
1752 1634 """
1753 1635 check the dirstate contents against the parent manifest and yield errors
1754 1636 """
1755 1637 missing_from_p1 = _(
1756 1638 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1757 1639 )
1758 1640 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1759 1641 missing_from_ps = _(
1760 1642 b"%s marked as modified, but not in either manifest\n"
1761 1643 )
1762 1644 missing_from_ds = _(
1763 1645 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1764 1646 )
1765 1647 for f, entry in self.items():
1766 1648 if entry.p1_tracked:
1767 1649 if entry.modified and f not in m1 and f not in m2:
1768 1650 yield missing_from_ps % f
1769 1651 elif f not in m1:
1770 1652 yield missing_from_p1 % (f, node.short(p1))
1771 1653 if entry.added and f in m1:
1772 1654 yield unexpected_in_p1 % f
1773 1655 for f in m1:
1774 1656 if narrow_matcher is not None and not narrow_matcher(f):
1775 1657 continue
1776 1658 entry = self.get_entry(f)
1777 1659 if not entry.p1_tracked:
1778 1660 yield missing_from_ds % (f, node.short(p1))
@@ -1,235 +1,226 b''
1 1 import contextlib
2 2
3 3 from . import util as interfaceutil
4 4
5 5
6 6 class idirstate(interfaceutil.Interface):
7 7 def __init__(
8 8 opener,
9 9 ui,
10 10 root,
11 11 validate,
12 12 sparsematchfn,
13 13 nodeconstants,
14 14 use_dirstate_v2,
15 15 use_tracked_hint=False,
16 16 ):
17 17 """Create a new dirstate object.
18 18
19 19 opener is an open()-like callable that can be used to open the
20 20 dirstate file; root is the root of the directory tracked by
21 21 the dirstate.
22 22 """
23 23
24 24 # TODO: all these private methods and attributes should be made
25 25 # public or removed from the interface.
26 26 _ignore = interfaceutil.Attribute("""Matcher for ignored files.""")
27 27 is_changing_any = interfaceutil.Attribute(
28 28 """True if any changes in progress."""
29 29 )
30 30 is_changing_parents = interfaceutil.Attribute(
31 31 """True if parents changes in progress."""
32 32 )
33 33 is_changing_files = interfaceutil.Attribute(
34 34 """True if file tracking changes in progress."""
35 35 )
36 36
37 37 def _ignorefiles():
38 38 """Return a list of files containing patterns to ignore."""
39 39
40 40 def _ignorefileandline(f):
41 41 """Given a file `f`, return the ignore file and line that ignores it."""
42 42
43 43 _checklink = interfaceutil.Attribute("""Callable for checking symlinks.""")
44 44 _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""")
45 45
46 46 @contextlib.contextmanager
47 47 def changing_parents(repo):
48 48 """Context manager for handling dirstate parents.
49 49
50 50 If an exception occurs in the scope of the context manager,
51 51 the incoherent dirstate won't be written when wlock is
52 52 released.
53 53 """
54 54
55 55 @contextlib.contextmanager
56 56 def changing_files(repo):
57 57 """Context manager for handling dirstate files.
58 58
59 59 If an exception occurs in the scope of the context manager,
60 60 the incoherent dirstate won't be written when wlock is
61 61 released.
62 62 """
63 63
64 64 def hasdir(d):
65 65 pass
66 66
67 67 def flagfunc(buildfallback):
68 68 """build a callable that returns flags associated with a filename
69 69
70 70 The information is extracted from three possible layers:
71 71 1. the file system if it supports the information
72 72 2. the "fallback" information stored in the dirstate if any
73 73 3. a more expensive mechanism inferring the flags from the parents.
74 74 """
75 75
76 76 def getcwd():
77 77 """Return the path from which a canonical path is calculated.
78 78
79 79 This path should be used to resolve file patterns or to convert
80 80 canonical paths back to file paths for display. It shouldn't be
81 81 used to get real file paths. Use vfs functions instead.
82 82 """
83 83
84 84 def pathto(f, cwd=None):
85 85 pass
86 86
87 87 def get_entry(path):
88 88 """return a DirstateItem for the associated path"""
89 89
90 90 def __contains__(key):
91 91 """Check if bytestring `key` is known to the dirstate."""
92 92
93 93 def __iter__():
94 94 """Iterate the dirstate's contained filenames as bytestrings."""
95 95
96 96 def items():
97 97 """Iterate the dirstate's entries as (filename, DirstateItem.
98 98
99 99 As usual, filename is a bytestring.
100 100 """
101 101
102 102 iteritems = items
103 103
104 104 def parents():
105 105 pass
106 106
107 107 def p1():
108 108 pass
109 109
110 110 def p2():
111 111 pass
112 112
113 113 def branch():
114 114 pass
115 115
116 116 def setparents(p1, p2=None):
117 117 """Set dirstate parents to p1 and p2.
118 118
119 119 When moving from two parents to one, "merged" entries a
120 120 adjusted to normal and previous copy records discarded and
121 121 returned by the call.
122 122
123 123 See localrepo.setparents()
124 124 """
125 125
126 126 def setbranch(branch):
127 127 pass
128 128
129 129 def invalidate():
130 130 """Causes the next access to reread the dirstate.
131 131
132 132 This is different from localrepo.invalidatedirstate() because it always
133 133 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
134 134 check whether the dirstate has changed before rereading it."""
135 135
136 136 def copy(source, dest):
137 137 """Mark dest as a copy of source. Unmark dest if source is None."""
138 138
139 139 def copied(file):
140 140 pass
141 141
142 142 def copies():
143 143 pass
144 144
145 145 def normalize(path, isknown=False, ignoremissing=False):
146 146 """
147 147 normalize the case of a pathname when on a casefolding filesystem
148 148
149 149 isknown specifies whether the filename came from walking the
150 150 disk, to avoid extra filesystem access.
151 151
152 152 If ignoremissing is True, missing path are returned
153 153 unchanged. Otherwise, we try harder to normalize possibly
154 154 existing path components.
155 155
156 156 The normalized case is determined based on the following precedence:
157 157
158 158 - version of name already stored in the dirstate
159 159 - version of name stored on disk
160 160 - version provided via command arguments
161 161 """
162 162
163 163 def clear():
164 164 pass
165 165
166 166 def rebuild(parent, allfiles, changedfiles=None):
167 167 pass
168 168
169 169 def identity():
170 170 """Return identity of dirstate itself to detect changing in storage
171 171
172 172 If identity of previous dirstate is equal to this, writing
173 173 changes based on the former dirstate out can keep consistency.
174 174 """
175 175
176 176 def write(tr):
177 177 pass
178 178
179 179 def addparentchangecallback(category, callback):
180 180 """add a callback to be called when the wd parents are changed
181 181
182 182 Callback will be called with the following arguments:
183 183 dirstate, (oldp1, oldp2), (newp1, newp2)
184 184
185 185 Category is a unique identifier to allow overwriting an old callback
186 186 with a newer callback.
187 187 """
188 188
189 189 def walk(match, subrepos, unknown, ignored, full=True):
190 190 """
191 191 Walk recursively through the directory tree, finding all files
192 192 matched by match.
193 193
194 194 If full is False, maybe skip some known-clean files.
195 195
196 196 Return a dict mapping filename to stat-like object (either
197 197 mercurial.osutil.stat instance or return value of os.stat()).
198 198
199 199 """
200 200
201 201 def status(match, subrepos, ignored, clean, unknown):
202 202 """Determine the status of the working copy relative to the
203 203 dirstate and return a pair of (unsure, status), where status is of type
204 204 scmutil.status and:
205 205
206 206 unsure:
207 207 files that might have been modified since the dirstate was
208 208 written, but need to be read to be sure (size is the same
209 209 but mtime differs)
210 210 status.modified:
211 211 files that have definitely been modified since the dirstate
212 212 was written (different size or mode)
213 213 status.clean:
214 214 files that have definitely not been modified since the
215 215 dirstate was written
216 216 """
217 217
218 218 def matches(match):
219 219 """
220 220 return files in the dirstate (in whatever state) filtered by match
221 221 """
222 222
223 def savebackup(tr, backupname):
224 '''Save current dirstate into backup file'''
225
226 def restorebackup(tr, backupname):
227 '''Restore dirstate by backup file'''
228
229 def clearbackup(tr, backupname):
230 '''Clear backup file'''
231
232 223 def verify(m1, m2, p1, narrow_matcher=None):
233 224 """
234 225 check the dirstate contents against the parent manifest and yield errors
235 226 """
General Comments 0
You need to be logged in to leave comments. Login now