##// END OF EJS Templates
interfaces: introduce an interface for dirstate implementations...
Augie Fackler -
r43197:d459cd8e default
parent child Browse files
Show More
@@ -0,0 +1,257 b''
1 from __future__ import absolute_import, print_function
2
3 import contextlib
4
5 from .. import (
6 node as nodemod,
7 )
8
9 from . import (
10 util as interfaceutil,
11 )
12
class idirstate(interfaceutil.Interface):
    """Interface that all dirstate implementations must conform to."""

    def __init__(opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''

    @contextlib.contextmanager
    def parentchange():
        '''Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        '''

    def pendingparentchange():
        '''Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        '''

    _map = interfaceutil.Attribute(
        """Return the dirstate contents (see documentation for dirstatemap).

        TODO this should not be exposed.
        """
    )

    def hasdir(d):
        """Return whether directory `d` contains any tracked file."""
        pass

    _ignore = interfaceutil.Attribute('Matcher for ignored files.')

    # NOTE(review): in the reference implementation these are cached booleans
    # (filesystem capabilities), not callables -- confirm the wording below.
    _checklink = interfaceutil.Attribute('Callable for checking symlinks.')
    _checkexec = interfaceutil.Attribute('Callable for checking exec bits.')

    def flagfunc(buildfallback):
        """Return a function mapping a tracked path to its 'l'/'x'/'' flags.

        buildfallback() supplies a slower fallback used when the filesystem
        cannot express symlinks and/or exec bits natively.
        """
        pass

    def getcwd():
        '''Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        '''

    def pathto(f, cwd=None):
        """Return `f` as a path relative to `cwd`, suitable for display."""
        pass

    def __getitem__(key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked
        '''

    def __contains__(key):
        """Check if bytestring `key` is known to the dirstate."""

    def __iter__():
        """Iterate the dirstate's contained filenames as bytestrings."""

    def items():
        """Iterate the dirstate's entries as (filename, dirstatetuple).

        As usual, filename is a bytestring.
        """

    iteritems = items

    def parents():
        """Return a list of the two (validated) parent nodes."""
        pass

    def p1():
        """Return the first (validated) parent node."""
        pass

    def p2():
        """Return the second (validated) parent node."""
        pass

    def branch():
        """Return the current branch name in local encoding."""
        pass

    def setparents(p1, p2=nodemod.nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """

    def setbranch(branch):
        """Set and persist the current branch name."""
        pass

    def invalidate():
        '''Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it.'''

    def copy(source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""

    def copied(file):
        """Return the copy source recorded for `file`, or None."""
        pass

    def copies():
        """Return the mapping of copy destinations to their sources."""
        pass

    def normal(f, parentfiledata=None):
        '''Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now.'''
        pass

    def normallookup(f):
        '''Mark a file normal, but possibly dirty.'''

    def otherparent(f):
        '''Mark as coming from the other parent, always dirty.'''

    def add(f):
        '''Mark a file added.'''

    def remove(f):
        '''Mark a file removed.'''

    def merge(f):
        '''Mark a file merged.'''

    def drop(f):
        '''Drop a file from the dirstate'''

    def normalize(path, isknown=False, ignoremissing=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        '''

    def clear():
        """Empty the dirstate and mark it dirty."""
        pass

    def rebuild(parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe `parent` using `allfiles`.

        If changedfiles is given, only those entries are refreshed.
        """
        pass

    def identity():
        '''Return identity of dirstate itself to detect changes in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        '''

    def write(tr):
        """Write pending changes to disk, or defer them via transaction `tr`."""
        pass

    def addparentchangecallback(category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """

    def _ignorefiles():
        """Return a list of files containing patterns to ignore.

        TODO this should not be exposed."""

    def _ignorefileandline(f):
        """Given a file `f`, return the ignore file and line that ignores it.

        TODO this should not be exposed."""

    def walk(match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''

    def status(match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''

    def matches(match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''

    def savebackup(tr, backupname):
        '''Save current dirstate into backup file'''

    def restorebackup(tr, backupname):
        '''Restore dirstate by backup file'''

    def clearbackup(tr, backupname):
        '''Clear backup file'''
@@ -1,1708 +1,1714 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 from .interfaces import (
31 dirstate as intdirstate,
32 util as interfaceutil,
33 )
34
# C (or pure-Python fallback) parser module and optional Rust dirstate.
parsers = policy.importmod(r'parsers')
rustmod = policy.importrust(r'dirstate')

# convenience aliases used throughout this module
propertycache = util.propertycache
filecache = scmutil.filecache
# sizes and mtimes are stored masked to 31 bits in dirstate entries
_rangemask = 0x7fffffff

dirstatetuple = parsers.dirstatetuple
38 43
class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        """Resolve `fname` inside the repository's .hg directory."""
        joined = obj._opener.join(fname)
        return joined
43 48
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        """Resolve `fname` relative to the repository root."""
        joined = obj._join(fname)
        return joined
48 53
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    # Create (and immediately remove) a scratch file purely to sample the
    # filesystem's notion of the current time with st_mtime granularity.
    fd, fname = vfs.mkstemp()
    try:
        return os.fstat(fd)[stat.ST_MTIME]
    finally:
        os.close(fd)
        vfs.unlink(fname)
57 62
63 @interfaceutil.implementer(intdirstate.idirstate)
58 64 class dirstate(object):
59 65
    def __init__(self, opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False           # in-memory state differs from disk?
        self._lastnormaltime = 0      # newest mtime marked normal (see normal())
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0       # depth of active parentchange() contexts
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename
        self._plchangecallbacks = {}  # category -> parent-change callback
        self._origpl = None           # parents before the first setparents()
        self._updatedfiles = set()    # files touched since the last write
        self._mapcls = dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
89 95
    @contextlib.contextmanager
    def parentchange(self):
        '''Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.

        Contexts may be nested (_parentwriters is a counter); setparents()
        is only legal while at least one context is active.
        '''
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
106 112
107 113 def pendingparentchange(self):
108 114 '''Returns true if the dirstate is in the middle of a set of changes
109 115 that modify the dirstate parent.
110 116 '''
111 117 return self._parentwriters > 0
112 118
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning the instance attribute replaces the propertycache slot,
        # so the dirstate file is only parsed once per instance.
        self._map = self._mapcls(self._ui, self._opener, self._root)
        return self._map
118 124
119 125 @property
120 126 def _sparsematcher(self):
121 127 """The matcher for the sparse checkout.
122 128
123 129 The working directory may not include every file from a manifest. The
124 130 matcher obtained by this property will match a path if it is to be
125 131 included in the working directory.
126 132 """
127 133 # TODO there is potential to cache this property. For now, the matcher
128 134 # is resolved on every access. (But the called function does use a
129 135 # cache to keep the lookup fast.)
130 136 return self._sparsematchfn()
131 137
132 138 @repocache('branch')
133 139 def _branch(self):
134 140 try:
135 141 return self._opener.read("branch").strip() or "default"
136 142 except IOError as inst:
137 143 if inst.errno != errno.ENOENT:
138 144 raise
139 145 return "default"
140 146
141 147 @property
142 148 def _pl(self):
143 149 return self._map.parents()
144 150
145 151 def hasdir(self, d):
146 152 return self._map.hastrackeddir(d)
147 153
148 154 @rootcache('.hgignore')
149 155 def _ignore(self):
150 156 files = self._ignorefiles()
151 157 if not files:
152 158 return matchmod.never()
153 159
154 160 pats = ['include:%s' % f for f in files]
155 161 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
156 162
157 163 @propertycache
158 164 def _slash(self):
159 165 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
160 166
    @propertycache
    def _checklink(self):
        """Whether the working-directory filesystem supports symlinks."""
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        """Whether the working-directory filesystem supports the exec bit."""
        return util.checkexec(self._root)
168 174
169 175 @propertycache
170 176 def _checkcase(self):
171 177 return not util.fscasesensitive(self._join('.hg'))
172 178
173 179 def _join(self, f):
174 180 # much faster than os.path.join()
175 181 # it's safe because f is always a relative path
176 182 return self._rootdir + f
177 183
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flags.

        The returned function yields 'l' for symlinks, 'x' for executable
        files and '' otherwise.  When the filesystem lacks symlink and/or
        exec-bit support, buildfallback() supplies a slower fallback that
        covers the missing capability.
        """
        if self._checklink and self._checkexec:
            # fast path: the filesystem can express both flags directly
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    pass
                return ''
            return f

        # at least one capability is missing: build the fallback now
        fallback = buildfallback()
        if self._checklink:
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            # neither capability available: fully rely on the fallback
            return fallback
211 217
212 218 @propertycache
213 219 def _cwd(self):
214 220 # internal config: ui.forcecwd
215 221 forcecwd = self._ui.config('ui', 'forcecwd')
216 222 if forcecwd:
217 223 return forcecwd
218 224 return encoding.getcwd()
219 225
220 226 def getcwd(self):
221 227 '''Return the path from which a canonical path is calculated.
222 228
223 229 This path should be used to resolve file patterns or to convert
224 230 canonical paths back to file paths for display. It shouldn't be
225 231 used to get real file paths. Use vfs functions instead.
226 232 '''
227 233 cwd = self._cwd
228 234 if cwd == self._root:
229 235 return ''
230 236 # self._root ends with a path separator if self._root is '/' or 'C:\'
231 237 rootsep = self._root
232 238 if not util.endswithsep(rootsep):
233 239 rootsep += pycompat.ossep
234 240 if cwd.startswith(rootsep):
235 241 return cwd[len(rootsep):]
236 242 else:
237 243 # we're outside the repo. return an absolute path.
238 244 return cwd
239 245
240 246 def pathto(self, f, cwd=None):
241 247 if cwd is None:
242 248 cwd = self.getcwd()
243 249 path = util.pathto(self._root, cwd, f)
244 250 if self._slash:
245 251 return util.pconvert(path)
246 252 return path
247 253
248 254 def __getitem__(self, key):
249 255 '''Return the current state of key (a filename) in the dirstate.
250 256
251 257 States are:
252 258 n normal
253 259 m needs merging
254 260 r marked for removal
255 261 a marked for addition
256 262 ? not tracked
257 263 '''
258 264 return self._map.get(key, ("?",))[0]
259 265
260 266 def __contains__(self, key):
261 267 return key in self._map
262 268
263 269 def __iter__(self):
264 270 return iter(sorted(self._map))
265 271
266 272 def items(self):
267 273 return self._map.iteritems()
268 274
269 275 iteritems = items
270 276
271 277 def parents(self):
272 278 return [self._validate(p) for p in self._pl]
273 279
    def p1(self):
        """Return the first (validated) working-directory parent."""
        return self._validate(self._pl[0])

    def p2(self):
        """Return the second (validated) working-directory parent."""
        return self._validate(self._pl[1])

    def branch(self):
        """Return the current branch name, converted to local encoding."""
        return encoding.tolocal(self._branch)
282 288
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent outside of "
                             "dirstate.parentchange context manager")

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for _writedirstate callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # leaving a merge: only nonnormal/otherparent entries can need
            # fixing up
            candidatefiles = self._map.nonnormalset.union(
                self._map.otherparentset)
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies
323 329
    def setbranch(self, branch):
        """Persist `branch` (local encoding) to .hg/branch atomically."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
339 345
340 346 def invalidate(self):
341 347 '''Causes the next access to reread the dirstate.
342 348
343 349 This is different from localrepo.invalidatedirstate() because it always
344 350 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
345 351 check whether the dirstate has changed before rereading it.'''
346 352
347 353 for a in (r"_map", r"_branch", r"_ignore"):
348 354 if a in self.__dict__:
349 355 delattr(self, a)
350 356 self._lastnormaltime = 0
351 357 self._dirty = False
352 358 self._updatedfiles.clear()
353 359 self._parentwriters = 0
354 360 self._origpl = None
355 361
356 362 def copy(self, source, dest):
357 363 """Mark dest as a copy of source. Unmark dest if source is None."""
358 364 if source == dest:
359 365 return
360 366 self._dirty = True
361 367 if source is not None:
362 368 self._map.copymap[dest] = source
363 369 self._updatedfiles.add(source)
364 370 self._updatedfiles.add(dest)
365 371 elif self._map.copymap.pop(dest, None):
366 372 self._updatedfiles.add(dest)
367 373
368 374 def copied(self, file):
369 375 return self._map.copymap.get(file, None)
370 376
371 377 def copies(self):
372 378 return self._map.copymap
373 379
    def _addpath(self, f, state, mode, size, mtime):
        """Record `f` in the map with the given state and stat data.

        When `f` becomes newly tracked, validates the filename and refuses
        names that clash with tracked directories (or whose ancestor is a
        tracked file).
        """
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                raise error.Abort(_('directory %r already in dirstate') %
                                  pycompat.bytestr(f))
            # shadows
            for d in util.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and entry[0] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') %
                        (pycompat.bytestr(d), pycompat.bytestr(f)))
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(f, oldstate, state, mode, size, mtime)
393 399
    def normal(self, f, parentfiledata=None):
        '''Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now.'''
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        # size and mtime are stored masked to 31 bits (see _rangemask)
        self._addpath(f, 'n', mode, size & _rangemask, mtime & _rangemask)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
420 426
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                if entry[0] == 'r' and entry[2] in (-1, -2):
                    source = self._map.copymap.get(f)
                    if entry[2] == -1:
                        self.merge(f)
                    elif entry[2] == -2:
                        self.otherparent(f)
                    if source:
                        self.copy(source, f)
                    return
                if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                    return
        # size -1 / mtime -1 force a content comparison on next status()
        self._addpath(f, 'n', 0, -1, -1)
        self._map.copymap.pop(f, None)
442 448
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.

        Only valid while a merge is in progress (second parent set).
        '''
        if self._pl[1] == nullid:
            raise error.Abort(_("setting %r to other parent "
                                "only allowed in merges") % f)
        if f in self and self[f] == 'n':
            # merge-like
            self._addpath(f, 'm', 0, -2, -1)
        else:
            # add-like
            self._addpath(f, 'n', 0, -2, -1)
        self._map.copymap.pop(f, None)
455 461
    def add(self, f):
        '''Mark a file added.'''
        # mode 0 and size/mtime of -1: stat data is meaningless for 'a'
        self._addpath(f, 'a', 0, -1, -1)
        # an added file can no longer be a copy target
        self._map.copymap.pop(f, None)
460 466
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        oldstate = self[f]
        size = 0
        if self._pl[1] != nullid:
            entry = self._map.get(f)
            if entry is not None:
                # backup the previous state in the size field
                if entry[0] == 'm': # merge
                    size = -1
                elif entry[0] == 'n' and entry[2] == -2: # other parent
                    size = -2
                    self._map.otherparentset.add(f)
        self._updatedfiles.add(f)
        self._map.removefile(f, oldstate, size)
        if size == 0:
            # plain removal: any recorded copy information is now stale
            self._map.copymap.pop(f, None)
479 485
480 486 def merge(self, f):
481 487 '''Mark a file merged.'''
482 488 if self._pl[1] == nullid:
483 489 return self.normallookup(f)
484 490 return self.otherparent(f)
485 491
486 492 def drop(self, f):
487 493 '''Drop a file from the dirstate'''
488 494 oldstate = self[f]
489 495 if self._map.dropfile(f, oldstate):
490 496 self._dirty = True
491 497 self._updatedfiles.add(f)
492 498 self._map.copymap.pop(f, None)
493 499
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Determine the on-disk case of `path` and cache it in `storemap`.

        `normed` is the case-normalized form of `path`; `exists`, when not
        None, skips the lexists() check.  The folded result is stored under
        `normed` in `storemap` before being returned.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
519 525
520 526 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
521 527 normed = util.normcase(path)
522 528 folded = self._map.filefoldmap.get(normed, None)
523 529 if folded is None:
524 530 if isknown:
525 531 folded = path
526 532 else:
527 533 folded = self._discoverpath(path, normed, ignoremissing, exists,
528 534 self._map.filefoldmap)
529 535 return folded
530 536
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize the case of `path`, consulting file then dir fold maps."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            # not a known file: maybe a known directory
            folded = self._map.dirfoldmap.get(normed, None)
            if folded is None:
                if isknown:
                    folded = path
                else:
                    # store discovered result in dirfoldmap so that future
                    # normalizefile calls don't start matching directories
                    folded = self._discoverpath(path, normed, ignoremissing,
                                                exists, self._map.dirfoldmap)
        return folded
545 551
546 552 def normalize(self, path, isknown=False, ignoremissing=False):
547 553 '''
548 554 normalize the case of a pathname when on a casefolding filesystem
549 555
550 556 isknown specifies whether the filename came from walking the
551 557 disk, to avoid extra filesystem access.
552 558
553 559 If ignoremissing is True, missing path are returned
554 560 unchanged. Otherwise, we try harder to normalize possibly
555 561 existing path components.
556 562
557 563 The normalized case is determined based on the following precedence:
558 564
559 565 - version of name already stored in the dirstate
560 566 - version of name stored on disk
561 567 - version provided via command arguments
562 568 '''
563 569
564 570 if self._checkcase:
565 571 return self._normalize(path, isknown, ignoremissing)
566 572 return path
567 573
    def clear(self):
        """Empty the dirstate (map and pending updates) and mark it dirty."""
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
573 579
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe `parent` for `allfiles`.

        If changedfiles is given, only those entries are refreshed; files
        listed in changedfiles but absent from allfiles are dropped.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            changedfiles = allfiles
        # preserve the "last normal" timestamp across the clear()
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.drop(f)

        self._dirty = True
592 598
593 599 def identity(self):
594 600 '''Return identity of dirstate itself to detect changing in storage
595 601
596 602 If identity of previous dirstate is equal to this, writing
597 603 changes based on the former dirstate out can keep consistency.
598 604 '''
599 605 return self._map.identity
600 606
    def write(self, tr):
        """Write dirstate changes to disk, or schedule them on transaction `tr`.

        No-op when nothing is dirty.  With a transaction, the actual write
        is registered as a file generator; only ambiguous timestamps are
        dropped eagerly.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        st = self._opener(filename, "w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
628 634
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # a later registration under the same category replaces the earlier one
        self._plchangecallbacks[category] = callback
639 645
    def _writedirstate(self, st):
        """Serialize the dirstate map to the open file object `st`.

        Fires parent-change callbacks first, then optionally sleeps (the
        debug.dirstate.delaywrite knob) so that no entry shares the 'now'
        timestamp, and finally writes the map and clears the dirty flag.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.iteritems()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            items = self._map.iteritems()
            for f, e in items:
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break
            # since the iterator is potentially not deleted,
            # delete the iterator to release the reference for the Rust
            # implementation.
            # TODO make the Rust implementation behave like Python
            # since this would not work with a non ref-counting GC.
            del items

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
677 683
678 684 def _dirignore(self, f):
679 685 if self._ignore(f):
680 686 return True
681 687 for p in util.finddirs(f):
682 688 if self._ignore(p):
683 689 return True
684 690 return False
685 691
686 692 def _ignorefiles(self):
687 693 files = []
688 694 if os.path.exists(self._join('.hgignore')):
689 695 files.append(self._join('.hgignore'))
690 696 for name, path in self._ui.configitems("ui"):
691 697 if name == 'ignore' or name.startswith('ignore.'):
692 698 # we need to use os.path.join here rather than self._join
693 699 # because path is arbitrary and user-specified
694 700 files.append(os.path.join(self._rootdir, util.expandpath(path)))
695 701 return files
696 702
    def _ignorefileandline(self, f):
        """Given a file `f`, return the ignore file and line that ignores it.

        Returns a (patternfile, lineno, line) triple, or (None, -1, "") when
        nothing matches.  'subinclude:' patterns are followed breadth-first,
        each file visited at most once.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(i, self._ui.warn,
                                                sourceinfo=True)
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, 'glob')
                if kind == "subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(self._root, '', [], [pattern],
                                   warn=self._ui.warn)
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, "")
716 722
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # Human-readable description of a non-regular, non-directory,
            # non-symlink filesystem entry, for the match.bad callback.
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        # Bind frequently-used attributes to locals; this function is on the
        # status hot path and may be called with many explicit files.
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # Drop explicit files inside a subrepo; the subrepo handles them
        # itself.  Both lists are sorted, so walk them in lockstep.
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '' in files:
            files = ['']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # Sentinels: subrepo roots and '.hg' map to None so that later
        # lookups stop walks from escaping into them.
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if self._map.hasdir(nf):
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == '.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # Group the stat'ed files by their case-normalized form.
            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._map.dirfoldmap)
                        if path != folded:
                            # Case-only mismatch with the filesystem: treat
                            # this spelling as nonexistent.
                            results[path] = None

        return results, dirsfound, dirsnotfound
855 861
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Select which files/directories to prune based on what the caller
        # asked for (ignored implies unknown here; callers enforce that).
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Bind hot attributes to locals -- this is the status fast path.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Iterative depth-first traversal using `work` as an explicit
            # stack, stat'ing every entry under the starting directories.
            wadd = work.append
            while work:
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == 'this' or visitentries == 'all':
                    visitentries = None
                skip = None
                if nd != '':
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd),
                                  encoding.strtolocal(inst.strerror))
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # Drop the sentinels installed by _walkexplicit.
        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1031 1037
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()
        # Bind methods and list.append callables to locals: the loop below
        # runs once per file in the working copy, so attribute lookups are
        # a measurable cost.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append            # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if not dcontains(fn):
                # Not tracked: it is either ignored or unknown.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in "nma":
                # Tracked but gone from disk.
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif (time != st[stat.ST_MTIME]
                      and time != st[stat.ST_MTIME] & _rangemask):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1127 1133
1128 1134 def matches(self, match):
1129 1135 '''
1130 1136 return files in the dirstate (in whatever state) filtered by match
1131 1137 '''
1132 1138 dmap = self._map
1133 1139 if match.always():
1134 1140 return dmap.keys()
1135 1141 files = match.files()
1136 1142 if match.isexact():
1137 1143 # fast path -- filter the other way around, since typically files is
1138 1144 # much smaller than dmap
1139 1145 return [f for f in files if f in dmap]
1140 1146 if match.prefix() and all(fn in dmap for fn in files):
1141 1147 # fast path -- all the values are known to be files, so just return
1142 1148 # that
1143 1149 return list(files)
1144 1150 return [f for f in dmap if match(f)]
1145 1151
1146 1152 def _actualfilename(self, tr):
1147 1153 if tr:
1148 1154 return self._pendingfilename
1149 1155 else:
1150 1156 return self._filename
1151 1157
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file.

        tr is the active transaction (or None); backupname is the vfs path
        the backup is written to, later consumed by restorebackup() or
        clearbackup().
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)
1181 1187
1182 1188 def restorebackup(self, tr, backupname):
1183 1189 '''Restore dirstate by backup file'''
1184 1190 # this "invalidate()" prevents "wlock.release()" from writing
1185 1191 # changes of dirstate out after restoring from backup file
1186 1192 self.invalidate()
1187 1193 filename = self._actualfilename(tr)
1188 1194 o = self._opener
1189 1195 if util.samefile(o.join(backupname), o.join(filename)):
1190 1196 o.unlink(backupname)
1191 1197 else:
1192 1198 o.rename(backupname, filename, checkambig=True)
1193 1199
1194 1200 def clearbackup(self, tr, backupname):
1195 1201 '''Clear backup file'''
1196 1202 self._opener.unlink(backupname)
1197 1203
class dirstatemap(object):
    """Map encapsulating the dirstate's contents.

    The dirstate contains the following state:

    - `identity` is the identity of the dirstate file, which can be used to
      detect when changes have occurred to the dirstate file.

    - `parents` is a pair containing the parents of the working copy. The
      parents are updated by calling `setparents`.

    - the state map maps filenames to tuples of (state, mode, size, mtime),
      where state is a single character representing 'normal', 'added',
      'removed', or 'merged'. It is read by treating the dirstate as a
      dict.  File state is updated by calling the `addfile`, `removefile` and
      `dropfile` methods.

    - `copymap` maps destination filenames to their source filename.

    The dirstate also provides the following views onto the state:

    - `nonnormalset` is a set of the filenames that have state other
      than 'normal', or are normal but have an mtime of -1 ('normallookup').

    - `otherparentset` is a set of the filenames that are marked as coming
      from the second parent when the dirstate is currently being merged.

    - `filefoldmap` is a dict mapping normalized filenames to the denormalized
      form that they appear as in the dirstate.

    - `dirfoldmap` is a dict mapping normalized directory names to the
      denormalized form that they appear as in the dirstate.
    """

    def __init__(self, ui, opener, root):
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = 'dirstate'

        self._parents = None
        self._dirtyparents = False

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    @propertycache
    def _map(self):
        # Replace the propertycache slot with a real dict before read()
        # populates it (read() accesses self._map itself).
        self._map = {}
        self.read()
        return self._map

    @propertycache
    def copymap(self):
        # Force a read of _map, which fills copymap as a side effect.
        self.copymap = {}
        self._map
        return self.copymap

    def clear(self):
        """Reset to an empty map with null parents; drop derived caches."""
        self._map.clear()
        self.copymap.clear()
        self.setparents(nullid, nullid)
        util.clearcachedproperty(self, "_dirs")
        util.clearcachedproperty(self, "_alldirs")
        util.clearcachedproperty(self, "filefoldmap")
        util.clearcachedproperty(self, "dirfoldmap")
        util.clearcachedproperty(self, "nonnormalset")
        util.clearcachedproperty(self, "otherparentset")

    def items(self):
        return self._map.iteritems()

    # forward for python2,3 compat
    iteritems = items

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __getitem__(self, key):
        return self._map[key]

    def keys(self):
        return self._map.keys()

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map

    def addfile(self, f, oldstate, state, mode, size, mtime):
        """Add a tracked file to the dirstate."""
        # Keep the directory caches in sync only if they were built already.
        if oldstate in "?r" and r"_dirs" in self.__dict__:
            self._dirs.addpath(f)
        if oldstate == "?" and r"_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != 'n' or mtime == -1:
            self.nonnormalset.add(f)
        if size == -2:
            self.otherparentset.add(f)

    def removefile(self, f, oldstate, size):
        """
        Mark a file as removed in the dirstate.

        The `size` parameter is used to store sentinel values that indicate
        the file's previous state. In the future, we should refactor this
        to be more explicit about what that state is.
        """
        if oldstate not in "?r" and r"_dirs" in self.__dict__:
            self._dirs.delpath(f)
        if oldstate == "?" and r"_alldirs" in self.__dict__:
            # An unknown file becomes a removed entry, so it now counts for
            # the "all files" directory map.
            self._alldirs.addpath(f)
        if r"filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self._map[f] = dirstatetuple('r', 0, size, 0)
        self.nonnormalset.add(f)

    def dropfile(self, f, oldstate):
        """
        Remove a file from the dirstate.  Returns True if the file was
        previously recorded.
        """
        exists = self._map.pop(f, None) is not None
        if exists:
            if oldstate != "r" and r"_dirs" in self.__dict__:
                self._dirs.delpath(f)
            if r"_alldirs" in self.__dict__:
                self._alldirs.delpath(f)
        if r"filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self.nonnormalset.discard(f)
        return exists

    def clearambiguoustimes(self, files, now):
        # Entries whose mtime equals `now` cannot be trusted (the file may
        # change again within the same second); force a later lookup by
        # storing mtime -1.
        for f in files:
            e = self.get(f)
            if e is not None and e[0] == 'n' and e[3] == now:
                self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
                self.nonnormalset.add(f)

    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            # Use the C fast path when the parsers module provides it.
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
            nonnorm = set()
            otherparent = set()
            for fname, e in self._map.iteritems():
                if e[0] != 'n' or e[3] == -1:
                    nonnorm.add(fname)
                if e[0] == 'n' and e[2] == -2:
                    otherparent.add(fname)
            return nonnorm, otherparent

    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            if s[0] != 'r':
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f

    def hastrackeddir(self, d):
        """
        Returns True if the dirstate contains a tracked (not removed) file
        in this directory.
        """
        return d in self._dirs

    def hasdir(self, d):
        """
        Returns True if the dirstate contains a file (tracked or removed)
        in this directory.
        """
        return d in self._alldirs

    @propertycache
    def _dirs(self):
        # Directory multiset over tracked files only ('r' entries excluded).
        return util.dirs(self._map, 'r')

    @propertycache
    def _alldirs(self):
        # Directory multiset over all entries, including removed ones.
        return util.dirs(self._map)

    def _opendirstatefile(self):
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            # Refuse to mix pending and non-pending reads within one object.
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed parallelly'))
        self._pendingmode = mode
        return fp

    def parents(self):
        if not self._parents:
            try:
                fp = self._opendirstatefile()
                # The first 40 bytes of the dirstate are the two parent
                # binary node ids (20 bytes each).
                st = fp.read(40)
                fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                st = ''

            l = len(st)
            if l == 40:
                self._parents = (st[:20], st[20:40])
            elif l == 0:
                self._parents = (nullid, nullid)
            else:
                raise error.Abort(_('working directory state appears '
                                    'damaged!'))

        return self._parents

    def setparents(self, p1, p2):
        self._parents = (p1, p2)
        self._dirtyparents = True

    def read(self):
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename))

        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get

    def write(self, st, now):
        # st is a file-like object; now is the wall-clock time used to
        # detect ambiguous mtimes at pack time.
        st.write(parsers.pack_dirstate(self._map, self.copymap,
                                       self.parents(), now))
        st.close()
        self._dirtyparents = False
        self.nonnormalset, self.otherparentset = self.nonnormalentries()

    @propertycache
    def nonnormalset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.otherparentset = otherparents
        return nonnorm

    @propertycache
    def otherparentset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.nonnormalset = nonnorm
        return otherparents

    @propertycache
    def identity(self):
        # Reading _map sets self.identity as a side effect (see read()).
        self._map
        return self.identity

    @propertycache
    def dirfoldmap(self):
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f
1526 1532
1527 1533
if rustmod is not None:
    # When the Rust extensions are available, replace the pure-Python
    # dirstatemap with a thin wrapper delegating to rustmod.DirstateMap.
    class dirstatemap(object):
        def __init__(self, ui, opener, root):
            self._ui = ui
            self._opener = opener
            self._root = root
            self._filename = 'dirstate'
            self._parents = None
            self._dirtyparents = False

            # for consistent view between _pl() and _read() invocations
            self._pendingmode = None

        def addfile(self, *args, **kwargs):
            return self._rustmap.addfile(*args, **kwargs)

        def removefile(self, *args, **kwargs):
            return self._rustmap.removefile(*args, **kwargs)

        def dropfile(self, *args, **kwargs):
            return self._rustmap.dropfile(*args, **kwargs)

        def clearambiguoustimes(self, *args, **kwargs):
            return self._rustmap.clearambiguoustimes(*args, **kwargs)

        def nonnormalentries(self):
            return self._rustmap.nonnormalentries()

        def get(self, *args, **kwargs):
            return self._rustmap.get(*args, **kwargs)

        @propertycache
        def _rustmap(self):
            # Replace the propertycache slot before read() uses it.
            self._rustmap = rustmod.DirstateMap(self._root)
            self.read()
            return self._rustmap

        @property
        def copymap(self):
            return self._rustmap.copymap()

        def preload(self):
            self._rustmap

        def clear(self):
            self._rustmap.clear()
            self.setparents(nullid, nullid)
            util.clearcachedproperty(self, "_dirs")
            util.clearcachedproperty(self, "_alldirs")
            util.clearcachedproperty(self, "dirfoldmap")

        def items(self):
            return self._rustmap.items()

        def keys(self):
            return iter(self._rustmap)

        def __contains__(self, key):
            return key in self._rustmap

        def __getitem__(self, item):
            return self._rustmap[item]

        def __len__(self):
            return len(self._rustmap)

        def __iter__(self):
            return iter(self._rustmap)

        # forward for python2,3 compat
        iteritems = items

        def _opendirstatefile(self):
            fp, mode = txnutil.trypending(self._root, self._opener,
                                          self._filename)
            if self._pendingmode is not None and self._pendingmode != mode:
                fp.close()
                raise error.Abort(_('working directory state may be '
                                    'changed parallelly'))
            self._pendingmode = mode
            return fp

        def setparents(self, p1, p2):
            self._rustmap.setparents(p1, p2)
            self._parents = (p1, p2)
            self._dirtyparents = True

        def parents(self):
            if not self._parents:
                try:
                    fp = self._opendirstatefile()
                    # First 40 bytes are the two binary parent node ids.
                    st = fp.read(40)
                    fp.close()
                except IOError as err:
                    if err.errno != errno.ENOENT:
                        raise
                    # File doesn't exist, so the current state is empty
                    st = ''

                try:
                    self._parents = self._rustmap.parents(st)
                except ValueError:
                    raise error.Abort(_('working directory state appears '
                                        'damaged!'))

            return self._parents

        def read(self):
            # ignore HG_PENDING because identity is used only for writing
            self.identity = util.filestat.frompath(
                self._opener.join(self._filename))

            try:
                fp = self._opendirstatefile()
                try:
                    st = fp.read()
                finally:
                    fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                return
            if not st:
                return

            # Disable GC while parsing, same rationale as the pure-Python
            # implementation's read().
            parse_dirstate = util.nogc(self._rustmap.read)
            parents = parse_dirstate(st)
            if parents and not self._dirtyparents:
                self.setparents(*parents)

        def write(self, st, now):
            parents = self.parents()
            st.write(self._rustmap.write(parents[0], parents[1], now))
            st.close()
            self._dirtyparents = False

        @propertycache
        def filefoldmap(self):
            """Returns a dictionary mapping normalized case paths to their
            non-normalized versions.
            """
            return self._rustmap.filefoldmapasdict()

        def hastrackeddir(self, d):
            self._dirs # Trigger Python's propertycache
            return self._rustmap.hastrackeddir(d)

        def hasdir(self, d):
            self._dirs # Trigger Python's propertycache
            return self._rustmap.hasdir(d)

        @propertycache
        def _dirs(self):
            return self._rustmap.getdirs()

        @propertycache
        def _alldirs(self):
            return self._rustmap.getalldirs()

        @propertycache
        def identity(self):
            # Reading _rustmap sets self.identity as a side effect (read()).
            self._rustmap
            return self.identity

        @property
        def nonnormalset(self):
            nonnorm, otherparents = self._rustmap.nonnormalentries()
            return nonnorm

        @property
        def otherparentset(self):
            nonnorm, otherparents = self._rustmap.nonnormalentries()
            return otherparents

        @propertycache
        def dirfoldmap(self):
            f = {}
            normcase = util.normcase
            for name in self._dirs:
                f[normcase(name)] = name
            return f
@@ -1,235 +1,239 b''
1 1 # Test that certain objects conform to well-defined interfaces.
2 2
3 3 from __future__ import absolute_import, print_function
4 4
5 5 from mercurial import encoding
6 6 encoding.environ[b'HGREALINTERFACES'] = b'1'
7 7
8 8 import os
9 9 import subprocess
10 10 import sys
11 11
# Only run if tests are run in a repo
if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
                    'test-repo']):
    # Presumably exit code 80 is the harness convention for "test
    # skipped" — TODO confirm against the Mercurial test runner.
    sys.exit(80)
16 16
17 17 from mercurial.interfaces import (
18 dirstate as intdirstate,
18 19 repository,
19 20 )
20 21 from mercurial.thirdparty.zope import (
21 22 interface as zi,
22 23 )
23 24 from mercurial.thirdparty.zope.interface import (
24 25 verify as ziverify,
25 26 )
26 27 from mercurial import (
27 28 bundlerepo,
29 dirstate,
28 30 filelog,
29 31 httppeer,
30 32 localrepo,
31 33 manifest,
32 34 pycompat,
33 35 revlog,
34 36 sshpeer,
35 37 statichttprepo,
36 38 ui as uimod,
37 39 unionrepo,
38 40 vfs as vfsmod,
39 41 wireprotoserver,
40 42 wireprototypes,
41 43 wireprotov1peer,
42 44 wireprotov2server,
43 45 )
44 46
# Locate this test file and derive the repository root (one level up),
# which is later opened as a real local repository.
testdir = os.path.dirname(__file__)
rootdir = pycompat.fsencode(os.path.normpath(os.path.join(testdir, '..')))

# Temporarily prepend the test directory so the simplestorerepo test
# extension can be imported, then remove it again so it cannot shadow
# other imports.
sys.path[0:0] = [testdir]
import simplestorerepo
del sys.path[0]
51 53
def checkzobject(o, allowextra=False):
    """Verify an object with a zope interface.

    Prints a diagnostic when *o* declares no interfaces at all, and —
    unless ``allowextra`` is set — one line per public attribute that is
    not declared by any of its interfaces.
    """
    provided = zi.providedBy(o)
    if not provided:
        print('%r does not provide any zope interfaces' % o)
        return

    # Run zope.interface's built-in verification routine. This verifies that
    # everything that is supposed to be present is present.
    for iface in provided:
        ziverify.verifyObject(iface, o)

    if allowextra:
        return

    # Now verify that the object provides no extra public attributes that
    # aren't declared as part of interfaces.
    declared = set()
    for iface in provided:
        declared.update(iface.names(all=True))

    public = {name for name in dir(o) if not name.startswith('_')}

    for attr in sorted(public - declared):
        print('public attribute not declared in interfaces: %s.%s' % (
            o.__class__.__name__, attr))
78 80
# Facilitates testing localpeer.
class dummyrepo(object):
    """Bare-bones repository stub: just enough surface for localpeer()."""

    def __init__(self):
        self.ui = uimod.ui()

    def filtered(self, name):
        pass

    def _restrictcapabilities(self, caps):
        pass
87 89
class dummyopener(object):
    """URL-opener stub exposing an empty ``handlers`` list."""
    handlers = []
90 92
# Facilitates testing sshpeer without requiring a server.
class badpeer(httppeer.httppeer):
    """Peer that deliberately carries undeclared public members, used to
    exercise checkzobject()'s extra-attribute detection.
    """

    def __init__(self):
        super(badpeer, self).__init__(None, None, None, dummyopener(), None,
                                      None)
        # Public attribute not declared by any interface — should be
        # reported by checkzobject().
        self.badattribute = True

    def badmethod(self):
        pass
100 102
class dummypipe(object):
    """File-like stub whose only supported operation is close()."""

    def close(self):
        pass
104 106
def main():
    """Verify that concrete Mercurial classes conform to their interfaces.

    Diagnostics go to stdout, which the test harness presumably compares
    against expected output — so the order of the checks below matters
    and must not be rearranged.
    """
    ui = uimod.ui()
    # Needed so we can open a local repo with obsstore without a warning.
    ui.setconfig(b'experimental', b'evolution.createmarkers', True)

    checkzobject(badpeer())

    # Peer classes and their command executors.
    ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
    checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None,
                                   None))

    ziverify.verifyClass(repository.ipeerv2, httppeer.httpv2peer)
    checkzobject(httppeer.httpv2peer(None, b'', b'', None, None, None))

    ziverify.verifyClass(repository.ipeerbase,
                         localrepo.localpeer)
    checkzobject(localrepo.localpeer(dummyrepo()))

    ziverify.verifyClass(repository.ipeercommandexecutor,
                         localrepo.localcommandexecutor)
    checkzobject(localrepo.localcommandexecutor(None))

    ziverify.verifyClass(repository.ipeercommandexecutor,
                         wireprotov1peer.peerexecutor)
    checkzobject(wireprotov1peer.peerexecutor(None))

    ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv1peer)
    checkzobject(sshpeer.sshv1peer(ui, b'ssh://localhost/foo', b'',
                                   dummypipe(), dummypipe(), None, None))

    ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer)
    checkzobject(sshpeer.sshv2peer(ui, b'ssh://localhost/foo', b'',
                                   dummypipe(), dummypipe(), None, None))

    ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
    checkzobject(bundlerepo.bundlepeer(dummyrepo()))

    ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
    checkzobject(statichttprepo.statichttppeer(dummyrepo()))

    ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
    checkzobject(unionrepo.unionpeer(dummyrepo()))

    # Local repository classes, checked against a real repo opened at the
    # source tree root.
    ziverify.verifyClass(repository.ilocalrepositorymain,
                         localrepo.localrepository)
    ziverify.verifyClass(repository.ilocalrepositoryfilestorage,
                         localrepo.revlogfilestorage)
    repo = localrepo.makelocalrepository(ui, rootdir)
    checkzobject(repo)

    # Wire protocol handler classes.
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotoserver.sshv1protocolhandler)
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotoserver.sshv2protocolhandler)
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotoserver.httpv1protocolhandler)
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotov2server.httpv2protocolhandler)

    sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
    checkzobject(sshv1)
    sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
    checkzobject(sshv2)

    httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
    checkzobject(httpv1)
    httpv2 = wireprotov2server.httpv2protocolhandler(None, None)
    checkzobject(httpv2)

    # Storage classes: filelog and manifest implementations.
    ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
    ziverify.verifyClass(repository.imanifestdict, manifest.manifestdict)
    ziverify.verifyClass(repository.imanifestrevisionstored,
                         manifest.manifestctx)
    ziverify.verifyClass(repository.imanifestrevisionwritable,
                         manifest.memmanifestctx)
    ziverify.verifyClass(repository.imanifestrevisionstored,
                         manifest.treemanifestctx)
    ziverify.verifyClass(repository.imanifestrevisionwritable,
                         manifest.memtreemanifestctx)
    ziverify.verifyClass(repository.imanifestlog, manifest.manifestlog)
    ziverify.verifyClass(repository.imanifeststorage, manifest.manifestrevlog)

    # The simplestorerepo test extension's storage classes.
    ziverify.verifyClass(repository.irevisiondelta,
                         simplestorerepo.simplestorerevisiondelta)
    ziverify.verifyClass(repository.ifilestorage, simplestorerepo.filestorage)
    ziverify.verifyClass(repository.iverifyproblem,
                         simplestorerepo.simplefilestoreproblem)

    # The dirstate interface introduced by this change.
    ziverify.verifyClass(intdirstate.idirstate, dirstate.dirstate)

    vfs = vfsmod.vfs(b'.')
    fl = filelog.filelog(vfs, b'dummy.i')
    checkzobject(fl, allowextra=True)

    # Conforms to imanifestlog.
    ml = manifest.manifestlog(vfs, repo, manifest.manifestrevlog(repo.svfs),
                              repo.narrowmatch())
    checkzobject(ml)
    checkzobject(repo.manifestlog)

    # Conforms to imanifestrevision.
    mctx = ml[repo[0].manifestnode()]
    checkzobject(mctx)

    # Conforms to imanifestrevisionwritable.
    checkzobject(mctx.new())
    checkzobject(mctx.copy())

    # Conforms to imanifestdict.
    checkzobject(mctx.read())

    mrl = manifest.manifestrevlog(vfs)
    checkzobject(mrl)

    # Revision delta and verify-problem helper objects from revlog.
    ziverify.verifyClass(repository.irevisiondelta,
                         revlog.revlogrevisiondelta)

    rd = revlog.revlogrevisiondelta(
        node=b'',
        p1node=b'',
        p2node=b'',
        basenode=b'',
        linknode=b'',
        flags=b'',
        baserevisionsize=None,
        revision=b'',
        delta=None)
    checkzobject(rd)

    ziverify.verifyClass(repository.iverifyproblem,
                         revlog.revlogproblem)
    checkzobject(revlog.revlogproblem())
# Run unconditionally: this file is executed as a script by the test
# harness, not imported as a module.
main()
General Comments 0
You need to be logged in to leave comments. Login now