vfs: add "mkstemp()"
FUJIWARA Katsunori
r20980:6fb4c94a default
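The new mkstemp() wrapper creates a temporary file with Python's tempfile.mkstemp(), but anchors it under the vfs base and returns a name relative to that base, so the result can be passed straight to other vfs methods. A minimal usage sketch follows; the base directory and file names are illustrative assumptions, not part of the changeset:

    import os
    from mercurial import scmutil

    vfsobj = scmutil.vfs('/tmp/hg-vfs-demo')  # hypothetical base directory
    vfsobj.makedirs()                         # make sure the base exists

    # create a scratch file directly under the vfs base
    fd, tmpname = vfsobj.mkstemp(prefix='journal.')
    try:
        os.write(fd, b'scratch data\n')
    finally:
        os.close(fd)

    # tmpname is relative to the vfs base, so it composes with other
    # vfs methods such as rename() and unlink()
    vfsobj.rename(tmpname, 'journal')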
@@ -1,945 +1,954 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 import os, errno, re, glob
13 import os, errno, re, glob, tempfile
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 def itersubrepos(ctx1, ctx2):
24 24 """find subrepos in ctx1 or ctx2"""
25 25 # Create a (subpath, ctx) mapping where we prefer subpaths from
26 26 # ctx1. The subpaths from ctx2 are important when the .hgsub file
27 27 # has been modified (in ctx2) but not yet committed (in ctx1).
28 28 subpaths = dict.fromkeys(ctx2.substate, ctx2)
29 29 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
30 30 for subpath, ctx in sorted(subpaths.iteritems()):
31 31 yield subpath, ctx.sub(subpath)
32 32
33 33 def nochangesfound(ui, repo, excluded=None):
34 34 '''Report no changes for push/pull, excluded is None or a list of
35 35 nodes excluded from the push/pull.
36 36 '''
37 37 secretlist = []
38 38 if excluded:
39 39 for n in excluded:
40 40 if n not in repo:
41 41 # discovery should not have included the filtered revision,
42 42 # we have to explicitly exclude it until discovery is cleaned up.
43 43 continue
44 44 ctx = repo[n]
45 45 if ctx.phase() >= phases.secret and not ctx.extinct():
46 46 secretlist.append(n)
47 47
48 48 if secretlist:
49 49 ui.status(_("no changes found (ignored %d secret changesets)\n")
50 50 % len(secretlist))
51 51 else:
52 52 ui.status(_("no changes found\n"))
53 53
54 54 def checknewlabel(repo, lbl, kind):
55 55 # Do not use the "kind" parameter in ui output.
56 56 # It makes strings difficult to translate.
57 57 if lbl in ['tip', '.', 'null']:
58 58 raise util.Abort(_("the name '%s' is reserved") % lbl)
59 59 for c in (':', '\0', '\n', '\r'):
60 60 if c in lbl:
61 61 raise util.Abort(_("%r cannot be used in a name") % c)
62 62 try:
63 63 int(lbl)
64 64 raise util.Abort(_("cannot use an integer as a name"))
65 65 except ValueError:
66 66 pass
67 67
68 68 def checkfilename(f):
69 69 '''Check that the filename f is an acceptable filename for a tracked file'''
70 70 if '\r' in f or '\n' in f:
71 71 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
72 72
73 73 def checkportable(ui, f):
74 74 '''Check if filename f is portable and warn or abort depending on config'''
75 75 checkfilename(f)
76 76 abort, warn = checkportabilityalert(ui)
77 77 if abort or warn:
78 78 msg = util.checkwinfilename(f)
79 79 if msg:
80 80 msg = "%s: %r" % (msg, f)
81 81 if abort:
82 82 raise util.Abort(msg)
83 83 ui.warn(_("warning: %s\n") % msg)
84 84
85 85 def checkportabilityalert(ui):
86 86 '''check if the user's config requests nothing, a warning, or abort for
87 87 non-portable filenames'''
88 88 val = ui.config('ui', 'portablefilenames', 'warn')
89 89 lval = val.lower()
90 90 bval = util.parsebool(val)
91 91 abort = os.name == 'nt' or lval == 'abort'
92 92 warn = bval or lval == 'warn'
93 93 if bval is None and not (warn or abort or lval == 'ignore'):
94 94 raise error.ConfigError(
95 95 _("ui.portablefilenames value is invalid ('%s')") % val)
96 96 return abort, warn
97 97
98 98 class casecollisionauditor(object):
99 99 def __init__(self, ui, abort, dirstate):
100 100 self._ui = ui
101 101 self._abort = abort
102 102 allfiles = '\0'.join(dirstate._map)
103 103 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
104 104 self._dirstate = dirstate
105 105 # The purpose of _newfiles is so that we don't complain about
106 106 # case collisions if someone were to call this object with the
107 107 # same filename twice.
108 108 self._newfiles = set()
109 109
110 110 def __call__(self, f):
111 111 if f in self._newfiles:
112 112 return
113 113 fl = encoding.lower(f)
114 114 if fl in self._loweredfiles and f not in self._dirstate:
115 115 msg = _('possible case-folding collision for %s') % f
116 116 if self._abort:
117 117 raise util.Abort(msg)
118 118 self._ui.warn(_("warning: %s\n") % msg)
119 119 self._loweredfiles.add(fl)
120 120 self._newfiles.add(f)
121 121
122 122 class abstractvfs(object):
123 123 """Abstract base class; cannot be instantiated"""
124 124
125 125 def __init__(self, *args, **kwargs):
126 126 '''Prevent instantiation; don't call this from subclasses.'''
127 127 raise NotImplementedError('attempted instantiating ' + str(type(self)))
128 128
129 129 def tryread(self, path):
130 130 '''gracefully return an empty string for missing files'''
131 131 try:
132 132 return self.read(path)
133 133 except IOError, inst:
134 134 if inst.errno != errno.ENOENT:
135 135 raise
136 136 return ""
137 137
138 138 def open(self, path, mode="r", text=False, atomictemp=False):
139 139 self.open = self.__call__
140 140 return self.__call__(path, mode, text, atomictemp)
141 141
142 142 def read(self, path):
143 143 fp = self(path, 'rb')
144 144 try:
145 145 return fp.read()
146 146 finally:
147 147 fp.close()
148 148
149 149 def write(self, path, data):
150 150 fp = self(path, 'wb')
151 151 try:
152 152 return fp.write(data)
153 153 finally:
154 154 fp.close()
155 155
156 156 def append(self, path, data):
157 157 fp = self(path, 'ab')
158 158 try:
159 159 return fp.write(data)
160 160 finally:
161 161 fp.close()
162 162
163 163 def chmod(self, path, mode):
164 164 return os.chmod(self.join(path), mode)
165 165
166 166 def exists(self, path=None):
167 167 return os.path.exists(self.join(path))
168 168
169 169 def fstat(self, fp):
170 170 return util.fstat(fp)
171 171
172 172 def isdir(self, path=None):
173 173 return os.path.isdir(self.join(path))
174 174
175 175 def isfile(self, path=None):
176 176 return os.path.isfile(self.join(path))
177 177
178 178 def islink(self, path=None):
179 179 return os.path.islink(self.join(path))
180 180
181 181 def lstat(self, path=None):
182 182 return os.lstat(self.join(path))
183 183
184 184 def makedir(self, path=None, notindexed=True):
185 185 return util.makedir(self.join(path), notindexed)
186 186
187 187 def makedirs(self, path=None, mode=None):
188 188 return util.makedirs(self.join(path), mode)
189 189
190 190 def makelock(self, info, path):
191 191 return util.makelock(info, self.join(path))
192 192
193 193 def mkdir(self, path=None):
194 194 return os.mkdir(self.join(path))
195 195
196 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
197 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
198 dir=self.join(dir), text=text)
199 dname, fname = util.split(name)
200 if dir:
201 return fd, os.path.join(dir, fname)
202 else:
203 return fd, fname
204
196 205 def readdir(self, path=None, stat=None, skip=None):
197 206 return osutil.listdir(self.join(path), stat, skip)
198 207
199 208 def readlock(self, path):
200 209 return util.readlock(self.join(path))
201 210
202 211 def rename(self, src, dst):
203 212 return util.rename(self.join(src), self.join(dst))
204 213
205 214 def readlink(self, path):
206 215 return os.readlink(self.join(path))
207 216
208 217 def setflags(self, path, l, x):
209 218 return util.setflags(self.join(path), l, x)
210 219
211 220 def stat(self, path=None):
212 221 return os.stat(self.join(path))
213 222
214 223 def unlink(self, path=None):
215 224 return util.unlink(self.join(path))
216 225
217 226 def utime(self, path=None, t=None):
218 227 return os.utime(self.join(path), t)
219 228
220 229 class vfs(abstractvfs):
221 230 '''Operate files relative to a base directory
222 231
223 232 This class is used to hide the details of COW semantics and
224 233 remote file access from higher level code.
225 234 '''
226 235 def __init__(self, base, audit=True, expandpath=False, realpath=False):
227 236 if expandpath:
228 237 base = util.expandpath(base)
229 238 if realpath:
230 239 base = os.path.realpath(base)
231 240 self.base = base
232 241 self._setmustaudit(audit)
233 242 self.createmode = None
234 243 self._trustnlink = None
235 244
236 245 def _getmustaudit(self):
237 246 return self._audit
238 247
239 248 def _setmustaudit(self, onoff):
240 249 self._audit = onoff
241 250 if onoff:
242 251 self.audit = pathutil.pathauditor(self.base)
243 252 else:
244 253 self.audit = util.always
245 254
246 255 mustaudit = property(_getmustaudit, _setmustaudit)
247 256
248 257 @util.propertycache
249 258 def _cansymlink(self):
250 259 return util.checklink(self.base)
251 260
252 261 @util.propertycache
253 262 def _chmod(self):
254 263 return util.checkexec(self.base)
255 264
256 265 def _fixfilemode(self, name):
257 266 if self.createmode is None or not self._chmod:
258 267 return
259 268 os.chmod(name, self.createmode & 0666)
260 269
261 270 def __call__(self, path, mode="r", text=False, atomictemp=False):
262 271 if self._audit:
263 272 r = util.checkosfilename(path)
264 273 if r:
265 274 raise util.Abort("%s: %r" % (r, path))
266 275 self.audit(path)
267 276 f = self.join(path)
268 277
269 278 if not text and "b" not in mode:
270 279 mode += "b" # for that other OS
271 280
272 281 nlink = -1
273 282 if mode not in ('r', 'rb'):
274 283 dirname, basename = util.split(f)
275 284 # If basename is empty, then the path is malformed because it points
276 285 # to a directory. Let the posixfile() call below raise IOError.
277 286 if basename:
278 287 if atomictemp:
279 288 util.ensuredirs(dirname, self.createmode)
280 289 return util.atomictempfile(f, mode, self.createmode)
281 290 try:
282 291 if 'w' in mode:
283 292 util.unlink(f)
284 293 nlink = 0
285 294 else:
286 295 # nlinks() may behave differently for files on Windows
287 296 # shares if the file is open.
288 297 fd = util.posixfile(f)
289 298 nlink = util.nlinks(f)
290 299 if nlink < 1:
291 300 nlink = 2 # force mktempcopy (issue1922)
292 301 fd.close()
293 302 except (OSError, IOError), e:
294 303 if e.errno != errno.ENOENT:
295 304 raise
296 305 nlink = 0
297 306 util.ensuredirs(dirname, self.createmode)
298 307 if nlink > 0:
299 308 if self._trustnlink is None:
300 309 self._trustnlink = nlink > 1 or util.checknlink(f)
301 310 if nlink > 1 or not self._trustnlink:
302 311 util.rename(util.mktempcopy(f), f)
303 312 fp = util.posixfile(f, mode)
304 313 if nlink == 0:
305 314 self._fixfilemode(f)
306 315 return fp
307 316
308 317 def symlink(self, src, dst):
309 318 self.audit(dst)
310 319 linkname = self.join(dst)
311 320 try:
312 321 os.unlink(linkname)
313 322 except OSError:
314 323 pass
315 324
316 325 util.ensuredirs(os.path.dirname(linkname), self.createmode)
317 326
318 327 if self._cansymlink:
319 328 try:
320 329 os.symlink(src, linkname)
321 330 except OSError, err:
322 331 raise OSError(err.errno, _('could not symlink to %r: %s') %
323 332 (src, err.strerror), linkname)
324 333 else:
325 334 self.write(dst, src)
326 335
327 336 def join(self, path):
328 337 if path:
329 338 return os.path.join(self.base, path)
330 339 else:
331 340 return self.base
332 341
333 342 opener = vfs
334 343
335 344 class auditvfs(object):
336 345 def __init__(self, vfs):
337 346 self.vfs = vfs
338 347
339 348 def _getmustaudit(self):
340 349 return self.vfs.mustaudit
341 350
342 351 def _setmustaudit(self, onoff):
343 352 self.vfs.mustaudit = onoff
344 353
345 354 mustaudit = property(_getmustaudit, _setmustaudit)
346 355
347 356 class filtervfs(abstractvfs, auditvfs):
348 357 '''Wrapper vfs for filtering filenames with a function.'''
349 358
350 359 def __init__(self, vfs, filter):
351 360 auditvfs.__init__(self, vfs)
352 361 self._filter = filter
353 362
354 363 def __call__(self, path, *args, **kwargs):
355 364 return self.vfs(self._filter(path), *args, **kwargs)
356 365
357 366 def join(self, path):
358 367 if path:
359 368 return self.vfs.join(self._filter(path))
360 369 else:
361 370 return self.vfs.join(path)
362 371
363 372 filteropener = filtervfs
364 373
365 374 class readonlyvfs(abstractvfs, auditvfs):
366 375 '''Wrapper vfs preventing any writing.'''
367 376
368 377 def __init__(self, vfs):
369 378 auditvfs.__init__(self, vfs)
370 379
371 380 def __call__(self, path, mode='r', *args, **kw):
372 381 if mode not in ('r', 'rb'):
373 382 raise util.Abort('this vfs is read only')
374 383 return self.vfs(path, mode, *args, **kw)
375 384
376 385
377 386 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
378 387 '''yield every hg repository under path, always recursively.
379 388 The recurse flag will only control recursion into repo working dirs'''
380 389 def errhandler(err):
381 390 if err.filename == path:
382 391 raise err
383 392 samestat = getattr(os.path, 'samestat', None)
384 393 if followsym and samestat is not None:
385 394 def adddir(dirlst, dirname):
386 395 match = False
387 396 dirstat = os.stat(dirname)
388 397 for lstdirstat in dirlst:
389 398 if samestat(dirstat, lstdirstat):
390 399 match = True
391 400 break
392 401 if not match:
393 402 dirlst.append(dirstat)
394 403 return not match
395 404 else:
396 405 followsym = False
397 406
398 407 if (seen_dirs is None) and followsym:
399 408 seen_dirs = []
400 409 adddir(seen_dirs, path)
401 410 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
402 411 dirs.sort()
403 412 if '.hg' in dirs:
404 413 yield root # found a repository
405 414 qroot = os.path.join(root, '.hg', 'patches')
406 415 if os.path.isdir(os.path.join(qroot, '.hg')):
407 416 yield qroot # we have a patch queue repo here
408 417 if recurse:
409 418 # avoid recursing inside the .hg directory
410 419 dirs.remove('.hg')
411 420 else:
412 421 dirs[:] = [] # don't descend further
413 422 elif followsym:
414 423 newdirs = []
415 424 for d in dirs:
416 425 fname = os.path.join(root, d)
417 426 if adddir(seen_dirs, fname):
418 427 if os.path.islink(fname):
419 428 for hgname in walkrepos(fname, True, seen_dirs):
420 429 yield hgname
421 430 else:
422 431 newdirs.append(d)
423 432 dirs[:] = newdirs
424 433
425 434 def osrcpath():
426 435 '''return default os-specific hgrc search path'''
427 436 path = systemrcpath()
428 437 path.extend(userrcpath())
429 438 path = [os.path.normpath(f) for f in path]
430 439 return path
431 440
432 441 _rcpath = None
433 442
434 443 def rcpath():
435 444 '''return hgrc search path. if env var HGRCPATH is set, use it.
436 445 for each item in path, if directory, use files ending in .rc,
437 446 else use item.
438 447 make HGRCPATH empty to only look in .hg/hgrc of current repo.
439 448 if no HGRCPATH, use default os-specific path.'''
440 449 global _rcpath
441 450 if _rcpath is None:
442 451 if 'HGRCPATH' in os.environ:
443 452 _rcpath = []
444 453 for p in os.environ['HGRCPATH'].split(os.pathsep):
445 454 if not p:
446 455 continue
447 456 p = util.expandpath(p)
448 457 if os.path.isdir(p):
449 458 for f, kind in osutil.listdir(p):
450 459 if f.endswith('.rc'):
451 460 _rcpath.append(os.path.join(p, f))
452 461 else:
453 462 _rcpath.append(p)
454 463 else:
455 464 _rcpath = osrcpath()
456 465 return _rcpath
457 466
458 467 def revsingle(repo, revspec, default='.'):
459 468 if not revspec and revspec != 0:
460 469 return repo[default]
461 470
462 471 l = revrange(repo, [revspec])
463 472 if len(l) < 1:
464 473 raise util.Abort(_('empty revision set'))
465 474 return repo[l[-1]]
466 475
467 476 def revpair(repo, revs):
468 477 if not revs:
469 478 return repo.dirstate.p1(), None
470 479
471 480 l = revrange(repo, revs)
472 481
473 482 if not l:
474 483 first = second = None
475 484 elif l.isascending():
476 485 first = l.min()
477 486 second = l.max()
478 487 elif l.isdescending():
479 488 first = l.max()
480 489 second = l.min()
481 490 else:
482 491 l = list(l)
483 492 first = l[0]
484 493 second = l[-1]
485 494
486 495 if first is None:
487 496 raise util.Abort(_('empty revision range'))
488 497
489 498 if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
490 499 return repo.lookup(first), None
491 500
492 501 return repo.lookup(first), repo.lookup(second)
493 502
494 503 _revrangesep = ':'
495 504
496 505 def revrange(repo, revs):
497 506 """Yield revision as strings from a list of revision specifications."""
498 507
499 508 def revfix(repo, val, defval):
500 509 if not val and val != 0 and defval is not None:
501 510 return defval
502 511 return repo[val].rev()
503 512
504 513 seen, l = set(), revset.baseset([])
505 514 for spec in revs:
506 515 if l and not seen:
507 516 seen = set(l)
508 517 # attempt to parse old-style ranges first to deal with
509 518 # things like old-tag which contain query metacharacters
510 519 try:
511 520 if isinstance(spec, int):
512 521 seen.add(spec)
513 522 l = l + revset.baseset([spec])
514 523 continue
515 524
516 525 if _revrangesep in spec:
517 526 start, end = spec.split(_revrangesep, 1)
518 527 start = revfix(repo, start, 0)
519 528 end = revfix(repo, end, len(repo) - 1)
520 529 if end == nullrev and start < 0:
521 530 start = nullrev
522 531 rangeiter = repo.changelog.revs(start, end)
523 532 if not seen and not l:
524 533 # by far the most common case: revs = ["-1:0"]
525 534 l = revset.baseset(rangeiter)
526 535 # defer syncing seen until next iteration
527 536 continue
528 537 newrevs = set(rangeiter)
529 538 if seen:
530 539 newrevs.difference_update(seen)
531 540 seen.update(newrevs)
532 541 else:
533 542 seen = newrevs
534 543 l = l + revset.baseset(sorted(newrevs, reverse=start > end))
535 544 continue
536 545 elif spec and spec in repo: # single unquoted rev
537 546 rev = revfix(repo, spec, None)
538 547 if rev in seen:
539 548 continue
540 549 seen.add(rev)
541 550 l = l + revset.baseset([rev])
542 551 continue
543 552 except error.RepoLookupError:
544 553 pass
545 554
546 555 # fall through to new-style queries if old-style fails
547 556 m = revset.match(repo.ui, spec, repo)
548 557 if seen or l:
549 558 dl = [r for r in m(repo, revset.spanset(repo)) if r not in seen]
550 559 l = l + revset.baseset(dl)
551 560 seen.update(dl)
552 561 else:
553 562 l = m(repo, revset.spanset(repo))
554 563
555 564 return l
556 565
557 566 def expandpats(pats):
558 567 if not util.expandglobs:
559 568 return list(pats)
560 569 ret = []
561 570 for p in pats:
562 571 kind, name = matchmod._patsplit(p, None)
563 572 if kind is None:
564 573 try:
565 574 globbed = glob.glob(name)
566 575 except re.error:
567 576 globbed = [name]
568 577 if globbed:
569 578 ret.extend(globbed)
570 579 continue
571 580 ret.append(p)
572 581 return ret
573 582
574 583 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
575 584 if pats == ("",):
576 585 pats = []
577 586 if not globbed and default == 'relpath':
578 587 pats = expandpats(pats or [])
579 588
580 589 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
581 590 default)
582 591 def badfn(f, msg):
583 592 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
584 593 m.bad = badfn
585 594 return m, pats
586 595
587 596 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
588 597 return matchandpats(ctx, pats, opts, globbed, default)[0]
589 598
590 599 def matchall(repo):
591 600 return matchmod.always(repo.root, repo.getcwd())
592 601
593 602 def matchfiles(repo, files):
594 603 return matchmod.exact(repo.root, repo.getcwd(), files)
595 604
596 605 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
597 606 if dry_run is None:
598 607 dry_run = opts.get('dry_run')
599 608 if similarity is None:
600 609 similarity = float(opts.get('similarity') or 0)
601 610 # we'd use status here, except handling of symlinks and ignore is tricky
602 611 m = match(repo[None], pats, opts)
603 612 rejected = []
604 613 m.bad = lambda x, y: rejected.append(x)
605 614
606 615 added, unknown, deleted, removed = _interestingfiles(repo, m)
607 616
608 617 unknownset = set(unknown)
609 618 toprint = unknownset.copy()
610 619 toprint.update(deleted)
611 620 for abs in sorted(toprint):
612 621 if repo.ui.verbose or not m.exact(abs):
613 622 rel = m.rel(abs)
614 623 if abs in unknownset:
615 624 status = _('adding %s\n') % ((pats and rel) or abs)
616 625 else:
617 626 status = _('removing %s\n') % ((pats and rel) or abs)
618 627 repo.ui.status(status)
619 628
620 629 renames = _findrenames(repo, m, added + unknown, removed + deleted,
621 630 similarity)
622 631
623 632 if not dry_run:
624 633 _markchanges(repo, unknown, deleted, renames)
625 634
626 635 for f in rejected:
627 636 if f in m.files():
628 637 return 1
629 638 return 0
630 639
631 640 def marktouched(repo, files, similarity=0.0):
632 641 '''Assert that files have somehow been operated upon. files are relative to
633 642 the repo root.'''
634 643 m = matchfiles(repo, files)
635 644 rejected = []
636 645 m.bad = lambda x, y: rejected.append(x)
637 646
638 647 added, unknown, deleted, removed = _interestingfiles(repo, m)
639 648
640 649 if repo.ui.verbose:
641 650 unknownset = set(unknown)
642 651 toprint = unknownset.copy()
643 652 toprint.update(deleted)
644 653 for abs in sorted(toprint):
645 654 if abs in unknownset:
646 655 status = _('adding %s\n') % abs
647 656 else:
648 657 status = _('removing %s\n') % abs
649 658 repo.ui.status(status)
650 659
651 660 renames = _findrenames(repo, m, added + unknown, removed + deleted,
652 661 similarity)
653 662
654 663 _markchanges(repo, unknown, deleted, renames)
655 664
656 665 for f in rejected:
657 666 if f in m.files():
658 667 return 1
659 668 return 0
660 669
661 670 def _interestingfiles(repo, matcher):
662 671 '''Walk dirstate with matcher, looking for files that addremove would care
663 672 about.
664 673
665 674 This is different from dirstate.status because it doesn't care about
666 675 whether files are modified or clean.'''
667 676 added, unknown, deleted, removed = [], [], [], []
668 677 audit_path = pathutil.pathauditor(repo.root)
669 678
670 679 ctx = repo[None]
671 680 dirstate = repo.dirstate
672 681 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
673 682 full=False)
674 683 for abs, st in walkresults.iteritems():
675 684 dstate = dirstate[abs]
676 685 if dstate == '?' and audit_path.check(abs):
677 686 unknown.append(abs)
678 687 elif dstate != 'r' and not st:
679 688 deleted.append(abs)
680 689 # for finding renames
681 690 elif dstate == 'r':
682 691 removed.append(abs)
683 692 elif dstate == 'a':
684 693 added.append(abs)
685 694
686 695 return added, unknown, deleted, removed
687 696
688 697 def _findrenames(repo, matcher, added, removed, similarity):
689 698 '''Find renames from removed files to added ones.'''
690 699 renames = {}
691 700 if similarity > 0:
692 701 for old, new, score in similar.findrenames(repo, added, removed,
693 702 similarity):
694 703 if (repo.ui.verbose or not matcher.exact(old)
695 704 or not matcher.exact(new)):
696 705 repo.ui.status(_('recording removal of %s as rename to %s '
697 706 '(%d%% similar)\n') %
698 707 (matcher.rel(old), matcher.rel(new),
699 708 score * 100))
700 709 renames[new] = old
701 710 return renames
702 711
703 712 def _markchanges(repo, unknown, deleted, renames):
704 713 '''Marks the files in unknown as added, the files in deleted as removed,
705 714 and the files in renames as copied.'''
706 715 wctx = repo[None]
707 716 wlock = repo.wlock()
708 717 try:
709 718 wctx.forget(deleted)
710 719 wctx.add(unknown)
711 720 for new, old in renames.iteritems():
712 721 wctx.copy(old, new)
713 722 finally:
714 723 wlock.release()
715 724
716 725 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
717 726 """Update the dirstate to reflect the intent of copying src to dst. For
718 727 different reasons it might not end with dst being marked as copied from src.
719 728 """
720 729 origsrc = repo.dirstate.copied(src) or src
721 730 if dst == origsrc: # copying back a copy?
722 731 if repo.dirstate[dst] not in 'mn' and not dryrun:
723 732 repo.dirstate.normallookup(dst)
724 733 else:
725 734 if repo.dirstate[origsrc] == 'a' and origsrc == src:
726 735 if not ui.quiet:
727 736 ui.warn(_("%s has not been committed yet, so no copy "
728 737 "data will be stored for %s.\n")
729 738 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
730 739 if repo.dirstate[dst] in '?r' and not dryrun:
731 740 wctx.add([dst])
732 741 elif not dryrun:
733 742 wctx.copy(origsrc, dst)
734 743
735 744 def readrequires(opener, supported):
736 745 '''Reads and parses .hg/requires and checks if all entries found
737 746 are in the list of supported features.'''
738 747 requirements = set(opener.read("requires").splitlines())
739 748 missings = []
740 749 for r in requirements:
741 750 if r not in supported:
742 751 if not r or not r[0].isalnum():
743 752 raise error.RequirementError(_(".hg/requires file is corrupt"))
744 753 missings.append(r)
745 754 missings.sort()
746 755 if missings:
747 756 raise error.RequirementError(
748 757 _("repository requires features unknown to this Mercurial: %s")
749 758 % " ".join(missings),
750 759 hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
751 760 " for more information"))
752 761 return requirements
753 762
754 763 class filecachesubentry(object):
755 764 def __init__(self, path, stat):
756 765 self.path = path
757 766 self.cachestat = None
758 767 self._cacheable = None
759 768
760 769 if stat:
761 770 self.cachestat = filecachesubentry.stat(self.path)
762 771
763 772 if self.cachestat:
764 773 self._cacheable = self.cachestat.cacheable()
765 774 else:
766 775 # None means we don't know yet
767 776 self._cacheable = None
768 777
769 778 def refresh(self):
770 779 if self.cacheable():
771 780 self.cachestat = filecachesubentry.stat(self.path)
772 781
773 782 def cacheable(self):
774 783 if self._cacheable is not None:
775 784 return self._cacheable
776 785
777 786 # we don't know yet, assume it is for now
778 787 return True
779 788
780 789 def changed(self):
781 790 # no point in going further if we can't cache it
782 791 if not self.cacheable():
783 792 return True
784 793
785 794 newstat = filecachesubentry.stat(self.path)
786 795
787 796 # we may not know if it's cacheable yet, check again now
788 797 if newstat and self._cacheable is None:
789 798 self._cacheable = newstat.cacheable()
790 799
791 800 # check again
792 801 if not self._cacheable:
793 802 return True
794 803
795 804 if self.cachestat != newstat:
796 805 self.cachestat = newstat
797 806 return True
798 807 else:
799 808 return False
800 809
801 810 @staticmethod
802 811 def stat(path):
803 812 try:
804 813 return util.cachestat(path)
805 814 except OSError, e:
806 815 if e.errno != errno.ENOENT:
807 816 raise
808 817
809 818 class filecacheentry(object):
810 819 def __init__(self, paths, stat=True):
811 820 self._entries = []
812 821 for path in paths:
813 822 self._entries.append(filecachesubentry(path, stat))
814 823
815 824 def changed(self):
816 825 '''true if any entry has changed'''
817 826 for entry in self._entries:
818 827 if entry.changed():
819 828 return True
820 829 return False
821 830
822 831 def refresh(self):
823 832 for entry in self._entries:
824 833 entry.refresh()
825 834
826 835 class filecache(object):
827 836 '''A property like decorator that tracks files under .hg/ for updates.
828 837
829 838 Records stat info when called in _filecache.
830 839
831 840 On subsequent calls, compares old stat info with new info, and recreates the
832 841 object when any of the files changes, updating the new stat info in
833 842 _filecache.
834 843
835 844 Mercurial uses either atomic renames or appends for files under .hg,
836 845 so to ensure the cache is reliable we need the filesystem to be able
837 846 to tell us if a file has been replaced. If it can't, we fall back to
838 847 recreating the object on every call (essentially the same behaviour as
839 848 propertycache).
840 849
841 850 '''
842 851 def __init__(self, *paths):
843 852 self.paths = paths
844 853
845 854 def join(self, obj, fname):
846 855 """Used to compute the runtime path of a cached file.
847 856
848 857 Users should subclass filecache and provide their own version of this
849 858 function to call the appropriate join function on 'obj' (an instance
850 859 of the class whose member function was decorated).
851 860 """
852 861 return obj.join(fname)
853 862
854 863 def __call__(self, func):
855 864 self.func = func
856 865 self.name = func.__name__
857 866 return self
858 867
859 868 def __get__(self, obj, type=None):
860 869 # do we need to check if the file changed?
861 870 if self.name in obj.__dict__:
862 871 assert self.name in obj._filecache, self.name
863 872 return obj.__dict__[self.name]
864 873
865 874 entry = obj._filecache.get(self.name)
866 875
867 876 if entry:
868 877 if entry.changed():
869 878 entry.obj = self.func(obj)
870 879 else:
871 880 paths = [self.join(obj, path) for path in self.paths]
872 881
873 882 # We stat -before- creating the object so our cache doesn't lie if
874 883 # a writer modified it between the time we read and stat
875 884 entry = filecacheentry(paths, True)
876 885 entry.obj = self.func(obj)
877 886
878 887 obj._filecache[self.name] = entry
879 888
880 889 obj.__dict__[self.name] = entry.obj
881 890 return entry.obj
882 891
883 892 def __set__(self, obj, value):
884 893 if self.name not in obj._filecache:
885 894 # we add an entry for the missing value because X in __dict__
886 895 # implies X in _filecache
887 896 paths = [self.join(obj, path) for path in self.paths]
888 897 ce = filecacheentry(paths, False)
889 898 obj._filecache[self.name] = ce
890 899 else:
891 900 ce = obj._filecache[self.name]
892 901
893 902 ce.obj = value # update cached copy
894 903 obj.__dict__[self.name] = value # update copy returned by obj.x
895 904
896 905 def __delete__(self, obj):
897 906 try:
898 907 del obj.__dict__[self.name]
899 908 except KeyError:
900 909 raise AttributeError(self.name)
901 910
902 911 class dirs(object):
903 912 '''a multiset of directory names from a dirstate or manifest'''
904 913
905 914 def __init__(self, map, skip=None):
906 915 self._dirs = {}
907 916 addpath = self.addpath
908 917 if util.safehasattr(map, 'iteritems') and skip is not None:
909 918 for f, s in map.iteritems():
910 919 if s[0] != skip:
911 920 addpath(f)
912 921 else:
913 922 for f in map:
914 923 addpath(f)
915 924
916 925 def addpath(self, path):
917 926 dirs = self._dirs
918 927 for base in finddirs(path):
919 928 if base in dirs:
920 929 dirs[base] += 1
921 930 return
922 931 dirs[base] = 1
923 932
924 933 def delpath(self, path):
925 934 dirs = self._dirs
926 935 for base in finddirs(path):
927 936 if dirs[base] > 1:
928 937 dirs[base] -= 1
929 938 return
930 939 del dirs[base]
931 940
932 941 def __iter__(self):
933 942 return self._dirs.iterkeys()
934 943
935 944 def __contains__(self, d):
936 945 return d in self._dirs
937 946
938 947 if util.safehasattr(parsers, 'dirs'):
939 948 dirs = parsers.dirs
940 949
941 950 def finddirs(path):
942 951 pos = path.rfind('/')
943 952 while pos != -1:
944 953 yield path[:pos]
945 954 pos = path.rfind('/', 0, pos)
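A detail worth noting in the added method: tempfile.mkstemp() always returns an absolute path, so when a dir argument is supplied the wrapper re-joins only the basename with the caller's dir, keeping the result relative to the vfs. A small illustration under assumed paths (the scratch location and 'cache' subdirectory are hypothetical):

    import os
    from mercurial import scmutil

    vfsobj = scmutil.vfs('/tmp/hg-vfs-demo')
    vfsobj.makedirs('cache')                  # the dir= target must exist

    fd, name = vfsobj.mkstemp(dir='cache', suffix='.tmp')
    os.close(fd)

    # tempfile.mkstemp() produced '/tmp/hg-vfs-demo/cache/tmpXXXXXX.tmp';
    # the wrapper strips the base, so name comes back as 'cache/tmpXXXXXX.tmp'
    assert not os.path.isabs(name) and name.startswith('cache')
    vfsobj.unlink(name)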