##// END OF EJS Templates
scmutil.filecache: support watching over multiple files
Author: Siddharth Agarwal
r20045:b3684fd2 default
parent child Browse files
Show More
@@ -1,903 +1,907 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
def nochangesfound(ui, repo, excluded=None):
    '''Report that no changes were found for a push/pull.

    excluded is None or a list of nodes excluded from the push/pull.
    Secret (non-extinct) excluded changesets are counted and mentioned
    in the status message.
    '''
    secret = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleaned up.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secret.append(n)
    if secret:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secret))
    else:
        ui.status(_("no changes found\n"))
43 43
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not acceptable as a new label name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in ':\0\n\r':
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise util.Abort(_("cannot use an integer as a name"))
57 57
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for forbidden in ('\r', '\n'):
        if forbidden in f:
            raise util.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
62 62
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
74 74
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) boolean pair derived from the
    ui.portablefilenames setting.'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    abort = os.name == 'nt' or lval == 'abort'
    warn = bool(bval) or lval == 'warn'
    recognized = warn or abort or lval == 'ignore'
    if bval is None and not recognized:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
87 87
class casecollisionauditor(object):
    '''Detect case-folding collisions between new filenames and tracked files.

    Call the instance with each new filename; it warns (or aborts when
    abort=True) if the lowercased name collides with an already-known name.
    '''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # Lower all tracked names in a single encoding.lower() call by
        # joining them with NUL and splitting again afterwards.
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # f: new filename to check; also recorded for future calls
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
111 111
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def open(self, path, mode="r", text=False, atomictemp=False):
        # The first call rebinds self.open to __call__ on the instance,
        # so later opens skip this shim entirely.
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp)

    def read(self, path):
        '''Return the entire binary content of path.'''
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def write(self, path, data):
        '''Replace the content of path with data (binary mode).'''
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def append(self, path, data):
        '''Append data to path (binary mode).'''
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    # The methods below are thin wrappers over os/util/osutil primitives,
    # with the argument resolved relative to the vfs via self.join().
    # A path of None resolves to the vfs base directory itself.

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)
197 197
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: directory against which relative paths are resolved
        # audit: when True, paths are vetted by pathutil.pathauditor
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        # createmode: mode bits applied to newly created files (or None)
        self.createmode = None
        # _trustnlink: lazily determined; whether nlink counts from the
        # filesystem can be trusted for hardlink detection
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # util.always: accept every path without checking
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # whether symlinks work on the filesystem under base
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # whether the exec bit is honoured on the filesystem under base
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode to a freshly created file, when configured
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        '''Open path relative to base and return a file object.

        With atomictemp, writes go to a temporary file that atomically
        replaces the target on close.  For plain writes, hardlinks are
        broken first (copy-on-write) so other link targets are untouched.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        # truncating write: old file goes away entirely
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    # file does not exist yet; it will be created below
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break the hardlink by replacing f with a copy
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        '''Create dst as a symlink to src; when symlinks are not
        supported, write a regular file containing src instead.'''
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path):
        # empty/None path resolves to the base directory itself
        if path:
            return os.path.join(self.base, path)
        else:
            return self.base
310 310
311 311 opener = vfs
312 312
class auditvfs(object):
    '''Wrapper base that forwards audit control to an underlying vfs.'''

    def __init__(self, vfs):
        self.vfs = vfs

    def _auditget(self):
        return self.vfs.mustaudit

    def _auditset(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_auditget, _auditset)
324 324
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # filter: callable mapping an incoming path to the real path
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path):
        if not path:
            # empty/None path passes through untouched
            return self.vfs.join(path)
        return self.vfs.join(self._filter(path))

filteropener = filtervfs
342 342
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may pass through to the wrapped vfs
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise util.Abort('this vfs is read only')
353 353
354 354
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the root path itself are fatal; others are skipped
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Record dirname's stat in dirlst.  Returns False when a
            # directory with the same stat identity was already recorded,
            # which breaks symlink cycles.
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so never follow links
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk through the link target, sharing seen_dirs
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
402 402
def osrcpath():
    '''return default os-specific hgrc search path'''
    paths = systemrcpath()
    paths.extend(userrcpath())
    return [os.path.normpath(p) for p in paths]
409 409
# cached result of rcpath(); computed once per process
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                # empty entries are skipped (so HGRCPATH="" yields no paths)
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes every *.rc file inside
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
435 435
def revsingle(repo, revspec, default='.'):
    '''Return the last revision matched by revspec, or repo[default]
    when revspec is empty.  Aborts on an empty revision set.'''
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise util.Abort(_('empty revision set'))
    return repo[matched[-1]]
444 444
def revpair(repo, revs):
    '''Resolve revs to a (node, node-or-None) pair.

    Returns (p1, None) when revs is empty, (node, None) for a single
    non-range spec, and (first, last) for a range.'''
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    if len(resolved) == 0:
        if revs:
            raise util.Abort(_('empty revision range'))
        return repo.dirstate.p1(), None

    single = len(resolved) == 1 and len(revs) == 1
    if single and _revrangesep not in revs[0]:
        return repo.lookup(resolved[0]), None

    return repo.lookup(resolved[0]), repo.lookup(resolved[-1])
460 460
461 461 _revrangesep = ':'
462 462
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # empty spec falls back to defval (when given); 0 stays 0
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    # l accumulates results in order; seen deduplicates.  seen is synced
    # from l lazily (see the fast path below) to keep the common
    # single-range case cheap.
    seen, l = set(), []
    for spec in revs:
        if l and not seen:
            # catch up: an earlier fast path filled l without seen
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start <= 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = list(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                # preserve the range's direction when appending
                l.extend(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        dl = [r for r in m(repo, list(repo)) if r not in seen]
        l.extend(dl)
        seen.update(dl)

    return l
520 520
def expandpats(pats):
    '''Expand bare (kind-less) patterns via glob; other patterns pass
    through unchanged.  Returns a new list.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for p in pats:
        kind, name = matchmod._patsplit(p, None)
        if kind is None:
            try:
                matches = glob.glob(name)
            except re.error:
                matches = [name]
            if matches:
                expanded.extend(matches)
                continue
        expanded.append(p)
    return expanded
537 537
def matchandpats(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Build a matcher for ctx and return (matcher, expanded pats).

    pats may be None/[] for "no patterns"; opts may be None/{} for "no
    options".  Mutable default arguments are avoided (None sentinels)
    so callers can never share or mutate the defaults; explicit [] / {}
    arguments keep working unchanged.
    '''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        # report files the matcher rejects through the repo ui
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m, pats
550 550
def match(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    '''Return only the matcher half of matchandpats (same arguments).

    Uses None sentinels instead of mutable default arguments; they are
    normalized to []/{} before delegating so behavior is unchanged.
    '''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    return matchandpats(ctx, pats, opts, globbed, default)[0]
553 553
def matchall(repo):
    '''Return a matcher that matches every file in the repo.'''
    return matchmod.always(repo.root, repo.getcwd())
556 556
def matchfiles(repo, files):
    '''Return a matcher that matches exactly the given files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
559 559
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    '''Add new files and forget missing ones matching pats, guessing
    renames by content similarity.

    Returns 1 if any explicitly matched file was rejected, 0 otherwise.
    '''
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    m = match(repo[None], pats, opts)
    rejected = []
    # collect rejected paths instead of warning about them
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    # report what will be added/removed (verbose, or non-exact matches)
    unknownset = set(unknown)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            rel = m.rel(abs)
            if abs in unknownset:
                status = _('adding %s\n') % ((pats and rel) or abs)
            else:
                status = _('removing %s\n') % ((pats and rel) or abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
594 594
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Like addremove() but for an explicit file list; only prints the
    adding/removing messages in verbose mode.  Returns 1 if any listed
    file was rejected, 0 otherwise.'''
    m = matchfiles(repo, files)
    rejected = []
    # collect rejected paths instead of warning about them
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
624 624
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed) lists of filenames.'''
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dirstate state codes: '?' untracked, 'r' removed, 'a' added
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing on disk
            deleted.append(abs)
        # for finding renames
        elif dstate == 'r':
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed
651 651
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping new name -> old name for pairs whose
    similarity score meets the threshold.'''
    renames = {}
    if similarity <= 0:
        return renames
    candidates = similar.findrenames(repo, added, removed, similarity)
    for old, new, score in candidates:
        if (repo.ui.verbose
            or not (matcher.exact(old) and matcher.exact(new))):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
666 666
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
    finally:
        wlock.release()
679 679
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # 'mn' = merged or normal; anything else is relinked to normal
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source was only just added; copy data would be useless
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            # '?r' = untracked or removed: dst must be (re-)added
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
698 698
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings; aborts with
    RequirementError on unsupported or corrupt entries.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("unknown repository format: requires features '%s' (upgrade "
              "Mercurial)") % "', '".join(missings))
    return requirements
715 715
class filecachesubentry(object):
    '''Tracks stat info for a single file so callers can ask whether it
    has changed since the last refresh.'''

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat the file so future changed() calls compare against now
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        '''True if the file appears changed (or cannot be cached at all).'''
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns util.cachestat, or None when the file does not exist
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
770 770
class filecacheentry(object):
    '''Aggregates filecachesubentry objects for several paths and
    reports a change when any one of them changes.'''

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        for subentry in self._entries:
            if subentry.changed():
                return True
        return False

    def refresh(self):
        for subentry in self._entries:
            subentry.refresh()
787 787
788 788 class filecache(object):
789 '''A property like decorator that tracks a file under .hg/ for updates.
789 '''A property like decorator that tracks files under .hg/ for updates.
790 790
791 791 Records stat info when called in _filecache.
792 792
793 On subsequent calls, compares old stat info with new info, and recreates
794 the object when needed, updating the new stat info in _filecache.
793 On subsequent calls, compares old stat info with new info, and recreates the
794 object when any of the files changes, updating the new stat info in
795 _filecache.
795 796
796 797 Mercurial either atomic renames or appends for files under .hg,
797 798 so to ensure the cache is reliable we need the filesystem to be able
798 799 to tell us if a file has been replaced. If it can't, we fallback to
799 800 recreating the object on every call (essentially the same behaviour as
800 propertycache).'''
801 def __init__(self, path):
802 self.path = path
801 propertycache).
802
803 '''
804 def __init__(self, *paths):
805 self.paths = paths
803 806
804 807 def join(self, obj, fname):
805 """Used to compute the runtime path of the cached file.
808 """Used to compute the runtime path of a cached file.
806 809
807 810 Users should subclass filecache and provide their own version of this
808 811 function to call the appropriate join function on 'obj' (an instance
809 812 of the class that its member function was decorated).
810 813 """
811 814 return obj.join(fname)
812 815
813 816 def __call__(self, func):
814 817 self.func = func
815 818 self.name = func.__name__
816 819 return self
817 820
818 821 def __get__(self, obj, type=None):
819 822 # do we need to check if the file changed?
820 823 if self.name in obj.__dict__:
821 824 assert self.name in obj._filecache, self.name
822 825 return obj.__dict__[self.name]
823 826
824 827 entry = obj._filecache.get(self.name)
825 828
826 829 if entry:
827 830 if entry.changed():
828 831 entry.obj = self.func(obj)
829 832 else:
830 path = self.join(obj, self.path)
833 paths = [self.join(obj, path) for path in self.paths]
831 834
832 835 # We stat -before- creating the object so our cache doesn't lie if
833 836 # a writer modified between the time we read and stat
834 entry = filecachesubentry(path, True)
837 entry = filecacheentry(paths, True)
835 838 entry.obj = self.func(obj)
836 839
837 840 obj._filecache[self.name] = entry
838 841
839 842 obj.__dict__[self.name] = entry.obj
840 843 return entry.obj
841 844
842 845 def __set__(self, obj, value):
843 846 if self.name not in obj._filecache:
844 847 # we add an entry for the missing value because X in __dict__
845 848 # implies X in _filecache
846 ce = filecachesubentry(self.join(obj, self.path), False)
849 paths = [self.join(obj, path) for path in self.paths]
850 ce = filecacheentry(paths, False)
847 851 obj._filecache[self.name] = ce
848 852 else:
849 853 ce = obj._filecache[self.name]
850 854
851 855 ce.obj = value # update cached copy
852 856 obj.__dict__[self.name] = value # update copy returned by obj.x
853 857
854 858 def __delete__(self, obj):
855 859 try:
856 860 del obj.__dict__[self.name]
857 861 except KeyError:
858 862 raise AttributeError(self.name)
859 863
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if util.safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style map: skip entries whose state equals 'skip'
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        '''Increment the count of every ancestor directory of path.'''
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                # ancestors of an already-known dir are known too
                return
            dirs[base] = 1

    def delpath(self, path):
        '''Decrement counts for path's ancestors, dropping zero entries.'''
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
895 899
896 900 if util.safehasattr(parsers, 'dirs'):
897 901 dirs = parsers.dirs
898 902
def finddirs(path):
    '''Yield each ancestor directory of a slash-separated path,
    deepest first (e.g. "a/b/c" -> "a/b", "a").'''
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
@@ -1,135 +1,184 b''
1 1 import sys, os, subprocess
2 2
3 3 if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
4 4 'cacheable']):
5 5 sys.exit(80)
6 6
7 7 from mercurial import util, scmutil, extensions, hg, ui
8 8
9 9 filecache = scmutil.filecache
10 10
11 11 class fakerepo(object):
12 12 def __init__(self):
13 13 self._filecache = {}
14 14
15 15 def join(self, p):
16 16 return p
17 17
18 18 def sjoin(self, p):
19 19 return p
20 20
21 @filecache('x')
21 @filecache('x', 'y')
22 22 def cached(self):
23 23 print 'creating'
24 24 return 'string from function'
25 25
26 26 def invalidate(self):
27 27 for k in self._filecache:
28 28 try:
29 29 delattr(self, k)
30 30 except AttributeError:
31 31 pass
32 32
33 33 def basic(repo):
34 print "* file doesn't exist"
34 print "* neither file exists"
35 35 # calls function
36 36 repo.cached
37 37
38 38 repo.invalidate()
39 print "* file still doesn't exist"
39 print "* neither file still exists"
40 40 # uses cache
41 41 repo.cached
42 42
43 43 # create empty file
44 44 f = open('x', 'w')
45 45 f.close()
46 46 repo.invalidate()
47 47 print "* empty file x created"
48 48 # should recreate the object
49 49 repo.cached
50 50
51 51 f = open('x', 'w')
52 52 f.write('a')
53 53 f.close()
54 54 repo.invalidate()
55 55 print "* file x changed size"
56 56 # should recreate the object
57 57 repo.cached
58 58
59 59 repo.invalidate()
60 print "* nothing changed with file x"
60 print "* nothing changed with either file"
61 61 # stats file again, reuses object
62 62 repo.cached
63 63
64 64 # atomic replace file, size doesn't change
65 65 # hopefully st_mtime doesn't change as well so this doesn't use the cache
66 66 # because of inode change
67 67 f = scmutil.opener('.')('x', 'w', atomictemp=True)
68 68 f.write('b')
69 69 f.close()
70 70
71 71 repo.invalidate()
72 72 print "* file x changed inode"
73 73 repo.cached
74 74
75 # create empty file y
76 f = open('y', 'w')
77 f.close()
78 repo.invalidate()
79 print "* empty file y created"
80 # should recreate the object
81 repo.cached
82
83 f = open('y', 'w')
84 f.write('A')
85 f.close()
86 repo.invalidate()
87 print "* file y changed size"
88 # should recreate the object
89 repo.cached
90
91 f = scmutil.opener('.')('y', 'w', atomictemp=True)
92 f.write('B')
93 f.close()
94
95 repo.invalidate()
96 print "* file y changed inode"
97 repo.cached
98
99 f = scmutil.opener('.')('x', 'w', atomictemp=True)
100 f.write('c')
101 f.close()
102 f = scmutil.opener('.')('y', 'w', atomictemp=True)
103 f.write('C')
104 f.close()
105
106 repo.invalidate()
107 print "* both files changed inode"
108 repo.cached
109
75 110 def fakeuncacheable():
76 111 def wrapcacheable(orig, *args, **kwargs):
77 112 return False
78 113
79 114 def wrapinit(orig, *args, **kwargs):
80 115 pass
81 116
82 117 originit = extensions.wrapfunction(util.cachestat, '__init__', wrapinit)
83 118 origcacheable = extensions.wrapfunction(util.cachestat, 'cacheable',
84 119 wrapcacheable)
85 120
86 try:
87 os.remove('x')
88 except OSError:
89 pass
121 for fn in ['x', 'y']:
122 try:
123 os.remove(fn)
124 except OSError:
125 pass
90 126
91 127 basic(fakerepo())
92 128
93 129 util.cachestat.cacheable = origcacheable
94 130 util.cachestat.__init__ = originit
95 131
96 132 def test_filecache_synced():
97 133 # test old behaviour that caused filecached properties to go out of sync
98 134 os.system('hg init && echo a >> a && hg ci -qAm.')
99 135 repo = hg.repository(ui.ui())
100 136 # first rollback clears the filecache, but changelog to stays in __dict__
101 137 repo.rollback()
102 138 repo.commit('.')
103 139 # second rollback comes along and touches the changelog externally
104 140 # (file is moved)
105 141 repo.rollback()
106 142 # but since changelog isn't under the filecache control anymore, we don't
107 143 # see that it changed, and return the old changelog without reconstructing
108 144 # it
109 145 repo.commit('.')
110 146
111 147 def setbeforeget(repo):
112 148 os.remove('x')
149 os.remove('y')
113 150 repo.cached = 'string set externally'
114 151 repo.invalidate()
115 print "* file x doesn't exist"
152 print "* neither file exists"
116 153 print repo.cached
117 154 repo.invalidate()
118 155 f = open('x', 'w')
119 156 f.write('a')
120 157 f.close()
121 158 print "* file x created"
122 159 print repo.cached
123 160
161 repo.cached = 'string 2 set externally'
162 repo.invalidate()
163 print "* string set externally again"
164 print repo.cached
165
166 repo.invalidate()
167 f = open('y', 'w')
168 f.write('b')
169 f.close()
170 print "* file y created"
171 print repo.cached
172
124 173 print 'basic:'
125 174 print
126 175 basic(fakerepo())
127 176 print
128 177 print 'fakeuncacheable:'
129 178 print
130 179 fakeuncacheable()
131 180 test_filecache_synced()
132 181 print
133 182 print 'setbeforeget:'
134 183 print
135 184 setbeforeget(fakerepo())
@@ -1,39 +1,60 b''
1 1 basic:
2 2
3 * file doesn't exist
3 * neither file exists
4 4 creating
5 * file still doesn't exist
5 * neither file still exists
6 6 * empty file x created
7 7 creating
8 8 * file x changed size
9 9 creating
10 * nothing changed with file x
10 * nothing changed with either file
11 11 * file x changed inode
12 12 creating
13 * empty file y created
14 creating
15 * file y changed size
16 creating
17 * file y changed inode
18 creating
19 * both files changed inode
20 creating
13 21
14 22 fakeuncacheable:
15 23
16 * file doesn't exist
24 * neither file exists
17 25 creating
18 * file still doesn't exist
26 * neither file still exists
19 27 creating
20 28 * empty file x created
21 29 creating
22 30 * file x changed size
23 31 creating
24 * nothing changed with file x
32 * nothing changed with either file
25 33 creating
26 34 * file x changed inode
27 35 creating
36 * empty file y created
37 creating
38 * file y changed size
39 creating
40 * file y changed inode
41 creating
42 * both files changed inode
43 creating
28 44 repository tip rolled back to revision -1 (undo commit)
29 45 working directory now based on revision -1
30 46 repository tip rolled back to revision -1 (undo commit)
31 47 working directory now based on revision -1
32 48
33 49 setbeforeget:
34 50
35 * file x doesn't exist
51 * neither file exists
36 52 string set externally
37 53 * file x created
38 54 creating
39 55 string from function
56 * string set externally again
57 string 2 set externally
58 * file y created
59 creating
60 string from function
General Comments 0
You need to be logged in to leave comments. Login now