changelog: load pending file directly...
Gregory Szorc
r32292:0ad0d26f default
mercurial/changelog.py
@@ -1,548 +1,541 @@
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 bin,
15 15 hex,
16 16 nullid,
17 17 )
18 18
19 19 from . import (
20 20 encoding,
21 21 error,
22 22 revlog,
23 23 util,
24 24 )
25 25
26 26 _defaultextra = {'branch': 'default'}
27 27
28 28 def _string_escape(text):
29 29 """
30 30 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
31 31 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
32 32 >>> s
33 33 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
34 34 >>> res = _string_escape(s)
35 35 >>> s == util.unescapestr(res)
36 36 True
37 37 """
38 38 # subset of the string_escape codec
39 39 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
40 40 return text.replace('\0', '\\0')
41 41
42 42 def decodeextra(text):
43 43 """
44 44 >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
45 45 ... ).iteritems())
46 46 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
47 47 >>> sorted(decodeextra(encodeextra({'foo': 'bar',
48 48 ... 'baz': chr(92) + chr(0) + '2'})
49 49 ... ).iteritems())
50 50 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
51 51 """
52 52 extra = _defaultextra.copy()
53 53 for l in text.split('\0'):
54 54 if l:
55 55 if '\\0' in l:
56 56 # fix up \0 without getting into trouble with \\0
57 57 l = l.replace('\\\\', '\\\\\n')
58 58 l = l.replace('\\0', '\0')
59 59 l = l.replace('\n', '')
60 60 k, v = util.unescapestr(l).split(':', 1)
61 61 extra[k] = v
62 62 return extra
63 63
64 64 def encodeextra(d):
65 65 # keys must be sorted to produce a deterministic changelog entry
66 66 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
67 67 return "\0".join(items)
68 68
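
To make the round trip concrete, here is a small editorial illustration (not part of the change) of how an extra dict passes through these helpers; note that encodeextra sorts keys and decodeextra re-applies the default branch:

    >>> raw = encodeextra({'close': '1', 'branch': 'stable'})
    >>> raw
    'branch:stable\x00close:1'
    >>> sorted(decodeextra(raw).iteritems())
    [('branch', 'stable'), ('close', '1')]
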
69 69 def stripdesc(desc):
70 70 """strip trailing whitespace and leading and trailing empty lines"""
71 71 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
72 72
73 73 class appender(object):
74 74 '''the changelog index must be updated last on disk, so we use this class
75 75 to delay writes to it'''
76 76 def __init__(self, vfs, name, mode, buf):
77 77 self.data = buf
78 78 fp = vfs(name, mode)
79 79 self.fp = fp
80 80 self.offset = fp.tell()
81 81 self.size = vfs.fstat(fp).st_size
82 82 self._end = self.size
83 83
84 84 def end(self):
85 85 return self._end
86 86 def tell(self):
87 87 return self.offset
88 88 def flush(self):
89 89 pass
90 90 def close(self):
91 91 self.fp.close()
92 92
93 93 def seek(self, offset, whence=0):
94 94 '''virtual file offset spans real file and data'''
95 95 if whence == 0:
96 96 self.offset = offset
97 97 elif whence == 1:
98 98 self.offset += offset
99 99 elif whence == 2:
100 100 self.offset = self.end() + offset
101 101 if self.offset < self.size:
102 102 self.fp.seek(self.offset)
103 103
104 104 def read(self, count=-1):
 105 105         '''the only trick here is reads that span real file and data'''
106 106 ret = ""
107 107 if self.offset < self.size:
108 108 s = self.fp.read(count)
109 109 ret = s
110 110 self.offset += len(s)
111 111 if count > 0:
112 112 count -= len(s)
113 113 if count != 0:
114 114 doff = self.offset - self.size
115 115 self.data.insert(0, "".join(self.data))
116 116 del self.data[1:]
117 117 s = self.data[0][doff:doff + count]
118 118 self.offset += len(s)
119 119 ret += s
120 120 return ret
121 121
122 122 def write(self, s):
123 123 self.data.append(bytes(s))
124 124 self.offset += len(s)
125 125 self._end += len(s)
126 126
127 127 def _divertopener(opener, target):
 128 128     """build an opener that writes to 'target.a' instead of 'target'"""
129 129 def _divert(name, mode='r', checkambig=False):
130 130 if name != target:
131 131 return opener(name, mode)
132 132 return opener(name + ".a", mode)
133 133 return _divert
134 134
135 135 def _delayopener(opener, target, buf):
136 136 """build an opener that stores chunks in 'buf' instead of 'target'"""
137 137 def _delay(name, mode='r', checkambig=False):
138 138 if name != target:
139 139 return opener(name, mode)
140 140 return appender(opener, name, mode, buf)
141 141 return _delay
142 142
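
A rough sketch of how these two factories are used together with appender (the vfs below stands in for the real store opener; the flow mirrors delayupdate/_writepending further down):

    buf = []
    delayed = _delayopener(vfs, '00changelog.i', buf)
    fp = delayed('00changelog.i', 'a')   # returns an appender
    fp.write('...new index entry...')    # buffered in buf, not on disk
    # Later, _writepending() copies the on-disk index plus ''.join(buf)
    # into '00changelog.i.a', and _finalize() appends buf to the real index.
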
143 143 _changelogrevision = collections.namedtuple(u'changelogrevision',
144 144 (u'manifest', u'user', u'date',
145 145 u'files', u'description',
146 146 u'extra'))
147 147
148 148 class changelogrevision(object):
149 149 """Holds results of a parsed changelog revision.
150 150
151 151 Changelog revisions consist of multiple pieces of data, including
152 152 the manifest node, user, and date. This object exposes a view into
153 153 the parsed object.
154 154 """
155 155
156 156 __slots__ = (
157 157 u'_offsets',
158 158 u'_text',
159 159 )
160 160
161 161 def __new__(cls, text):
162 162 if not text:
163 163 return _changelogrevision(
164 164 manifest=nullid,
165 165 user='',
166 166 date=(0, 0),
167 167 files=[],
168 168 description='',
169 169 extra=_defaultextra,
170 170 )
171 171
172 172 self = super(changelogrevision, cls).__new__(cls)
173 173 # We could return here and implement the following as an __init__.
174 174 # But doing it here is equivalent and saves an extra function call.
175 175
176 176 # format used:
177 177 # nodeid\n : manifest node in ascii
178 178 # user\n : user, no \n or \r allowed
179 179 # time tz extra\n : date (time is int or float, timezone is int)
180 180 # : extra is metadata, encoded and separated by '\0'
181 181 # : older versions ignore it
182 182 # files\n\n : files modified by the cset, no \n or \r allowed
183 183 # (.*) : comment (free text, ideally utf-8)
184 184 #
185 185 # changelog v0 doesn't use extra
186 186
187 187 nl1 = text.index('\n')
188 188 nl2 = text.index('\n', nl1 + 1)
189 189 nl3 = text.index('\n', nl2 + 1)
190 190
 191 191         # The list of files may be empty, which means nl3 is the first of the
192 192 # double newline that precedes the description.
193 193 if text[nl3 + 1:nl3 + 2] == '\n':
194 194 doublenl = nl3
195 195 else:
196 196 doublenl = text.index('\n\n', nl3 + 1)
197 197
198 198 self._offsets = (nl1, nl2, nl3, doublenl)
199 199 self._text = text
200 200
201 201 return self
202 202
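
For reference, a raw entry that follows the layout in the comment above parses like this (all values invented for illustration):

    text = ('0123456789abcdef0123456789abcdef01234567\n'  # manifest node
            'Alice <alice@example.com>\n'                  # user
            '1493149537 0 branch:stable\n'                 # time tz extra
            'file-a.txt\n'
            'file-b.txt\n'
            '\n'
            'an example commit message')
    rev = changelogrevision(text)
    # rev.files == ['file-a.txt', 'file-b.txt']
    # rev.extra == {'branch': 'stable'}
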
203 203 @property
204 204 def manifest(self):
205 205 return bin(self._text[0:self._offsets[0]])
206 206
207 207 @property
208 208 def user(self):
209 209 off = self._offsets
210 210 return encoding.tolocal(self._text[off[0] + 1:off[1]])
211 211
212 212 @property
213 213 def _rawdate(self):
214 214 off = self._offsets
215 215 dateextra = self._text[off[1] + 1:off[2]]
216 216 return dateextra.split(' ', 2)[0:2]
217 217
218 218 @property
219 219 def _rawextra(self):
220 220 off = self._offsets
221 221 dateextra = self._text[off[1] + 1:off[2]]
222 222 fields = dateextra.split(' ', 2)
223 223 if len(fields) != 3:
224 224 return None
225 225
226 226 return fields[2]
227 227
228 228 @property
229 229 def date(self):
230 230 raw = self._rawdate
231 231 time = float(raw[0])
232 232 # Various tools did silly things with the timezone.
233 233 try:
234 234 timezone = int(raw[1])
235 235 except ValueError:
236 236 timezone = 0
237 237
238 238 return time, timezone
239 239
240 240 @property
241 241 def extra(self):
242 242 raw = self._rawextra
243 243 if raw is None:
244 244 return _defaultextra
245 245
246 246 return decodeextra(raw)
247 247
248 248 @property
249 249 def files(self):
250 250 off = self._offsets
251 251 if off[2] == off[3]:
252 252 return []
253 253
254 254 return self._text[off[2] + 1:off[3]].split('\n')
255 255
256 256 @property
257 257 def description(self):
258 258 return encoding.tolocal(self._text[self._offsets[3] + 2:])
259 259
260 260 class changelog(revlog.revlog):
261 def __init__(self, opener):
262 revlog.revlog.__init__(self, opener, "00changelog.i",
263 checkambig=True)
261 def __init__(self, opener, trypending=False):
262 """Load a changelog revlog using an opener.
263
264 If ``trypending`` is true, we attempt to load the index from a
265 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
266 The ``00changelog.i.a`` file contains index (and possibly inline
267 revision) data for a transaction that hasn't been finalized yet.
268 It exists in a separate file to facilitate readers (such as
 269         hook processes) accessing data before a transaction is finalized.
270 """
271 if trypending and opener.exists('00changelog.i.a'):
272 indexfile = '00changelog.i.a'
273 else:
274 indexfile = '00changelog.i'
275
276 revlog.revlog.__init__(self, opener, indexfile, checkambig=True)
277
264 278 if self._initempty:
265 279 # changelogs don't benefit from generaldelta
266 280 self.version &= ~revlog.REVLOGGENERALDELTA
267 281 self._generaldelta = False
268 282
269 283 # Delta chains for changelogs tend to be very small because entries
 270 284         # tend to be small and don't delta well with each other. So disable delta
271 285 # chains.
272 286 self.storedeltachains = False
273 287
274 288 self._realopener = opener
275 289 self._delayed = False
276 290 self._delaybuf = None
277 291 self._divert = False
278 292 self.filteredrevs = frozenset()
279 293
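
The practical effect of the new flag: a process that knows a transaction may be open (such as a hook subprocess) can ask for the pending view directly. A sketch, where svfs stands for the store opener:

    cl = changelog(svfs, trypending=True)
    # If '00changelog.i.a' exists, the not-yet-finalized revisions are
    # visible through cl; otherwise this behaves exactly like
    # changelog(svfs).
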
280 294 def tip(self):
281 295 """filtered version of revlog.tip"""
 282 296         for i in xrange(len(self) - 1, -2, -1):
283 297 if i not in self.filteredrevs:
284 298 return self.node(i)
285 299
286 300 def __contains__(self, rev):
287 301 """filtered version of revlog.__contains__"""
288 302 return (0 <= rev < len(self)
289 303 and rev not in self.filteredrevs)
290 304
291 305 def __iter__(self):
292 306 """filtered version of revlog.__iter__"""
293 307 if len(self.filteredrevs) == 0:
294 308 return revlog.revlog.__iter__(self)
295 309
296 310 def filterediter():
297 311 for i in xrange(len(self)):
298 312 if i not in self.filteredrevs:
299 313 yield i
300 314
301 315 return filterediter()
302 316
303 317 def revs(self, start=0, stop=None):
304 318 """filtered version of revlog.revs"""
305 319 for i in super(changelog, self).revs(start, stop):
306 320 if i not in self.filteredrevs:
307 321 yield i
308 322
309 323 @util.propertycache
310 324 def nodemap(self):
311 325 # XXX need filtering too
312 326 self.rev(self.node(0))
313 327 return self._nodecache
314 328
315 329 def reachableroots(self, minroot, heads, roots, includepath=False):
316 330 return self.index.reachableroots2(minroot, heads, roots, includepath)
317 331
318 332 def headrevs(self):
319 333 if self.filteredrevs:
320 334 try:
321 335 return self.index.headrevsfiltered(self.filteredrevs)
322 336 # AttributeError covers non-c-extension environments and
323 337 # old c extensions without filter handling.
324 338 except AttributeError:
325 339 return self._headrevs()
326 340
327 341 return super(changelog, self).headrevs()
328 342
329 343 def strip(self, *args, **kwargs):
330 344 # XXX make something better than assert
331 345 # We can't expect proper strip behavior if we are filtered.
332 346 assert not self.filteredrevs
333 347 super(changelog, self).strip(*args, **kwargs)
334 348
335 349 def rev(self, node):
336 350 """filtered version of revlog.rev"""
337 351 r = super(changelog, self).rev(node)
338 352 if r in self.filteredrevs:
339 353 raise error.FilteredLookupError(hex(node), self.indexfile,
340 354 _('filtered node'))
341 355 return r
342 356
343 357 def node(self, rev):
344 358 """filtered version of revlog.node"""
345 359 if rev in self.filteredrevs:
346 360 raise error.FilteredIndexError(rev)
347 361 return super(changelog, self).node(rev)
348 362
349 363 def linkrev(self, rev):
350 364 """filtered version of revlog.linkrev"""
351 365 if rev in self.filteredrevs:
352 366 raise error.FilteredIndexError(rev)
353 367 return super(changelog, self).linkrev(rev)
354 368
355 369 def parentrevs(self, rev):
356 370 """filtered version of revlog.parentrevs"""
357 371 if rev in self.filteredrevs:
358 372 raise error.FilteredIndexError(rev)
359 373 return super(changelog, self).parentrevs(rev)
360 374
361 375 def flags(self, rev):
362 376 """filtered version of revlog.flags"""
363 377 if rev in self.filteredrevs:
364 378 raise error.FilteredIndexError(rev)
365 379 return super(changelog, self).flags(rev)
366 380
367 381 def delayupdate(self, tr):
368 382 "delay visibility of index updates to other readers"
369 383
370 384 if not self._delayed:
371 385 if len(self) == 0:
372 386 self._divert = True
373 387 if self._realopener.exists(self.indexfile + '.a'):
374 388 self._realopener.unlink(self.indexfile + '.a')
375 389 self.opener = _divertopener(self._realopener, self.indexfile)
376 390 else:
377 391 self._delaybuf = []
378 392 self.opener = _delayopener(self._realopener, self.indexfile,
379 393 self._delaybuf)
380 394 self._delayed = True
381 395 tr.addpending('cl-%i' % id(self), self._writepending)
382 396 tr.addfinalize('cl-%i' % id(self), self._finalize)
383 397
384 398 def _finalize(self, tr):
385 399 "finalize index updates"
386 400 self._delayed = False
387 401 self.opener = self._realopener
388 402 # move redirected index data back into place
389 403 if self._divert:
390 404 assert not self._delaybuf
391 405 tmpname = self.indexfile + ".a"
392 406 nfile = self.opener.open(tmpname)
393 407 nfile.close()
394 408 self.opener.rename(tmpname, self.indexfile, checkambig=True)
395 409 elif self._delaybuf:
396 410 fp = self.opener(self.indexfile, 'a', checkambig=True)
397 411 fp.write("".join(self._delaybuf))
398 412 fp.close()
399 413 self._delaybuf = None
400 414 self._divert = False
401 415 # split when we're done
402 416 self.checkinlinesize(tr)
403 417
404 def readpending(self, file):
405 """read index data from a "pending" file
406
407 During a transaction, the actual changeset data is already stored in the
408 main file, but not yet finalized in the on-disk index. Instead, a
409 "pending" index is written by the transaction logic. If this function
410 is running, we are likely in a subprocess invoked in a hook. The
411 subprocess is informed that it is within a transaction and needs to
412 access its content.
413
414 This function will read all the index data out of the pending file and
415 overwrite the main index."""
416
417 if not self.opener.exists(file):
418 return # no pending data for changelog
419 r = revlog.revlog(self.opener, file)
420 self.index = r.index
421 self.nodemap = r.nodemap
422 self._nodecache = r._nodecache
423 self._chunkcache = r._chunkcache
424
425 418 def _writepending(self, tr):
426 419 "create a file containing the unfinalized state for pretxnchangegroup"
427 420 if self._delaybuf:
428 421 # make a temporary copy of the index
429 422 fp1 = self._realopener(self.indexfile)
430 423 pendingfilename = self.indexfile + ".a"
431 424 # register as a temp file to ensure cleanup on failure
432 425 tr.registertmp(pendingfilename)
433 426 # write existing data
434 427 fp2 = self._realopener(pendingfilename, "w")
435 428 fp2.write(fp1.read())
436 429 # add pending data
437 430 fp2.write("".join(self._delaybuf))
438 431 fp2.close()
439 432 # switch modes so finalize can simply rename
440 433 self._delaybuf = None
441 434 self._divert = True
442 435 self.opener = _divertopener(self._realopener, self.indexfile)
443 436
444 437 if self._divert:
445 438 return True
446 439
447 440 return False
448 441
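
Taken together, delayupdate, _writepending and _finalize give a delayed changelog write this rough life cycle (a sketch; the transaction, not the caller, invokes the registered callbacks):

    cl.delayupdate(tr)   # start buffering (or diverting) index writes
    # ...revisions are added; readers of 00changelog.i see nothing new...
    # tr's addpending callbacks run -> _writepending materializes
    #     00changelog.i.a for pretxn* hook subprocesses
    # tr's addfinalize callbacks run -> _finalize renames or appends the
    #     pending data into 00changelog.i
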
449 442 def checkinlinesize(self, tr, fp=None):
450 443 if not self._delayed:
451 444 revlog.revlog.checkinlinesize(self, tr, fp)
452 445
453 446 def read(self, node):
454 447 """Obtain data from a parsed changelog revision.
455 448
456 449 Returns a 6-tuple of:
457 450
458 451 - manifest node in binary
459 452 - author/user as a localstr
460 453 - date as a 2-tuple of (time, timezone)
461 454 - list of files
462 455 - commit message as a localstr
463 456 - dict of extra metadata
464 457
465 458 Unless you need to access all fields, consider calling
466 459 ``changelogrevision`` instead, as it is faster for partial object
467 460 access.
468 461 """
469 462 c = changelogrevision(self.revision(node))
470 463 return (
471 464 c.manifest,
472 465 c.user,
473 466 c.date,
474 467 c.files,
475 468 c.description,
476 469 c.extra
477 470 )
478 471
479 472 def changelogrevision(self, nodeorrev):
480 473 """Obtain a ``changelogrevision`` for a node or revision."""
481 474 return changelogrevision(self.revision(nodeorrev))
482 475
483 476 def readfiles(self, node):
484 477 """
485 478 short version of read that only returns the files modified by the cset
486 479 """
487 480 text = self.revision(node)
488 481 if not text:
489 482 return []
490 483 last = text.index("\n\n")
491 484 l = text[:last].split('\n')
492 485 return l[3:]
493 486
494 487 def add(self, manifest, files, desc, transaction, p1, p2,
495 488 user, date=None, extra=None):
496 489 # Convert to UTF-8 encoded bytestrings as the very first
497 490 # thing: calling any method on a localstr object will turn it
498 491 # into a str object and the cached UTF-8 string is thus lost.
499 492 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
500 493
501 494 user = user.strip()
502 495 # An empty username or a username with a "\n" will make the
503 496 # revision text contain two "\n\n" sequences -> corrupt
504 497 # repository since read cannot unpack the revision.
505 498 if not user:
506 499 raise error.RevlogError(_("empty username"))
507 500 if "\n" in user:
508 501 raise error.RevlogError(_("username %s contains a newline")
509 502 % repr(user))
510 503
511 504 desc = stripdesc(desc)
512 505
513 506 if date:
514 507 parseddate = "%d %d" % util.parsedate(date)
515 508 else:
516 509 parseddate = "%d %d" % util.makedate()
517 510 if extra:
518 511 branch = extra.get("branch")
519 512 if branch in ("default", ""):
520 513 del extra["branch"]
521 514 elif branch in (".", "null", "tip"):
522 515 raise error.RevlogError(_('the name \'%s\' is reserved')
523 516 % branch)
524 517 if extra:
525 518 extra = encodeextra(extra)
526 519 parseddate = "%s %s" % (parseddate, extra)
527 520 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
528 521 text = "\n".join(l)
529 522 return self.addrevision(text, transaction, len(self), p1, p2)
530 523
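
As a usage sketch, the entry format shown earlier would be produced by a call along these lines (all arguments illustrative):

    node = cl.add(mnode, ['file-a.txt', 'file-b.txt'],
                  'an example commit message', tr, p1, p2,
                  'Alice <alice@example.com>', date='1493149537 0',
                  extra={'branch': 'stable'})
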
531 524 def branchinfo(self, rev):
532 525 """return the branch name and open/close state of a revision
533 526
534 527 This function exists because creating a changectx object
535 528 just to access this is costly."""
536 529 extra = self.read(rev)[5]
537 530 return encoding.tolocal(extra.get("branch")), 'close' in extra
538 531
539 532 def _addrevision(self, node, rawtext, transaction, *args, **kwargs):
540 533 # overlay over the standard revlog._addrevision to track the new
541 534 # revision on the transaction.
542 535 rev = len(self)
543 536 node = super(changelog, self)._addrevision(node, rawtext, transaction,
544 537 *args, **kwargs)
545 538 revs = transaction.changes.get('revs')
546 539 if revs is not None:
547 540 revs.add(rev)
548 541 return node
mercurial/localrepo.py
@@ -1,2050 +1,2048 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repoview,
54 54 revset,
55 55 revsetlang,
56 56 scmutil,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
66 66 release = lockmod.release
67 67 urlerr = util.urlerr
68 68 urlreq = util.urlreq
69 69
70 70 class repofilecache(scmutil.filecache):
 71 71     """All filecache usage on repo is done for logic that should be unfiltered
72 72 """
73 73
74 74 def join(self, obj, fname):
75 75 return obj.vfs.join(fname)
76 76 def __get__(self, repo, type=None):
77 77 if repo is None:
78 78 return self
79 79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
80 80 def __set__(self, repo, value):
81 81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
82 82 def __delete__(self, repo):
83 83 return super(repofilecache, self).__delete__(repo.unfiltered())
84 84
85 85 class storecache(repofilecache):
86 86 """filecache for files in the store"""
87 87 def join(self, obj, fname):
88 88 return obj.sjoin(fname)
89 89
90 90 class unfilteredpropertycache(util.propertycache):
 91 91     """propertycache that applies to the unfiltered repo only"""
92 92
93 93 def __get__(self, repo, type=None):
94 94 unfi = repo.unfiltered()
95 95 if unfi is repo:
96 96 return super(unfilteredpropertycache, self).__get__(unfi)
97 97 return getattr(unfi, self.name)
98 98
99 99 class filteredpropertycache(util.propertycache):
 100 100     """propertycache that must take filtering into account"""
101 101
102 102 def cachevalue(self, obj, value):
103 103 object.__setattr__(obj, self.name, value)
104 104
105 105
106 106 def hasunfilteredcache(repo, name):
107 107 """check if a repo has an unfilteredpropertycache value for <name>"""
108 108 return name in vars(repo.unfiltered())
109 109
110 110 def unfilteredmethod(orig):
 111 111     """decorate a method that always needs to be run on the unfiltered version"""
112 112 def wrapper(repo, *args, **kwargs):
113 113 return orig(repo.unfiltered(), *args, **kwargs)
114 114 return wrapper
115 115
116 116 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117 117 'unbundle'}
118 118 legacycaps = moderncaps.union({'changegroupsubset'})
119 119
120 120 class localpeer(peer.peerrepository):
121 121 '''peer for a local repo; reflects only the most recent API'''
122 122
123 123 def __init__(self, repo, caps=None):
124 124 if caps is None:
125 125 caps = moderncaps.copy()
126 126 peer.peerrepository.__init__(self)
127 127 self._repo = repo.filtered('served')
128 128 self.ui = repo.ui
129 129 self._caps = repo._restrictcapabilities(caps)
130 130 self.requirements = repo.requirements
131 131 self.supportedformats = repo.supportedformats
132 132
133 133 def close(self):
134 134 self._repo.close()
135 135
136 136 def _capabilities(self):
137 137 return self._caps
138 138
139 139 def local(self):
140 140 return self._repo
141 141
142 142 def canpush(self):
143 143 return True
144 144
145 145 def url(self):
146 146 return self._repo.url()
147 147
148 148 def lookup(self, key):
149 149 return self._repo.lookup(key)
150 150
151 151 def branchmap(self):
152 152 return self._repo.branchmap()
153 153
154 154 def heads(self):
155 155 return self._repo.heads()
156 156
157 157 def known(self, nodes):
158 158 return self._repo.known(nodes)
159 159
160 160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161 161 **kwargs):
162 162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163 163 common=common, bundlecaps=bundlecaps,
164 164 **kwargs)
165 165 cb = util.chunkbuffer(chunks)
166 166
167 167 if exchange.bundle2requested(bundlecaps):
168 168 # When requesting a bundle2, getbundle returns a stream to make the
169 169 # wire level function happier. We need to build a proper object
170 170 # from it in local peer.
171 171 return bundle2.getunbundler(self.ui, cb)
172 172 else:
173 173 return changegroup.getunbundler('01', cb, None)
174 174
175 175 # TODO We might want to move the next two calls into legacypeer and add
176 176 # unbundle instead.
177 177
178 178 def unbundle(self, cg, heads, url):
179 179 """apply a bundle on a repo
180 180
181 181 This function handles the repo locking itself."""
182 182 try:
183 183 try:
184 184 cg = exchange.readbundle(self.ui, cg, None)
185 185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186 186 if util.safehasattr(ret, 'getchunks'):
187 187 # This is a bundle20 object, turn it into an unbundler.
188 188 # This little dance should be dropped eventually when the
189 189 # API is finally improved.
190 190 stream = util.chunkbuffer(ret.getchunks())
191 191 ret = bundle2.getunbundler(self.ui, stream)
192 192 return ret
193 193 except Exception as exc:
194 194 # If the exception contains output salvaged from a bundle2
195 195 # reply, we need to make sure it is printed before continuing
196 196 # to fail. So we build a bundle2 with such output and consume
197 197 # it directly.
198 198 #
199 199 # This is not very elegant but allows a "simple" solution for
200 200 # issue4594
201 201 output = getattr(exc, '_bundle2salvagedoutput', ())
202 202 if output:
203 203 bundler = bundle2.bundle20(self._repo.ui)
204 204 for out in output:
205 205 bundler.addpart(out)
206 206 stream = util.chunkbuffer(bundler.getchunks())
207 207 b = bundle2.getunbundler(self.ui, stream)
208 208 bundle2.processbundle(self._repo, b)
209 209 raise
210 210 except error.PushRaced as exc:
211 211 raise error.ResponseError(_('push failed:'), str(exc))
212 212
213 213 def lock(self):
214 214 return self._repo.lock()
215 215
216 216 def addchangegroup(self, cg, source, url):
217 217 return cg.apply(self._repo, source, url)
218 218
219 219 def pushkey(self, namespace, key, old, new):
220 220 return self._repo.pushkey(namespace, key, old, new)
221 221
222 222 def listkeys(self, namespace):
223 223 return self._repo.listkeys(namespace)
224 224
225 225 def debugwireargs(self, one, two, three=None, four=None, five=None):
226 226 '''used to test argument passing over the wire'''
227 227 return "%s %s %s %s %s" % (one, two, three, four, five)
228 228
229 229 class locallegacypeer(localpeer):
230 230 '''peer extension which implements legacy methods too; used for tests with
231 231 restricted capabilities'''
232 232
233 233 def __init__(self, repo):
234 234 localpeer.__init__(self, repo, caps=legacycaps)
235 235
236 236 def branches(self, nodes):
237 237 return self._repo.branches(nodes)
238 238
239 239 def between(self, pairs):
240 240 return self._repo.between(pairs)
241 241
242 242 def changegroup(self, basenodes, source):
243 243 return changegroup.changegroup(self._repo, basenodes, source)
244 244
245 245 def changegroupsubset(self, bases, heads, source):
246 246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
247 247
248 248 class localrepository(object):
249 249
250 250 supportedformats = {'revlogv1', 'generaldelta', 'treemanifest',
251 251 'manifestv2'}
252 252 _basesupported = supportedformats | {'store', 'fncache', 'shared',
253 253 'relshared', 'dotencode'}
254 254 openerreqs = {'revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'}
255 255 filtername = None
256 256
257 257 # a list of (ui, featureset) functions.
258 258 # only functions defined in module of enabled extensions are invoked
259 259 featuresetupfuncs = set()
260 260
261 261 def __init__(self, baseui, path, create=False):
262 262 self.requirements = set()
263 263 # wvfs: rooted at the repository root, used to access the working copy
264 264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
265 265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
266 266 self.vfs = None
267 267 # svfs: usually rooted at .hg/store, used to access repository history
268 268 # If this is a shared repository, this vfs may point to another
269 269 # repository's .hg/store directory.
270 270 self.svfs = None
271 271 self.root = self.wvfs.base
272 272 self.path = self.wvfs.join(".hg")
273 273 self.origroot = path
274 274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
275 275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
276 276 realfs=False)
277 277 self.vfs = vfsmod.vfs(self.path)
278 278 self.baseui = baseui
279 279 self.ui = baseui.copy()
280 280 self.ui.copy = baseui.copy # prevent copying repo configuration
 281 281         # A list of callbacks to shape the phase if no data were found.
 282 282         # Callbacks are in the form: func(repo, roots) --> processed root.
 283 283         # This list is to be filled by extensions during repo setup
284 284 self._phasedefaults = []
285 285 try:
286 286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
287 287 self._loadextensions()
288 288 except IOError:
289 289 pass
290 290
291 291 if self.featuresetupfuncs:
292 292 self.supported = set(self._basesupported) # use private copy
293 293 extmods = set(m.__name__ for n, m
294 294 in extensions.extensions(self.ui))
295 295 for setupfunc in self.featuresetupfuncs:
296 296 if setupfunc.__module__ in extmods:
297 297 setupfunc(self.ui, self.supported)
298 298 else:
299 299 self.supported = self._basesupported
300 300 color.setup(self.ui)
301 301
302 302 # Add compression engines.
303 303 for name in util.compengines:
304 304 engine = util.compengines[name]
305 305 if engine.revlogheader():
306 306 self.supported.add('exp-compression-%s' % name)
307 307
308 308 if not self.vfs.isdir():
309 309 if create:
310 310 self.requirements = newreporequirements(self)
311 311
312 312 if not self.wvfs.exists():
313 313 self.wvfs.makedirs()
314 314 self.vfs.makedir(notindexed=True)
315 315
316 316 if 'store' in self.requirements:
317 317 self.vfs.mkdir("store")
318 318
319 319 # create an invalid changelog
320 320 self.vfs.append(
321 321 "00changelog.i",
322 322 '\0\0\0\2' # represents revlogv2
323 323 ' dummy changelog to prevent using the old repo layout'
324 324 )
325 325 else:
326 326 raise error.RepoError(_("repository %s not found") % path)
327 327 elif create:
328 328 raise error.RepoError(_("repository %s already exists") % path)
329 329 else:
330 330 try:
331 331 self.requirements = scmutil.readrequires(
332 332 self.vfs, self.supported)
333 333 except IOError as inst:
334 334 if inst.errno != errno.ENOENT:
335 335 raise
336 336
337 337 self.sharedpath = self.path
338 338 try:
339 339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
340 340 if 'relshared' in self.requirements:
341 341 sharedpath = self.vfs.join(sharedpath)
342 342 vfs = vfsmod.vfs(sharedpath, realpath=True)
343 343 s = vfs.base
344 344 if not vfs.exists():
345 345 raise error.RepoError(
346 346 _('.hg/sharedpath points to nonexistent directory %s') % s)
347 347 self.sharedpath = s
348 348 except IOError as inst:
349 349 if inst.errno != errno.ENOENT:
350 350 raise
351 351
352 352 self.store = store.store(
353 353 self.requirements, self.sharedpath, vfsmod.vfs)
354 354 self.spath = self.store.path
355 355 self.svfs = self.store.vfs
356 356 self.sjoin = self.store.join
357 357 self.vfs.createmode = self.store.createmode
358 358 self._applyopenerreqs()
359 359 if create:
360 360 self._writerequirements()
361 361
362 362 self._dirstatevalidatewarned = False
363 363
364 364 self._branchcaches = {}
365 365 self._revbranchcache = None
366 366 self.filterpats = {}
367 367 self._datafilters = {}
368 368 self._transref = self._lockref = self._wlockref = None
369 369
370 370 # A cache for various files under .hg/ that tracks file changes,
371 371 # (used by the filecache decorator)
372 372 #
373 373 # Maps a property name to its util.filecacheentry
374 374 self._filecache = {}
375 375
376 376 # hold sets of revision to be filtered
377 377 # should be cleared when something might have changed the filter value:
378 378 # - new changesets,
379 379 # - phase change,
380 380 # - new obsolescence marker,
381 381 # - working directory parent change,
382 382 # - bookmark changes
383 383 self.filteredrevcache = {}
384 384
385 385 # generic mapping between names and nodes
386 386 self.names = namespaces.namespaces()
387 387
388 388 def close(self):
389 389 self._writecaches()
390 390
391 391 def _loadextensions(self):
392 392 extensions.loadall(self.ui)
393 393
394 394 def _writecaches(self):
395 395 if self._revbranchcache:
396 396 self._revbranchcache.write()
397 397
398 398 def _restrictcapabilities(self, caps):
399 399 if self.ui.configbool('experimental', 'bundle2-advertise', True):
400 400 caps = set(caps)
401 401 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
402 402 caps.add('bundle2=' + urlreq.quote(capsblob))
403 403 return caps
404 404
405 405 def _applyopenerreqs(self):
406 406 self.svfs.options = dict((r, 1) for r in self.requirements
407 407 if r in self.openerreqs)
408 408 # experimental config: format.chunkcachesize
409 409 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
410 410 if chunkcachesize is not None:
411 411 self.svfs.options['chunkcachesize'] = chunkcachesize
412 412 # experimental config: format.maxchainlen
413 413 maxchainlen = self.ui.configint('format', 'maxchainlen')
414 414 if maxchainlen is not None:
415 415 self.svfs.options['maxchainlen'] = maxchainlen
416 416 # experimental config: format.manifestcachesize
417 417 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
418 418 if manifestcachesize is not None:
419 419 self.svfs.options['manifestcachesize'] = manifestcachesize
420 420 # experimental config: format.aggressivemergedeltas
421 421 aggressivemergedeltas = self.ui.configbool('format',
422 422 'aggressivemergedeltas', False)
423 423 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
424 424 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
425 425
426 426 for r in self.requirements:
427 427 if r.startswith('exp-compression-'):
428 428 self.svfs.options['compengine'] = r[len('exp-compression-'):]
429 429
430 430 def _writerequirements(self):
431 431 scmutil.writerequires(self.vfs, self.requirements)
432 432
433 433 def _checknested(self, path):
434 434 """Determine if path is a legal nested repository."""
435 435 if not path.startswith(self.root):
436 436 return False
437 437 subpath = path[len(self.root) + 1:]
438 438 normsubpath = util.pconvert(subpath)
439 439
440 440 # XXX: Checking against the current working copy is wrong in
441 441 # the sense that it can reject things like
442 442 #
443 443 # $ hg cat -r 10 sub/x.txt
444 444 #
445 445 # if sub/ is no longer a subrepository in the working copy
446 446 # parent revision.
447 447 #
448 448 # However, it can of course also allow things that would have
449 449 # been rejected before, such as the above cat command if sub/
450 450 # is a subrepository now, but was a normal directory before.
451 451 # The old path auditor would have rejected by mistake since it
452 452 # panics when it sees sub/.hg/.
453 453 #
454 454 # All in all, checking against the working copy seems sensible
455 455 # since we want to prevent access to nested repositories on
456 456 # the filesystem *now*.
457 457 ctx = self[None]
458 458 parts = util.splitpath(subpath)
459 459 while parts:
460 460 prefix = '/'.join(parts)
461 461 if prefix in ctx.substate:
462 462 if prefix == normsubpath:
463 463 return True
464 464 else:
465 465 sub = ctx.sub(prefix)
466 466 return sub.checknested(subpath[len(prefix) + 1:])
467 467 else:
468 468 parts.pop()
469 469 return False
470 470
471 471 def peer(self):
472 472 return localpeer(self) # not cached to avoid reference cycle
473 473
474 474 def unfiltered(self):
475 475 """Return unfiltered version of the repository
476 476
477 477 Intended to be overwritten by filtered repo."""
478 478 return self
479 479
480 480 def filtered(self, name):
481 481 """Return a filtered version of a repository"""
482 482 # build a new class with the mixin and the current class
483 483 # (possibly subclass of the repo)
484 484 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
485 485 pass
486 486 return filteredrepo(self, name)
487 487
488 488 @repofilecache('bookmarks', 'bookmarks.current')
489 489 def _bookmarks(self):
490 490 return bookmarks.bmstore(self)
491 491
492 492 @property
493 493 def _activebookmark(self):
494 494 return self._bookmarks.active
495 495
496 496 def bookmarkheads(self, bookmark):
497 497 name = bookmark.split('@', 1)[0]
498 498 heads = []
499 499 for mark, n in self._bookmarks.iteritems():
500 500 if mark.split('@', 1)[0] == name:
501 501 heads.append(n)
502 502 return heads
503 503
504 504 # _phaserevs and _phasesets depend on changelog. what we need is to
505 505 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
506 506 # can't be easily expressed in filecache mechanism.
507 507 @storecache('phaseroots', '00changelog.i')
508 508 def _phasecache(self):
509 509 return phases.phasecache(self, self._phasedefaults)
510 510
511 511 @storecache('obsstore')
512 512 def obsstore(self):
513 513 # read default format for new obsstore.
514 514 # developer config: format.obsstore-version
515 515 defaultformat = self.ui.configint('format', 'obsstore-version', None)
516 516 # rely on obsstore class default when possible.
517 517 kwargs = {}
518 518 if defaultformat is not None:
519 519 kwargs['defaultformat'] = defaultformat
520 520 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
521 521 store = obsolete.obsstore(self.svfs, readonly=readonly,
522 522 **kwargs)
523 523 if store and readonly:
524 524 self.ui.warn(
525 525 _('obsolete feature not enabled but %i markers found!\n')
526 526 % len(list(store)))
527 527 return store
528 528
529 529 @storecache('00changelog.i')
530 530 def changelog(self):
531 c = changelog.changelog(self.svfs)
532 if txnutil.mayhavepending(self.root):
533 c.readpending('00changelog.i.a')
534 return c
531 return changelog.changelog(self.svfs,
532 trypending=txnutil.mayhavepending(self.root))
535 533
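
For context: txnutil.mayhavepending is not shown in this diff, but it decides whether this process should see pending data; if memory serves, it compares the repository root against the HG_PENDING environment variable that Mercurial sets for hook subprocesses. The new property body is therefore equivalent to:

    pending = txnutil.mayhavepending(self.root)  # e.g. HG_PENDING == root
    return changelog.changelog(self.svfs, trypending=pending)
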
536 534 def _constructmanifest(self):
537 535 # This is a temporary function while we migrate from manifest to
538 536 # manifestlog. It allows bundlerepo and unionrepo to intercept the
539 537 # manifest creation.
540 538 return manifest.manifestrevlog(self.svfs)
541 539
542 540 @storecache('00manifest.i')
543 541 def manifestlog(self):
544 542 return manifest.manifestlog(self.svfs, self)
545 543
546 544 @repofilecache('dirstate')
547 545 def dirstate(self):
548 546 return dirstate.dirstate(self.vfs, self.ui, self.root,
549 547 self._dirstatevalidate)
550 548
551 549 def _dirstatevalidate(self, node):
552 550 try:
553 551 self.changelog.rev(node)
554 552 return node
555 553 except error.LookupError:
556 554 if not self._dirstatevalidatewarned:
557 555 self._dirstatevalidatewarned = True
558 556 self.ui.warn(_("warning: ignoring unknown"
559 557 " working parent %s!\n") % short(node))
560 558 return nullid
561 559
562 560 def __getitem__(self, changeid):
563 561 if changeid is None or changeid == wdirrev:
564 562 return context.workingctx(self)
565 563 if isinstance(changeid, slice):
566 564 return [context.changectx(self, i)
567 565 for i in xrange(*changeid.indices(len(self)))
568 566 if i not in self.changelog.filteredrevs]
569 567 return context.changectx(self, changeid)
570 568
571 569 def __contains__(self, changeid):
572 570 try:
573 571 self[changeid]
574 572 return True
575 573 except error.RepoLookupError:
576 574 return False
577 575
578 576 def __nonzero__(self):
579 577 return True
580 578
581 579 __bool__ = __nonzero__
582 580
583 581 def __len__(self):
584 582 return len(self.changelog)
585 583
586 584 def __iter__(self):
587 585 return iter(self.changelog)
588 586
589 587 def revs(self, expr, *args):
590 588 '''Find revisions matching a revset.
591 589
592 590 The revset is specified as a string ``expr`` that may contain
593 591 %-formatting to escape certain types. See ``revsetlang.formatspec``.
594 592
595 593 Revset aliases from the configuration are not expanded. To expand
596 594 user aliases, consider calling ``scmutil.revrange()`` or
597 595 ``repo.anyrevs([expr], user=True)``.
598 596
599 597 Returns a revset.abstractsmartset, which is a list-like interface
600 598 that contains integer revisions.
601 599 '''
602 600 expr = revsetlang.formatspec(expr, *args)
603 601 m = revset.match(None, expr)
604 602 return m(self)
605 603
606 604 def set(self, expr, *args):
607 605 '''Find revisions matching a revset and emit changectx instances.
608 606
609 607 This is a convenience wrapper around ``revs()`` that iterates the
610 608 result and is a generator of changectx instances.
611 609
612 610 Revset aliases from the configuration are not expanded. To expand
613 611 user aliases, consider calling ``scmutil.revrange()``.
614 612 '''
615 613 for r in self.revs(expr, *args):
616 614 yield self[r]
617 615
618 616 def anyrevs(self, specs, user=False):
619 617 '''Find revisions matching one of the given revsets.
620 618
621 619 Revset aliases from the configuration are not expanded by default. To
622 620 expand user aliases, specify ``user=True``.
623 621 '''
624 622 if user:
625 623 m = revset.matchany(self.ui, specs, repo=self)
626 624 else:
627 625 m = revset.matchany(None, specs)
628 626 return m(self)
629 627
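
Typical callers of these three helpers look like this (revset strings are illustrative; see revsetlang.formatspec for the %-escapes):

    revs = repo.revs('ancestors(%d) and not public()', 42)  # smartset of ints
    hexes = [ctx.hex() for ctx in repo.set('heads(%ld)', [10, 12])]
    matched = repo.anyrevs(['tip', 'draft()'], user=True)    # expands aliases
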
630 628 def url(self):
631 629 return 'file:' + self.root
632 630
633 631 def hook(self, name, throw=False, **args):
634 632 """Call a hook, passing this repo instance.
635 633
 636 634         This is a convenience method to aid invoking hooks. Extensions likely
637 635 won't call this unless they have registered a custom hook or are
638 636 replacing code that is expected to call a hook.
639 637 """
640 638 return hook.hook(self.ui, self, name, throw, **args)
641 639
642 640 @filteredpropertycache
643 641 def _tagscache(self):
644 642 '''Returns a tagscache object that contains various tags related
645 643 caches.'''
646 644
647 645 # This simplifies its cache management by having one decorated
648 646 # function (this one) and the rest simply fetch things from it.
649 647 class tagscache(object):
650 648 def __init__(self):
651 649 # These two define the set of tags for this repository. tags
652 650 # maps tag name to node; tagtypes maps tag name to 'global' or
653 651 # 'local'. (Global tags are defined by .hgtags across all
654 652 # heads, and local tags are defined in .hg/localtags.)
655 653 # They constitute the in-memory cache of tags.
656 654 self.tags = self.tagtypes = None
657 655
658 656 self.nodetagscache = self.tagslist = None
659 657
660 658 cache = tagscache()
661 659 cache.tags, cache.tagtypes = self._findtags()
662 660
663 661 return cache
664 662
665 663 def tags(self):
666 664 '''return a mapping of tag to node'''
667 665 t = {}
668 666 if self.changelog.filteredrevs:
669 667 tags, tt = self._findtags()
670 668 else:
671 669 tags = self._tagscache.tags
672 670 for k, v in tags.iteritems():
673 671 try:
674 672 # ignore tags to unknown nodes
675 673 self.changelog.rev(v)
676 674 t[k] = v
677 675 except (error.LookupError, ValueError):
678 676 pass
679 677 return t
680 678
681 679 def _findtags(self):
682 680 '''Do the hard work of finding tags. Return a pair of dicts
683 681 (tags, tagtypes) where tags maps tag name to node, and tagtypes
684 682 maps tag name to a string like \'global\' or \'local\'.
685 683 Subclasses or extensions are free to add their own tags, but
686 684 should be aware that the returned dicts will be retained for the
687 685 duration of the localrepo object.'''
688 686
689 687 # XXX what tagtype should subclasses/extensions use? Currently
690 688 # mq and bookmarks add tags, but do not set the tagtype at all.
691 689 # Should each extension invent its own tag type? Should there
692 690 # be one tagtype for all such "virtual" tags? Or is the status
693 691 # quo fine?
694 692
695 693
696 694 # map tag name to (node, hist)
697 695 alltags = tagsmod.findglobaltags(self.ui, self)
698 696 # map tag name to tag type
699 697 tagtypes = dict((tag, 'global') for tag in alltags)
700 698
701 699 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
702 700
703 701 # Build the return dicts. Have to re-encode tag names because
704 702 # the tags module always uses UTF-8 (in order not to lose info
705 703 # writing to the cache), but the rest of Mercurial wants them in
706 704 # local encoding.
707 705 tags = {}
708 706 for (name, (node, hist)) in alltags.iteritems():
709 707 if node != nullid:
710 708 tags[encoding.tolocal(name)] = node
711 709 tags['tip'] = self.changelog.tip()
712 710 tagtypes = dict([(encoding.tolocal(name), value)
713 711 for (name, value) in tagtypes.iteritems()])
714 712 return (tags, tagtypes)
715 713
716 714 def tagtype(self, tagname):
717 715 '''
718 716 return the type of the given tag. result can be:
719 717
720 718 'local' : a local tag
721 719 'global' : a global tag
722 720 None : tag does not exist
723 721 '''
724 722
725 723 return self._tagscache.tagtypes.get(tagname)
726 724
727 725 def tagslist(self):
728 726 '''return a list of tags ordered by revision'''
729 727 if not self._tagscache.tagslist:
730 728 l = []
731 729 for t, n in self.tags().iteritems():
732 730 l.append((self.changelog.rev(n), t, n))
733 731 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
734 732
735 733 return self._tagscache.tagslist
736 734
737 735 def nodetags(self, node):
738 736 '''return the tags associated with a node'''
739 737 if not self._tagscache.nodetagscache:
740 738 nodetagscache = {}
741 739 for t, n in self._tagscache.tags.iteritems():
742 740 nodetagscache.setdefault(n, []).append(t)
743 741 for tags in nodetagscache.itervalues():
744 742 tags.sort()
745 743 self._tagscache.nodetagscache = nodetagscache
746 744 return self._tagscache.nodetagscache.get(node, [])
747 745
748 746 def nodebookmarks(self, node):
749 747 """return the list of bookmarks pointing to the specified node"""
750 748 marks = []
751 749 for bookmark, n in self._bookmarks.iteritems():
752 750 if n == node:
753 751 marks.append(bookmark)
754 752 return sorted(marks)
755 753
756 754 def branchmap(self):
757 755 '''returns a dictionary {branch: [branchheads]} with branchheads
758 756 ordered by increasing revision number'''
759 757 branchmap.updatecache(self)
760 758 return self._branchcaches[self.filtername]
761 759
762 760 @unfilteredmethod
763 761 def revbranchcache(self):
764 762 if not self._revbranchcache:
765 763 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
766 764 return self._revbranchcache
767 765
768 766 def branchtip(self, branch, ignoremissing=False):
769 767 '''return the tip node for a given branch
770 768
771 769 If ignoremissing is True, then this method will not raise an error.
772 770 This is helpful for callers that only expect None for a missing branch
773 771 (e.g. namespace).
774 772
775 773 '''
776 774 try:
777 775 return self.branchmap().branchtip(branch)
778 776 except KeyError:
779 777 if not ignoremissing:
780 778 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
781 779 else:
782 780 pass
783 781
784 782 def lookup(self, key):
785 783 return self[key].node()
786 784
787 785 def lookupbranch(self, key, remote=None):
788 786 repo = remote or self
789 787 if key in repo.branchmap():
790 788 return key
791 789
792 790 repo = (remote and remote.local()) and remote or self
793 791 return repo[key].branch()
794 792
795 793 def known(self, nodes):
796 794 cl = self.changelog
797 795 nm = cl.nodemap
798 796 filtered = cl.filteredrevs
799 797 result = []
800 798 for n in nodes:
801 799 r = nm.get(n)
802 800 resp = not (r is None or r in filtered)
803 801 result.append(resp)
804 802 return result
805 803
806 804 def local(self):
807 805 return self
808 806
809 807 def publishing(self):
810 808 # it's safe (and desirable) to trust the publish flag unconditionally
811 809 # so that we don't finalize changes shared between users via ssh or nfs
812 810 return self.ui.configbool('phases', 'publish', True, untrusted=True)
813 811
814 812 def cancopy(self):
815 813 # so statichttprepo's override of local() works
816 814 if not self.local():
817 815 return False
818 816 if not self.publishing():
819 817 return True
820 818 # if publishing we can't copy if there is filtered content
821 819 return not self.filtered('visible').changelog.filteredrevs
822 820
823 821 def shared(self):
824 822 '''the type of shared repository (None if not shared)'''
825 823 if self.sharedpath != self.path:
826 824 return 'store'
827 825 return None
828 826
829 827 def wjoin(self, f, *insidef):
830 828 return self.vfs.reljoin(self.root, f, *insidef)
831 829
832 830 def file(self, f):
833 831 if f[0] == '/':
834 832 f = f[1:]
835 833 return filelog.filelog(self.svfs, f)
836 834
837 835 def changectx(self, changeid):
838 836 return self[changeid]
839 837
840 838 def setparents(self, p1, p2=nullid):
841 839 self.dirstate.beginparentchange()
842 840 copies = self.dirstate.setparents(p1, p2)
843 841 pctx = self[p1]
844 842 if copies:
845 843 # Adjust copy records, the dirstate cannot do it, it
846 844 # requires access to parents manifests. Preserve them
847 845 # only for entries added to first parent.
848 846 for f in copies:
849 847 if f not in pctx and copies[f] in pctx:
850 848 self.dirstate.copy(copies[f], f)
851 849 if p2 == nullid:
852 850 for f, s in sorted(self.dirstate.copies().items()):
853 851 if f not in pctx and s not in pctx:
854 852 self.dirstate.copy(None, f)
855 853 self.dirstate.endparentchange()
856 854
857 855 def filectx(self, path, changeid=None, fileid=None):
858 856 """changeid can be a changeset revision, node, or tag.
859 857 fileid can be a file revision or node."""
860 858 return context.filectx(self, path, changeid, fileid)
861 859
862 860 def getcwd(self):
863 861 return self.dirstate.getcwd()
864 862
865 863 def pathto(self, f, cwd=None):
866 864 return self.dirstate.pathto(f, cwd)
867 865
868 866 def _loadfilter(self, filter):
869 867 if filter not in self.filterpats:
870 868 l = []
871 869 for pat, cmd in self.ui.configitems(filter):
872 870 if cmd == '!':
873 871 continue
874 872 mf = matchmod.match(self.root, '', [pat])
875 873 fn = None
876 874 params = cmd
877 875 for name, filterfn in self._datafilters.iteritems():
878 876 if cmd.startswith(name):
879 877 fn = filterfn
880 878 params = cmd[len(name):].lstrip()
881 879 break
882 880 if not fn:
883 881 fn = lambda s, c, **kwargs: util.filter(s, c)
884 882 # Wrap old filters not supporting keyword arguments
885 883 if not inspect.getargspec(fn)[2]:
886 884 oldfn = fn
887 885 fn = lambda s, c, **kwargs: oldfn(s, c)
888 886 l.append((mf, fn, params))
889 887 self.filterpats[filter] = l
890 888 return self.filterpats[filter]
891 889
892 890 def _filter(self, filterpats, filename, data):
893 891 for mf, fn, cmd in filterpats:
894 892 if mf(filename):
895 893 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
896 894 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
897 895 break
898 896
899 897 return data
900 898
901 899 @unfilteredpropertycache
902 900 def _encodefilterpats(self):
903 901 return self._loadfilter('encode')
904 902
905 903 @unfilteredpropertycache
906 904 def _decodefilterpats(self):
907 905 return self._loadfilter('decode')
908 906
909 907 def adddatafilter(self, name, filter):
910 908 self._datafilters[name] = filter
911 909
912 910 def wread(self, filename):
913 911 if self.wvfs.islink(filename):
914 912 data = self.wvfs.readlink(filename)
915 913 else:
916 914 data = self.wvfs.read(filename)
917 915 return self._filter(self._encodefilterpats, filename, data)
918 916
919 917 def wwrite(self, filename, data, flags, backgroundclose=False):
920 918 """write ``data`` into ``filename`` in the working directory
921 919
922 920 This returns length of written (maybe decoded) data.
923 921 """
924 922 data = self._filter(self._decodefilterpats, filename, data)
925 923 if 'l' in flags:
926 924 self.wvfs.symlink(data, filename)
927 925 else:
928 926 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
929 927 if 'x' in flags:
930 928 self.wvfs.setflags(filename, False, True)
931 929 return len(data)
932 930
933 931 def wwritedata(self, filename, data):
934 932 return self._filter(self._decodefilterpats, filename, data)
935 933
936 934 def currenttransaction(self):
 937 935         """return the current transaction or None if none exists"""
938 936 if self._transref:
939 937 tr = self._transref()
940 938 else:
941 939 tr = None
942 940
943 941 if tr and tr.running():
944 942 return tr
945 943 return None
946 944
947 945 def transaction(self, desc, report=None):
948 946 if (self.ui.configbool('devel', 'all-warnings')
949 947 or self.ui.configbool('devel', 'check-locks')):
950 948 if self._currentlock(self._lockref) is None:
951 949 raise error.ProgrammingError('transaction requires locking')
952 950 tr = self.currenttransaction()
953 951 if tr is not None:
954 952 return tr.nest()
955 953
956 954 # abort here if the journal already exists
957 955 if self.svfs.exists("journal"):
958 956 raise error.RepoError(
959 957 _("abandoned transaction found"),
960 958 hint=_("run 'hg recover' to clean up transaction"))
961 959
962 960 idbase = "%.40f#%f" % (random.random(), time.time())
963 961 ha = hex(hashlib.sha1(idbase).digest())
964 962 txnid = 'TXN:' + ha
965 963 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
966 964
967 965 self._writejournal(desc)
968 966 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
969 967 if report:
970 968 rp = report
971 969 else:
972 970 rp = self.ui.warn
973 971 vfsmap = {'plain': self.vfs} # root of .hg/
974 972 # we must avoid cyclic reference between repo and transaction.
975 973 reporef = weakref.ref(self)
976 974 # Code to track tag movement
977 975 #
978 976 # Since tags are all handled as file content, it is actually quite hard
 979 977         # to track these movements from a code perspective. So we fall back to
 980 978         # tracking at the repository level. One could envision tracking changes
 981 979         # to the '.hgtags' file through changegroup application, but that fails to
 982 980         # cope with cases where a transaction exposes new heads without a
 983 981         # changegroup being involved (eg: phase movement).
984 982 #
 985 983         # For now, we gate the feature behind a flag since this likely comes
 986 984         # with performance impacts. The current code runs more often than needed
 987 985         # and does not use caches as much as it could. The current focus is on
988 986 # the behavior of the feature so we disable it by default. The flag
989 987 # will be removed when we are happy with the performance impact.
990 988 #
991 989 # Once this feature is no longer experimental move the following
992 990 # documentation to the appropriate help section:
993 991 #
994 992 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
995 993 # tags (new or changed or deleted tags). In addition the details of
996 994 # these changes are made available in a file at:
997 995 # ``REPOROOT/.hg/changes/tags.changes``.
998 996 # Make sure you check for HG_TAG_MOVED before reading that file as it
 999 997         # might exist from a previous transaction even if no tags were touched
 1000 998         # in this one. Changes are recorded in a line-based format::
1001 999 #
1002 1000 # <action> <hex-node> <tag-name>\n
1003 1001 #
 1004 1002         # Actions are defined as follows:
1005 1003 # "-R": tag is removed,
1006 1004 # "+A": tag is added,
1007 1005 # "-M": tag is moved (old value),
1008 1006 # "+M": tag is moved (new value),
1009 1007 tracktags = lambda x: None
1010 1008 # experimental config: experimental.hook-track-tags
1011 1009 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1012 1010 False)
1013 1011 if desc != 'strip' and shouldtracktags:
1014 1012 oldheads = self.changelog.headrevs()
1015 1013 def tracktags(tr2):
1016 1014 repo = reporef()
1017 1015 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1018 1016 newheads = repo.changelog.headrevs()
1019 1017 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
 1020 1018             # note: we compare lists here.
 1021 1019             # As we do it only once, building a set would not be cheaper
1022 1020 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1023 1021 if changes:
1024 1022 tr2.hookargs['tag_moved'] = '1'
1025 1023 with repo.vfs('changes/tags.changes', 'w',
1026 1024 atomictemp=True) as changesfile:
1027 1025 # note: we do not register the file to the transaction
 1028 1026                     # because we need it to still exist when the transaction
 1029 1027                     # is closed (for txnclose hooks)
1030 1028 tagsmod.writediff(changesfile, changes)
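
Given the line format documented above, a txnclose hook could consume the file along these lines (a sketch; the hook plumbing is hypothetical):

    import os

    def tagmovedhook(ui, repo, **kwargs):
        if not os.environ.get('HG_TAG_MOVED'):
            return
        with repo.vfs('changes/tags.changes') as fh:
            for line in fh:
                action, node, tag = line.rstrip('\n').split(' ', 2)
                # action is one of "-R", "+A", "-M" or "+M", per the list
                # above; node is a hex changeset id
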
1031 1029 def validate(tr2):
1032 1030 """will run pre-closing hooks"""
1033 1031 # XXX the transaction API is a bit lacking here so we take a hacky
1034 1032 # path for now
1035 1033 #
1036 1034 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1037 1035 # dict is copied before these run. In addition, we need the data
1038 1036 # available to in-memory hooks too.
1039 1037 #
1040 1038 # Moreover, we also need to make sure this runs before txnclose
1041 1039 # hooks and there is no "pending" mechanism that would execute
1042 1040 # logic only if hooks are about to run.
1043 1041 #
1044 1042 # Fixing this limitation of the transaction is also needed to track
1045 1043 # other families of changes (bookmarks, phases, obsolescence).
1046 1044 #
1047 1045 # This will have to be fixed before we remove the experimental
1048 1046 # gating.
1049 1047 tracktags(tr2)
1050 1048 reporef().hook('pretxnclose', throw=True,
1051 1049 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1052 1050 def releasefn(tr, success):
1053 1051 repo = reporef()
1054 1052 if success:
1055 1053 # this should be explicitly invoked here, because
1056 1054 # in-memory changes aren't written out when closing
1057 1055 # the transaction, if tr.addfilegenerator (via
1058 1056 # dirstate.write or so) wasn't invoked while the
1059 1057 # transaction was running
1060 1058 repo.dirstate.write(None)
1061 1059 else:
1062 1060 # discard all changes (including ones already written
1063 1061 # out) in this transaction
1064 1062 repo.dirstate.restorebackup(None, prefix='journal.')
1065 1063
1066 1064 repo.invalidate(clearfilecache=True)
1067 1065
1068 1066 tr = transaction.transaction(rp, self.svfs, vfsmap,
1069 1067 "journal",
1070 1068 "undo",
1071 1069 aftertrans(renames),
1072 1070 self.store.createmode,
1073 1071 validator=validate,
1074 1072 releasefn=releasefn)
1075 1073 tr.changes['revs'] = set()
1076 1074
1077 1075 tr.hookargs['txnid'] = txnid
1078 1076 # note: writing the fncache only during finalize means that the file is
1079 1077 # outdated when running hooks. As fncache is used for streaming clone,
1080 1078 # this is not expected to break anything that happens during the hooks.
1081 1079 tr.addfinalize('flush-fncache', self.store.write)
1082 1080 def txnclosehook(tr2):
1083 1081 """To be run if transaction is successful, will schedule a hook run
1084 1082 """
1085 1083 # Don't reference tr2 in hook() so we don't hold a reference.
1086 1084 # This reduces memory consumption when there are multiple
1087 1085 # transactions per lock. This can likely go away if issue5045
1088 1086 # fixes the function accumulation.
1089 1087 hookargs = tr2.hookargs
1090 1088
1091 1089 def hook():
1092 1090 reporef().hook('txnclose', throw=False, txnname=desc,
1093 1091 **pycompat.strkwargs(hookargs))
1094 1092 reporef()._afterlock(hook)
1095 1093 tr.addfinalize('txnclose-hook', txnclosehook)
1096 1094 def warmscache(tr2):
1097 1095 repo = reporef()
1098 1096 repo.updatecaches(tr2)
1099 1097 tr.addpostclose('warms-cache', warmscache)
1100 1098 def txnaborthook(tr2):
1101 1099 """To be run if transaction is aborted
1102 1100 """
1103 1101 reporef().hook('txnabort', throw=False, txnname=desc,
1104 1102 **tr2.hookargs)
1105 1103 tr.addabort('txnabort-hook', txnaborthook)
1106 1104 # avoid eager cache invalidation. In-memory data should be identical
1107 1105 # to stored data if the transaction has no error.
1108 1106 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1109 1107 self._transref = weakref.ref(tr)
1110 1108 return tr
1111 1109
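# Illustrative usage sketch (an assumption, mirroring the pattern used by
# 'commit' below): callers acquire 'wlock' before 'lock' before opening a
# transaction, and release everything in reverse order.
#
#     wlock = lock = tr = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()
#         tr = repo.transaction('example')
#         # ... mutate the store ...
#         tr.close()
#     finally:
#         lockmod.release(tr, lock, wlock)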
1112 1110 def _journalfiles(self):
1113 1111 return ((self.svfs, 'journal'),
1114 1112 (self.vfs, 'journal.dirstate'),
1115 1113 (self.vfs, 'journal.branch'),
1116 1114 (self.vfs, 'journal.desc'),
1117 1115 (self.vfs, 'journal.bookmarks'),
1118 1116 (self.svfs, 'journal.phaseroots'))
1119 1117
1120 1118 def undofiles(self):
1121 1119 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1122 1120
1123 1121 def _writejournal(self, desc):
1124 1122 self.dirstate.savebackup(None, prefix='journal.')
1125 1123 self.vfs.write("journal.branch",
1126 1124 encoding.fromlocal(self.dirstate.branch()))
1127 1125 self.vfs.write("journal.desc",
1128 1126 "%d\n%s\n" % (len(self), desc))
1129 1127 self.vfs.write("journal.bookmarks",
1130 1128 self.vfs.tryread("bookmarks"))
1131 1129 self.svfs.write("journal.phaseroots",
1132 1130 self.svfs.tryread("phaseroots"))
1133 1131
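# For illustration: given the format string above, 'journal.desc' for a
# repository with 42 revisions opening a 'commit' transaction contains::
#
#     42
#     commit
#
# '_rollback' below parses the renamed 'undo.desc' copy of this file to
# report what is being undone.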
1134 1132 def recover(self):
1135 1133 with self.lock():
1136 1134 if self.svfs.exists("journal"):
1137 1135 self.ui.status(_("rolling back interrupted transaction\n"))
1138 1136 vfsmap = {'': self.svfs,
1139 1137 'plain': self.vfs,}
1140 1138 transaction.rollback(self.svfs, vfsmap, "journal",
1141 1139 self.ui.warn)
1142 1140 self.invalidate()
1143 1141 return True
1144 1142 else:
1145 1143 self.ui.warn(_("no interrupted transaction available\n"))
1146 1144 return False
1147 1145
1148 1146 def rollback(self, dryrun=False, force=False):
1149 1147 wlock = lock = dsguard = None
1150 1148 try:
1151 1149 wlock = self.wlock()
1152 1150 lock = self.lock()
1153 1151 if self.svfs.exists("undo"):
1154 1152 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1155 1153
1156 1154 return self._rollback(dryrun, force, dsguard)
1157 1155 else:
1158 1156 self.ui.warn(_("no rollback information available\n"))
1159 1157 return 1
1160 1158 finally:
1161 1159 release(dsguard, lock, wlock)
1162 1160
1163 1161 @unfilteredmethod # Until we get smarter cache management
1164 1162 def _rollback(self, dryrun, force, dsguard):
1165 1163 ui = self.ui
1166 1164 try:
1167 1165 args = self.vfs.read('undo.desc').splitlines()
1168 1166 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1169 1167 if len(args) >= 3:
1170 1168 detail = args[2]
1171 1169 oldtip = oldlen - 1
1172 1170
1173 1171 if detail and ui.verbose:
1174 1172 msg = (_('repository tip rolled back to revision %s'
1175 1173 ' (undo %s: %s)\n')
1176 1174 % (oldtip, desc, detail))
1177 1175 else:
1178 1176 msg = (_('repository tip rolled back to revision %s'
1179 1177 ' (undo %s)\n')
1180 1178 % (oldtip, desc))
1181 1179 except IOError:
1182 1180 msg = _('rolling back unknown transaction\n')
1183 1181 desc = None
1184 1182
1185 1183 if not force and self['.'] != self['tip'] and desc == 'commit':
1186 1184 raise error.Abort(
1187 1185 _('rollback of last commit while not checked out '
1188 1186 'may lose data'), hint=_('use -f to force'))
1189 1187
1190 1188 ui.status(msg)
1191 1189 if dryrun:
1192 1190 return 0
1193 1191
1194 1192 parents = self.dirstate.parents()
1195 1193 self.destroying()
1196 1194 vfsmap = {'plain': self.vfs, '': self.svfs}
1197 1195 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1198 1196 if self.vfs.exists('undo.bookmarks'):
1199 1197 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1200 1198 if self.svfs.exists('undo.phaseroots'):
1201 1199 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1202 1200 self.invalidate()
1203 1201
1204 1202 parentgone = (parents[0] not in self.changelog.nodemap or
1205 1203 parents[1] not in self.changelog.nodemap)
1206 1204 if parentgone:
1207 1205 # prevent dirstateguard from overwriting already restored one
1208 1206 dsguard.close()
1209 1207
1210 1208 self.dirstate.restorebackup(None, prefix='undo.')
1211 1209 try:
1212 1210 branch = self.vfs.read('undo.branch')
1213 1211 self.dirstate.setbranch(encoding.tolocal(branch))
1214 1212 except IOError:
1215 1213 ui.warn(_('named branch could not be reset: '
1216 1214 'current branch is still \'%s\'\n')
1217 1215 % self.dirstate.branch())
1218 1216
1219 1217 parents = tuple([p.rev() for p in self[None].parents()])
1220 1218 if len(parents) > 1:
1221 1219 ui.status(_('working directory now based on '
1222 1220 'revisions %d and %d\n') % parents)
1223 1221 else:
1224 1222 ui.status(_('working directory now based on '
1225 1223 'revision %d\n') % parents)
1226 1224 mergemod.mergestate.clean(self, self['.'].node())
1227 1225
1228 1226 # TODO: if we know which new heads may result from this rollback, pass
1229 1227 # them to destroy(), which will prevent the branchhead cache from being
1230 1228 # invalidated.
1231 1229 self.destroyed()
1232 1230 return 0
1233 1231
1234 1232 @unfilteredmethod
1235 1233 def updatecaches(self, tr=None):
1236 1234 """warm appropriate caches
1237 1235
1238 1236 If this function is called after a transaction closed, the transaction
1239 1237 will be available in the 'tr' argument. This can be used to selectively
1240 1238 update caches relevant to the changes in that transaction.
1241 1239 """
1242 1240 if tr is not None and tr.hookargs.get('source') == 'strip':
1243 1241 # During strip, many caches are invalid, but a
1244 1242 # later call to `destroyed` will refresh them.
1245 1243 return
1246 1244
1247 1245 if tr is None or tr.changes['revs']:
1248 1246 # updating the unfiltered branchmap should refresh all the others,
1249 1247 self.ui.debug('updating the branch cache\n')
1250 1248 branchmap.updatecache(self.filtered('served'))
1251 1249
1252 1250 def invalidatecaches(self):
1253 1251
1254 1252 if '_tagscache' in vars(self):
1255 1253 # can't use delattr on proxy
1256 1254 del self.__dict__['_tagscache']
1257 1255
1258 1256 self.unfiltered()._branchcaches.clear()
1259 1257 self.invalidatevolatilesets()
1260 1258
1261 1259 def invalidatevolatilesets(self):
1262 1260 self.filteredrevcache.clear()
1263 1261 obsolete.clearobscaches(self)
1264 1262
1265 1263 def invalidatedirstate(self):
1266 1264 '''Invalidates the dirstate, causing the next call to dirstate
1267 1265 to check if it was modified since the last time it was read,
1268 1266 rereading it if it has.
1269 1267
1270 1268 This is different from dirstate.invalidate() in that it doesn't always
1271 1269 reread the dirstate. Use dirstate.invalidate() if you want to
1272 1270 explicitly read the dirstate again (i.e. restoring it to a previous
1273 1271 known good state).'''
1274 1272 if hasunfilteredcache(self, 'dirstate'):
1275 1273 for k in self.dirstate._filecache:
1276 1274 try:
1277 1275 delattr(self.dirstate, k)
1278 1276 except AttributeError:
1279 1277 pass
1280 1278 delattr(self.unfiltered(), 'dirstate')
1281 1279
1282 1280 def invalidate(self, clearfilecache=False):
1283 1281 '''Invalidates both store and non-store parts other than dirstate
1284 1282
1285 1283 If a transaction is running, invalidation of store is omitted,
1286 1284 because discarding in-memory changes might cause inconsistency
1287 1285 (e.g. incomplete fncache causes unintentional failure, but
1288 1286 redundant one doesn't).
1289 1287 '''
1290 1288 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1291 1289 for k in list(self._filecache.keys()):
1292 1290 # dirstate is invalidated separately in invalidatedirstate()
1293 1291 if k == 'dirstate':
1294 1292 continue
1295 1293
1296 1294 if clearfilecache:
1297 1295 del self._filecache[k]
1298 1296 try:
1299 1297 delattr(unfiltered, k)
1300 1298 except AttributeError:
1301 1299 pass
1302 1300 self.invalidatecaches()
1303 1301 if not self.currenttransaction():
1304 1302 # TODO: Changing contents of store outside transaction
1305 1303 # causes inconsistency. We should make in-memory store
1306 1304 # changes detectable, and abort if changed.
1307 1305 self.store.invalidatecaches()
1308 1306
1309 1307 def invalidateall(self):
1310 1308 '''Fully invalidates both store and non-store parts, causing the
1311 1309 subsequent operation to reread any outside changes.'''
1312 1310 # extension should hook this to invalidate its caches
1313 1311 self.invalidate()
1314 1312 self.invalidatedirstate()
1315 1313
1316 1314 @unfilteredmethod
1317 1315 def _refreshfilecachestats(self, tr):
1318 1316 """Reload stats of cached files so that they are flagged as valid"""
1319 1317 for k, ce in self._filecache.items():
1320 1318 if k == 'dirstate' or k not in self.__dict__:
1321 1319 continue
1322 1320 ce.refresh()
1323 1321
1324 1322 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1325 1323 inheritchecker=None, parentenvvar=None):
1326 1324 parentlock = None
1327 1325 # the contents of parentenvvar are used by the underlying lock to
1328 1326 # determine whether it can be inherited
1329 1327 if parentenvvar is not None:
1330 1328 parentlock = encoding.environ.get(parentenvvar)
1331 1329 try:
1332 1330 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1333 1331 acquirefn=acquirefn, desc=desc,
1334 1332 inheritchecker=inheritchecker,
1335 1333 parentlock=parentlock)
1336 1334 except error.LockHeld as inst:
1337 1335 if not wait:
1338 1336 raise
1339 1337 # show more details for new-style locks
1340 1338 if ':' in inst.locker:
1341 1339 host, pid = inst.locker.split(":", 1)
1342 1340 self.ui.warn(
1343 1341 _("waiting for lock on %s held by process %r "
1344 1342 "on host %r\n") % (desc, pid, host))
1345 1343 else:
1346 1344 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1347 1345 (desc, inst.locker))
1348 1346 # default to 600 seconds timeout
1349 1347 l = lockmod.lock(vfs, lockname,
1350 1348 int(self.ui.config("ui", "timeout", "600")),
1351 1349 releasefn=releasefn, acquirefn=acquirefn,
1352 1350 desc=desc)
1353 1351 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1354 1352 return l
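# For illustration: the wait timeout read above comes from the 'ui'
# section of the configuration, e.g.::
#
#     [ui]
#     timeout = 30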
1355 1353
1356 1354 def _afterlock(self, callback):
1357 1355 """add a callback to be run when the repository is fully unlocked
1358 1356
1359 1357 The callback will be executed when the outermost lock is released
1360 1358 (with wlock being higher level than 'lock')."""
1361 1359 for ref in (self._wlockref, self._lockref):
1362 1360 l = ref and ref()
1363 1361 if l and l.held:
1364 1362 l.postrelease.append(callback)
1365 1363 break
1366 1364 else: # no lock has been found.
1367 1365 callback()
1368 1366
1369 1367 def lock(self, wait=True):
1370 1368 '''Lock the repository store (.hg/store) and return a weak reference
1371 1369 to the lock. Use this before modifying the store (e.g. committing or
1372 1370 stripping). If you are opening a transaction, get a lock as well.
1373 1371
1374 1372 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1375 1373 'wlock' first to avoid a deadlock hazard.'''
1376 1374 l = self._currentlock(self._lockref)
1377 1375 if l is not None:
1378 1376 l.lock()
1379 1377 return l
1380 1378
1381 1379 l = self._lock(self.svfs, "lock", wait, None,
1382 1380 self.invalidate, _('repository %s') % self.origroot)
1383 1381 self._lockref = weakref.ref(l)
1384 1382 return l
1385 1383
1386 1384 def _wlockchecktransaction(self):
1387 1385 if self.currenttransaction() is not None:
1388 1386 raise error.LockInheritanceContractViolation(
1389 1387 'wlock cannot be inherited in the middle of a transaction')
1390 1388
1391 1389 def wlock(self, wait=True):
1392 1390 '''Lock the non-store parts of the repository (everything under
1393 1391 .hg except .hg/store) and return a weak reference to the lock.
1394 1392
1395 1393 Use this before modifying files in .hg.
1396 1394
1397 1395 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1398 1396 'wlock' first to avoid a deadlock hazard.'''
1399 1397 l = self._wlockref and self._wlockref()
1400 1398 if l is not None and l.held:
1401 1399 l.lock()
1402 1400 return l
1403 1401
1404 1402 # We do not need to check for non-waiting lock acquisition. Such
1405 1403 # acquisition would not cause dead-lock as they would just fail.
1406 1404 if wait and (self.ui.configbool('devel', 'all-warnings')
1407 1405 or self.ui.configbool('devel', 'check-locks')):
1408 1406 if self._currentlock(self._lockref) is not None:
1409 1407 self.ui.develwarn('"wlock" acquired after "lock"')
1410 1408
1411 1409 def unlock():
1412 1410 if self.dirstate.pendingparentchange():
1413 1411 self.dirstate.invalidate()
1414 1412 else:
1415 1413 self.dirstate.write(None)
1416 1414
1417 1415 self._filecache['dirstate'].refresh()
1418 1416
1419 1417 l = self._lock(self.vfs, "wlock", wait, unlock,
1420 1418 self.invalidatedirstate, _('working directory of %s') %
1421 1419 self.origroot,
1422 1420 inheritchecker=self._wlockchecktransaction,
1423 1421 parentenvvar='HG_WLOCK_LOCKER')
1424 1422 self._wlockref = weakref.ref(l)
1425 1423 return l
1426 1424
1427 1425 def _currentlock(self, lockref):
1428 1426 """Returns the lock if it's held, or None if it's not."""
1429 1427 if lockref is None:
1430 1428 return None
1431 1429 l = lockref()
1432 1430 if l is None or not l.held:
1433 1431 return None
1434 1432 return l
1435 1433
1436 1434 def currentwlock(self):
1437 1435 """Returns the wlock if it's held, or None if it's not."""
1438 1436 return self._currentlock(self._wlockref)
1439 1437
1440 1438 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1441 1439 """
1442 1440 commit an individual file as part of a larger transaction
1443 1441 """
1444 1442
1445 1443 fname = fctx.path()
1446 1444 fparent1 = manifest1.get(fname, nullid)
1447 1445 fparent2 = manifest2.get(fname, nullid)
1448 1446 if isinstance(fctx, context.filectx):
1449 1447 node = fctx.filenode()
1450 1448 if node in [fparent1, fparent2]:
1451 1449 self.ui.debug('reusing %s filelog entry\n' % fname)
1452 1450 if manifest1.flags(fname) != fctx.flags():
1453 1451 changelist.append(fname)
1454 1452 return node
1455 1453
1456 1454 flog = self.file(fname)
1457 1455 meta = {}
1458 1456 copy = fctx.renamed()
1459 1457 if copy and copy[0] != fname:
1460 1458 # Mark the new revision of this file as a copy of another
1461 1459 # file. This copy data will effectively act as a parent
1462 1460 # of this new revision. If this is a merge, the first
1463 1461 # parent will be the nullid (meaning "look up the copy data")
1464 1462 # and the second one will be the other parent. For example:
1465 1463 #
1466 1464 # 0 --- 1 --- 3 rev1 changes file foo
1467 1465 # \ / rev2 renames foo to bar and changes it
1468 1466 # \- 2 -/ rev3 should have bar with all changes and
1469 1467 # should record that bar descends from
1470 1468 # bar in rev2 and foo in rev1
1471 1469 #
1472 1470 # this allows this merge to succeed:
1473 1471 #
1474 1472 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1475 1473 # \ / merging rev3 and rev4 should use bar@rev2
1476 1474 # \- 2 --- 4 as the merge base
1477 1475 #
1478 1476
1479 1477 cfname = copy[0]
1480 1478 crev = manifest1.get(cfname)
1481 1479 newfparent = fparent2
1482 1480
1483 1481 if manifest2: # branch merge
1484 1482 if fparent2 == nullid or crev is None: # copied on remote side
1485 1483 if cfname in manifest2:
1486 1484 crev = manifest2[cfname]
1487 1485 newfparent = fparent1
1488 1486
1489 1487 # Here, we used to search backwards through history to try to find
1490 1488 # where the file copy came from if the source of a copy was not in
1491 1489 # the parent directory. However, this doesn't actually make sense to
1492 1490 # do (what does a copy from something not in your working copy even
1493 1491 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1494 1492 # the user that copy information was dropped, so if they didn't
1495 1493 # expect this outcome it can be fixed, but this is the correct
1496 1494 # behavior in this circumstance.
1497 1495
1498 1496 if crev:
1499 1497 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1500 1498 meta["copy"] = cfname
1501 1499 meta["copyrev"] = hex(crev)
1502 1500 fparent1, fparent2 = nullid, newfparent
1503 1501 else:
1504 1502 self.ui.warn(_("warning: can't find ancestor for '%s' "
1505 1503 "copied from '%s'!\n") % (fname, cfname))
1506 1504
1507 1505 elif fparent1 == nullid:
1508 1506 fparent1, fparent2 = fparent2, nullid
1509 1507 elif fparent2 != nullid:
1510 1508 # is one parent an ancestor of the other?
1511 1509 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1512 1510 if fparent1 in fparentancestors:
1513 1511 fparent1, fparent2 = fparent2, nullid
1514 1512 elif fparent2 in fparentancestors:
1515 1513 fparent2 = nullid
1516 1514
1517 1515 # is the file changed?
1518 1516 text = fctx.data()
1519 1517 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1520 1518 changelist.append(fname)
1521 1519 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1522 1520 # are just the flags changed during merge?
1523 1521 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1524 1522 changelist.append(fname)
1525 1523
1526 1524 return fparent1
1527 1525
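# For illustration: when fctx.renamed() reports a copy of 'foo' whose
# source revision is found in the first manifest, the filelog revision
# above is written with metadata along the lines of::
#
#     meta = {'copy': 'foo', 'copyrev': hex(crev)}
#
# with fparent1 set to nullid so readers know to look up the copy data.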
1528 1526 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1529 1527 """check for commit arguments that aren't committable"""
1530 1528 if match.isexact() or match.prefix():
1531 1529 matched = set(status.modified + status.added + status.removed)
1532 1530
1533 1531 for f in match.files():
1534 1532 f = self.dirstate.normalize(f)
1535 1533 if f == '.' or f in matched or f in wctx.substate:
1536 1534 continue
1537 1535 if f in status.deleted:
1538 1536 fail(f, _('file not found!'))
1539 1537 if f in vdirs: # visited directory
1540 1538 d = f + '/'
1541 1539 for mf in matched:
1542 1540 if mf.startswith(d):
1543 1541 break
1544 1542 else:
1545 1543 fail(f, _("no match under directory!"))
1546 1544 elif f not in self.dirstate:
1547 1545 fail(f, _("file not tracked!"))
1548 1546
1549 1547 @unfilteredmethod
1550 1548 def commit(self, text="", user=None, date=None, match=None, force=False,
1551 1549 editor=False, extra=None):
1552 1550 """Add a new revision to current repository.
1553 1551
1554 1552 Revision information is gathered from the working directory;
1555 1553 match can be used to filter the committed files. If editor is
1556 1554 supplied, it is called to get a commit message.
1557 1555 """
1558 1556 if extra is None:
1559 1557 extra = {}
1560 1558
1561 1559 def fail(f, msg):
1562 1560 raise error.Abort('%s: %s' % (f, msg))
1563 1561
1564 1562 if not match:
1565 1563 match = matchmod.always(self.root, '')
1566 1564
1567 1565 if not force:
1568 1566 vdirs = []
1569 1567 match.explicitdir = vdirs.append
1570 1568 match.bad = fail
1571 1569
1572 1570 wlock = lock = tr = None
1573 1571 try:
1574 1572 wlock = self.wlock()
1575 1573 lock = self.lock() # for recent changelog (see issue4368)
1576 1574
1577 1575 wctx = self[None]
1578 1576 merge = len(wctx.parents()) > 1
1579 1577
1580 1578 if not force and merge and match.ispartial():
1581 1579 raise error.Abort(_('cannot partially commit a merge '
1582 1580 '(do not specify files or patterns)'))
1583 1581
1584 1582 status = self.status(match=match, clean=force)
1585 1583 if force:
1586 1584 status.modified.extend(status.clean) # mq may commit clean files
1587 1585
1588 1586 # check subrepos
1589 1587 subs = []
1590 1588 commitsubs = set()
1591 1589 newstate = wctx.substate.copy()
1592 1590 # only manage subrepos and .hgsubstate if .hgsub is present
1593 1591 if '.hgsub' in wctx:
1594 1592 # we'll decide whether to track this ourselves, thanks
1595 1593 for c in status.modified, status.added, status.removed:
1596 1594 if '.hgsubstate' in c:
1597 1595 c.remove('.hgsubstate')
1598 1596
1599 1597 # compare current state to last committed state
1600 1598 # build new substate based on last committed state
1601 1599 oldstate = wctx.p1().substate
1602 1600 for s in sorted(newstate.keys()):
1603 1601 if not match(s):
1604 1602 # ignore working copy, use old state if present
1605 1603 if s in oldstate:
1606 1604 newstate[s] = oldstate[s]
1607 1605 continue
1608 1606 if not force:
1609 1607 raise error.Abort(
1610 1608 _("commit with new subrepo %s excluded") % s)
1611 1609 dirtyreason = wctx.sub(s).dirtyreason(True)
1612 1610 if dirtyreason:
1613 1611 if not self.ui.configbool('ui', 'commitsubrepos'):
1614 1612 raise error.Abort(dirtyreason,
1615 1613 hint=_("use --subrepos for recursive commit"))
1616 1614 subs.append(s)
1617 1615 commitsubs.add(s)
1618 1616 else:
1619 1617 bs = wctx.sub(s).basestate()
1620 1618 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1621 1619 if oldstate.get(s, (None, None, None))[1] != bs:
1622 1620 subs.append(s)
1623 1621
1624 1622 # check for removed subrepos
1625 1623 for p in wctx.parents():
1626 1624 r = [s for s in p.substate if s not in newstate]
1627 1625 subs += [s for s in r if match(s)]
1628 1626 if subs:
1629 1627 if (not match('.hgsub') and
1630 1628 '.hgsub' in (wctx.modified() + wctx.added())):
1631 1629 raise error.Abort(
1632 1630 _("can't commit subrepos without .hgsub"))
1633 1631 status.modified.insert(0, '.hgsubstate')
1634 1632
1635 1633 elif '.hgsub' in status.removed:
1636 1634 # clean up .hgsubstate when .hgsub is removed
1637 1635 if ('.hgsubstate' in wctx and
1638 1636 '.hgsubstate' not in (status.modified + status.added +
1639 1637 status.removed)):
1640 1638 status.removed.insert(0, '.hgsubstate')
1641 1639
1642 1640 # make sure all explicit patterns are matched
1643 1641 if not force:
1644 1642 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1645 1643
1646 1644 cctx = context.workingcommitctx(self, status,
1647 1645 text, user, date, extra)
1648 1646
1649 1647 # internal config: ui.allowemptycommit
1650 1648 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1651 1649 or extra.get('close') or merge or cctx.files()
1652 1650 or self.ui.configbool('ui', 'allowemptycommit'))
1653 1651 if not allowemptycommit:
1654 1652 return None
1655 1653
1656 1654 if merge and cctx.deleted():
1657 1655 raise error.Abort(_("cannot commit merge with missing files"))
1658 1656
1659 1657 ms = mergemod.mergestate.read(self)
1660 1658 mergeutil.checkunresolved(ms)
1661 1659
1662 1660 if editor:
1663 1661 cctx._text = editor(self, cctx, subs)
1664 1662 edited = (text != cctx._text)
1665 1663
1666 1664 # Save commit message in case this transaction gets rolled back
1667 1665 # (e.g. by a pretxncommit hook). Leave the content alone on
1668 1666 # the assumption that the user will use the same editor again.
1669 1667 msgfn = self.savecommitmessage(cctx._text)
1670 1668
1671 1669 # commit subs and write new state
1672 1670 if subs:
1673 1671 for s in sorted(commitsubs):
1674 1672 sub = wctx.sub(s)
1675 1673 self.ui.status(_('committing subrepository %s\n') %
1676 1674 subrepo.subrelpath(sub))
1677 1675 sr = sub.commit(cctx._text, user, date)
1678 1676 newstate[s] = (newstate[s][0], sr)
1679 1677 subrepo.writestate(self, newstate)
1680 1678
1681 1679 p1, p2 = self.dirstate.parents()
1682 1680 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1683 1681 try:
1684 1682 self.hook("precommit", throw=True, parent1=hookp1,
1685 1683 parent2=hookp2)
1686 1684 tr = self.transaction('commit')
1687 1685 ret = self.commitctx(cctx, True)
1688 1686 except: # re-raises
1689 1687 if edited:
1690 1688 self.ui.write(
1691 1689 _('note: commit message saved in %s\n') % msgfn)
1692 1690 raise
1693 1691 # update bookmarks, dirstate and mergestate
1694 1692 bookmarks.update(self, [p1, p2], ret)
1695 1693 cctx.markcommitted(ret)
1696 1694 ms.reset()
1697 1695 tr.close()
1698 1696
1699 1697 finally:
1700 1698 lockmod.release(tr, lock, wlock)
1701 1699
1702 1700 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1703 1701 # hack for commands that use a temporary commit (eg: histedit):
1704 1702 # the temporary commit may have been stripped before the hook runs
1705 1703 if self.changelog.hasnode(ret):
1706 1704 self.hook("commit", node=node, parent1=parent1,
1707 1705 parent2=parent2)
1708 1706 self._afterlock(commithook)
1709 1707 return ret
1710 1708
1711 1709 @unfilteredmethod
1712 1710 def commitctx(self, ctx, error=False):
1713 1711 """Add a new revision to current repository.
1714 1712 Revision information is passed via the context argument.
1715 1713 """
1716 1714
1717 1715 tr = None
1718 1716 p1, p2 = ctx.p1(), ctx.p2()
1719 1717 user = ctx.user()
1720 1718
1721 1719 lock = self.lock()
1722 1720 try:
1723 1721 tr = self.transaction("commit")
1724 1722 trp = weakref.proxy(tr)
1725 1723
1726 1724 if ctx.manifestnode():
1727 1725 # reuse an existing manifest revision
1728 1726 mn = ctx.manifestnode()
1729 1727 files = ctx.files()
1730 1728 elif ctx.files():
1731 1729 m1ctx = p1.manifestctx()
1732 1730 m2ctx = p2.manifestctx()
1733 1731 mctx = m1ctx.copy()
1734 1732
1735 1733 m = mctx.read()
1736 1734 m1 = m1ctx.read()
1737 1735 m2 = m2ctx.read()
1738 1736
1739 1737 # check in files
1740 1738 added = []
1741 1739 changed = []
1742 1740 removed = list(ctx.removed())
1743 1741 linkrev = len(self)
1744 1742 self.ui.note(_("committing files:\n"))
1745 1743 for f in sorted(ctx.modified() + ctx.added()):
1746 1744 self.ui.note(f + "\n")
1747 1745 try:
1748 1746 fctx = ctx[f]
1749 1747 if fctx is None:
1750 1748 removed.append(f)
1751 1749 else:
1752 1750 added.append(f)
1753 1751 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1754 1752 trp, changed)
1755 1753 m.setflag(f, fctx.flags())
1756 1754 except OSError as inst:
1757 1755 self.ui.warn(_("trouble committing %s!\n") % f)
1758 1756 raise
1759 1757 except IOError as inst:
1760 1758 errcode = getattr(inst, 'errno', errno.ENOENT)
1761 1759 if error or errcode and errcode != errno.ENOENT:
1762 1760 self.ui.warn(_("trouble committing %s!\n") % f)
1763 1761 raise
1764 1762
1765 1763 # update manifest
1766 1764 self.ui.note(_("committing manifest\n"))
1767 1765 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1768 1766 drop = [f for f in removed if f in m]
1769 1767 for f in drop:
1770 1768 del m[f]
1771 1769 mn = mctx.write(trp, linkrev,
1772 1770 p1.manifestnode(), p2.manifestnode(),
1773 1771 added, drop)
1774 1772 files = changed + removed
1775 1773 else:
1776 1774 mn = p1.manifestnode()
1777 1775 files = []
1778 1776
1779 1777 # update changelog
1780 1778 self.ui.note(_("committing changelog\n"))
1781 1779 self.changelog.delayupdate(tr)
1782 1780 n = self.changelog.add(mn, files, ctx.description(),
1783 1781 trp, p1.node(), p2.node(),
1784 1782 user, ctx.date(), ctx.extra().copy())
1785 1783 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1786 1784 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1787 1785 parent2=xp2)
1788 1786 # set the new commit in its proper phase
1789 1787 targetphase = subrepo.newcommitphase(self.ui, ctx)
1790 1788 if targetphase:
1791 1789 # retracting the boundary does not alter parent changesets.
1792 1790 # if a parent has a higher phase, the resulting phase will
1793 1791 # be compliant anyway
1794 1792 #
1795 1793 # if minimal phase was 0 we don't need to retract anything
1796 1794 phases.retractboundary(self, tr, targetphase, [n])
1797 1795 tr.close()
1798 1796 return n
1799 1797 finally:
1800 1798 if tr:
1801 1799 tr.release()
1802 1800 lock.release()
1803 1801
1804 1802 @unfilteredmethod
1805 1803 def destroying(self):
1806 1804 '''Inform the repository that nodes are about to be destroyed.
1807 1805 Intended for use by strip and rollback, so there's a common
1808 1806 place for anything that has to be done before destroying history.
1809 1807
1810 1808 This is mostly useful for saving state that is in memory and waiting
1811 1809 to be flushed when the current lock is released. Because a call to
1812 1810 destroyed is imminent, the repo will be invalidated causing those
1813 1811 changes to stay in memory (waiting for the next unlock), or vanish
1814 1812 completely.
1815 1813 '''
1816 1814 # When using the same lock to commit and strip, the phasecache is left
1817 1815 # dirty after committing. Then when we strip, the repo is invalidated,
1818 1816 # causing those changes to disappear.
1819 1817 if '_phasecache' in vars(self):
1820 1818 self._phasecache.write()
1821 1819
1822 1820 @unfilteredmethod
1823 1821 def destroyed(self):
1824 1822 '''Inform the repository that nodes have been destroyed.
1825 1823 Intended for use by strip and rollback, so there's a common
1826 1824 place for anything that has to be done after destroying history.
1827 1825 '''
1828 1826 # When one tries to:
1829 1827 # 1) destroy nodes thus calling this method (e.g. strip)
1830 1828 # 2) use phasecache somewhere (e.g. commit)
1831 1829 #
1832 1830 # then 2) will fail because the phasecache contains nodes that were
1833 1831 # removed. We can either remove phasecache from the filecache,
1834 1832 # causing it to reload next time it is accessed, or simply filter
1835 1833 # the removed nodes now and write the updated cache.
1836 1834 self._phasecache.filterunknown(self)
1837 1835 self._phasecache.write()
1838 1836
1839 1837 # refresh all repository caches
1840 1838 self.updatecaches()
1841 1839
1842 1840 # Ensure the persistent tag cache is updated. Doing it now
1843 1841 # means that the tag cache only has to worry about destroyed
1844 1842 # heads immediately after a strip/rollback. That in turn
1845 1843 # guarantees that "cachetip == currenttip" (comparing both rev
1846 1844 # and node) always means no nodes have been added or destroyed.
1847 1845
1848 1846 # XXX this is suboptimal when qrefresh'ing: we strip the current
1849 1847 # head, refresh the tag cache, then immediately add a new head.
1850 1848 # But I think doing it this way is necessary for the "instant
1851 1849 # tag cache retrieval" case to work.
1852 1850 self.invalidate()
1853 1851
1854 1852 def walk(self, match, node=None):
1855 1853 '''
1856 1854 walk recursively through the directory tree or a given
1857 1855 changeset, finding all files matched by the match
1858 1856 function
1859 1857 '''
1860 1858 return self[node].walk(match)
1861 1859
1862 1860 def status(self, node1='.', node2=None, match=None,
1863 1861 ignored=False, clean=False, unknown=False,
1864 1862 listsubrepos=False):
1865 1863 '''a convenience method that calls node1.status(node2)'''
1866 1864 return self[node1].status(node2, match, ignored, clean, unknown,
1867 1865 listsubrepos)
1868 1866
1869 1867 def heads(self, start=None):
1870 1868 if start is None:
1871 1869 cl = self.changelog
1872 1870 headrevs = reversed(cl.headrevs())
1873 1871 return [cl.node(rev) for rev in headrevs]
1874 1872
1875 1873 heads = self.changelog.heads(start)
1876 1874 # sort the output in rev descending order
1877 1875 return sorted(heads, key=self.changelog.rev, reverse=True)
1878 1876
1879 1877 def branchheads(self, branch=None, start=None, closed=False):
1880 1878 '''return a (possibly filtered) list of heads for the given branch
1881 1879
1882 1880 Heads are returned in topological order, from newest to oldest.
1883 1881 If branch is None, use the dirstate branch.
1884 1882 If start is not None, return only heads reachable from start.
1885 1883 If closed is True, return heads that are marked as closed as well.
1886 1884 '''
1887 1885 if branch is None:
1888 1886 branch = self[None].branch()
1889 1887 branches = self.branchmap()
1890 1888 if branch not in branches:
1891 1889 return []
1892 1890 # the cache returns heads ordered lowest to highest
1893 1891 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1894 1892 if start is not None:
1895 1893 # filter out the heads that cannot be reached from startrev
1896 1894 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1897 1895 bheads = [h for h in bheads if h in fbheads]
1898 1896 return bheads
1899 1897
1900 1898 def branches(self, nodes):
1901 1899 if not nodes:
1902 1900 nodes = [self.changelog.tip()]
1903 1901 b = []
1904 1902 for n in nodes:
1905 1903 t = n
1906 1904 while True:
1907 1905 p = self.changelog.parents(n)
1908 1906 if p[1] != nullid or p[0] == nullid:
1909 1907 b.append((t, n, p[0], p[1]))
1910 1908 break
1911 1909 n = p[0]
1912 1910 return b
1913 1911
1914 1912 def between(self, pairs):
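# For each (top, bottom) pair this samples the first-parent chain at
# exponentially growing distances below top (1, 2, 4, 8, ... steps),
# so the result stays logarithmic in the length of the range.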
1915 1913 r = []
1916 1914
1917 1915 for top, bottom in pairs:
1918 1916 n, l, i = top, [], 0
1919 1917 f = 1
1920 1918
1921 1919 while n != bottom and n != nullid:
1922 1920 p = self.changelog.parents(n)[0]
1923 1921 if i == f:
1924 1922 l.append(n)
1925 1923 f = f * 2
1926 1924 n = p
1927 1925 i += 1
1928 1926
1929 1927 r.append(l)
1930 1928
1931 1929 return r
1932 1930
1933 1931 def checkpush(self, pushop):
1934 1932 """Extensions can override this function if additional checks have
1935 1933 to be performed before pushing, or call it if they override push
1936 1934 command.
1937 1935 """
1938 1936 pass
1939 1937
1940 1938 @unfilteredpropertycache
1941 1939 def prepushoutgoinghooks(self):
1942 1940 """Return util.hooks consists of a pushop with repo, remote, outgoing
1943 1941 methods, which are called before pushing changesets.
1944 1942 """
1945 1943 return util.hooks()
1946 1944
1947 1945 def pushkey(self, namespace, key, old, new):
1948 1946 try:
1949 1947 tr = self.currenttransaction()
1950 1948 hookargs = {}
1951 1949 if tr is not None:
1952 1950 hookargs.update(tr.hookargs)
1953 1951 hookargs['namespace'] = namespace
1954 1952 hookargs['key'] = key
1955 1953 hookargs['old'] = old
1956 1954 hookargs['new'] = new
1957 1955 self.hook('prepushkey', throw=True, **hookargs)
1958 1956 except error.HookAbort as exc:
1959 1957 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1960 1958 if exc.hint:
1961 1959 self.ui.write_err(_("(%s)\n") % exc.hint)
1962 1960 return False
1963 1961 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1964 1962 ret = pushkey.push(self, namespace, key, old, new)
1965 1963 def runhook():
1966 1964 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1967 1965 ret=ret)
1968 1966 self._afterlock(runhook)
1969 1967 return ret
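# Illustrative configuration sketch: a shell 'prepushkey' hook sees the
# arguments above as HG_NAMESPACE, HG_KEY, HG_OLD and HG_NEW environment
# variables. For example, to refuse any bookmark change over pushkey::
#
#     [hooks]
#     prepushkey = test "$HG_NAMESPACE" != bookmarks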
1970 1968
1971 1969 def listkeys(self, namespace):
1972 1970 self.hook('prelistkeys', throw=True, namespace=namespace)
1973 1971 self.ui.debug('listing keys for "%s"\n' % namespace)
1974 1972 values = pushkey.list(self, namespace)
1975 1973 self.hook('listkeys', namespace=namespace, values=values)
1976 1974 return values
1977 1975
1978 1976 def debugwireargs(self, one, two, three=None, four=None, five=None):
1979 1977 '''used to test argument passing over the wire'''
1980 1978 return "%s %s %s %s %s" % (one, two, three, four, five)
1981 1979
1982 1980 def savecommitmessage(self, text):
1983 1981 fp = self.vfs('last-message.txt', 'wb')
1984 1982 try:
1985 1983 fp.write(text)
1986 1984 finally:
1987 1985 fp.close()
1988 1986 return self.pathto(fp.name[len(self.root) + 1:])
1989 1987
1990 1988 # used to avoid circular references so destructors work
1991 1989 def aftertrans(files):
1992 1990 renamefiles = [tuple(t) for t in files]
1993 1991 def a():
1994 1992 for vfs, src, dest in renamefiles:
1995 1993 # if src and dest refer to the same file, vfs.rename is a no-op,
1996 1994 # leaving both src and dest on disk. Delete dest to make sure
1997 1995 # the rename cannot be such a no-op.
1998 1996 vfs.tryunlink(dest)
1999 1997 try:
2000 1998 vfs.rename(src, dest)
2001 1999 except OSError: # journal file does not yet exist
2002 2000 pass
2003 2001 return a
2004 2002
2005 2003 def undoname(fn):
2006 2004 base, name = os.path.split(fn)
2007 2005 assert name.startswith('journal')
2008 2006 return os.path.join(base, name.replace('journal', 'undo', 1))
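# for illustration: undoname('journal.dirstate') == 'undo.dirstate'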
2009 2007
2010 2008 def instance(ui, path, create):
2011 2009 return localrepository(ui, util.urllocalpath(path), create)
2012 2010
2013 2011 def islocal(path):
2014 2012 return True
2015 2013
2016 2014 def newreporequirements(repo):
2017 2015 """Determine the set of requirements for a new local repository.
2018 2016
2019 2017 Extensions can wrap this function to specify custom requirements for
2020 2018 new repositories.
2021 2019 """
2022 2020 ui = repo.ui
2023 2021 requirements = {'revlogv1'}
2024 2022 if ui.configbool('format', 'usestore', True):
2025 2023 requirements.add('store')
2026 2024 if ui.configbool('format', 'usefncache', True):
2027 2025 requirements.add('fncache')
2028 2026 if ui.configbool('format', 'dotencode', True):
2029 2027 requirements.add('dotencode')
2030 2028
2031 2029 compengine = ui.config('experimental', 'format.compression', 'zlib')
2032 2030 if compengine not in util.compengines:
2033 2031 raise error.Abort(_('compression engine %s defined by '
2034 2032 'experimental.format.compression not available') %
2035 2033 compengine,
2036 2034 hint=_('run "hg debuginstall" to list available '
2037 2035 'compression engines'))
2038 2036
2039 2037 # zlib is the historical default and doesn't need an explicit requirement.
2040 2038 if compengine != 'zlib':
2041 2039 requirements.add('exp-compression-%s' % compengine)
2042 2040
2043 2041 if scmutil.gdinitconfig(ui):
2044 2042 requirements.add('generaldelta')
2045 2043 if ui.configbool('experimental', 'treemanifest', False):
2046 2044 requirements.add('treemanifest')
2047 2045 if ui.configbool('experimental', 'manifestv2', False):
2048 2046 requirements.add('manifestv2')
2049 2047
2050 2048 return requirements
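# For illustration: with stock defaults (store, fncache and dotencode
# enabled, zlib compression and generaldelta on), this returns roughly::
#
#     {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}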