changelog: load pending file directly...
Gregory Szorc
r32292:0ad0d26f default
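For context: before this change, a changelog was always constructed against 00changelog.i, and a separate readpending() call (removed in the hunk below) later overwrote the in-memory index with data from a pending 00changelog.i.a file. After the change, the constructor itself selects the pending index when asked to. A minimal sketch of the new selection logic, mirroring the added __init__ code in the diff (the wrapper class name here is illustrative only, not part of the patch):

    class pendingawarechangelog(object):
        """Sketch: choose the index file at construction time.

        '00changelog.i.a' holds index (and possibly inline revision) data
        for a transaction that has not been finalized yet, so readers such
        as hook subprocesses can see it before the transaction commits.
        """
        def __init__(self, opener, trypending=False):
            # Prefer the pending index only when the caller opted in and
            # the file actually exists.
            if trypending and opener.exists('00changelog.i.a'):
                self.indexfile = '00changelog.i.a'
            else:
                self.indexfile = '00changelog.i'
            self.opener = opener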
@@ -1,548 +1,541 @@
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 bin,
14 bin,
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 encoding,
20 encoding,
21 error,
21 error,
22 revlog,
22 revlog,
23 util,
23 util,
24 )
24 )
25
25
26 _defaultextra = {'branch': 'default'}
26 _defaultextra = {'branch': 'default'}
27
27
28 def _string_escape(text):
28 def _string_escape(text):
29 """
29 """
30 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
30 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
31 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
31 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
32 >>> s
32 >>> s
33 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
33 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
34 >>> res = _string_escape(s)
34 >>> res = _string_escape(s)
35 >>> s == util.unescapestr(res)
35 >>> s == util.unescapestr(res)
36 True
36 True
37 """
37 """
38 # subset of the string_escape codec
38 # subset of the string_escape codec
39 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
39 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
40 return text.replace('\0', '\\0')
40 return text.replace('\0', '\\0')
41
41
42 def decodeextra(text):
42 def decodeextra(text):
43 """
43 """
44 >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
44 >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
45 ... ).iteritems())
45 ... ).iteritems())
46 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
46 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
47 >>> sorted(decodeextra(encodeextra({'foo': 'bar',
47 >>> sorted(decodeextra(encodeextra({'foo': 'bar',
48 ... 'baz': chr(92) + chr(0) + '2'})
48 ... 'baz': chr(92) + chr(0) + '2'})
49 ... ).iteritems())
49 ... ).iteritems())
50 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
50 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
51 """
51 """
52 extra = _defaultextra.copy()
52 extra = _defaultextra.copy()
53 for l in text.split('\0'):
53 for l in text.split('\0'):
54 if l:
54 if l:
55 if '\\0' in l:
55 if '\\0' in l:
56 # fix up \0 without getting into trouble with \\0
56 # fix up \0 without getting into trouble with \\0
57 l = l.replace('\\\\', '\\\\\n')
57 l = l.replace('\\\\', '\\\\\n')
58 l = l.replace('\\0', '\0')
58 l = l.replace('\\0', '\0')
59 l = l.replace('\n', '')
59 l = l.replace('\n', '')
60 k, v = util.unescapestr(l).split(':', 1)
60 k, v = util.unescapestr(l).split(':', 1)
61 extra[k] = v
61 extra[k] = v
62 return extra
62 return extra
63
63
64 def encodeextra(d):
64 def encodeextra(d):
65 # keys must be sorted to produce a deterministic changelog entry
65 # keys must be sorted to produce a deterministic changelog entry
66 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
66 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
67 return "\0".join(items)
67 return "\0".join(items)
68
68
69 def stripdesc(desc):
69 def stripdesc(desc):
70 """strip trailing whitespace and leading and trailing empty lines"""
70 """strip trailing whitespace and leading and trailing empty lines"""
71 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
71 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
72
72
73 class appender(object):
73 class appender(object):
74 '''the changelog index must be updated last on disk, so we use this class
74 '''the changelog index must be updated last on disk, so we use this class
75 to delay writes to it'''
75 to delay writes to it'''
76 def __init__(self, vfs, name, mode, buf):
76 def __init__(self, vfs, name, mode, buf):
77 self.data = buf
77 self.data = buf
78 fp = vfs(name, mode)
78 fp = vfs(name, mode)
79 self.fp = fp
79 self.fp = fp
80 self.offset = fp.tell()
80 self.offset = fp.tell()
81 self.size = vfs.fstat(fp).st_size
81 self.size = vfs.fstat(fp).st_size
82 self._end = self.size
82 self._end = self.size
83
83
84 def end(self):
84 def end(self):
85 return self._end
85 return self._end
86 def tell(self):
86 def tell(self):
87 return self.offset
87 return self.offset
88 def flush(self):
88 def flush(self):
89 pass
89 pass
90 def close(self):
90 def close(self):
91 self.fp.close()
91 self.fp.close()
92
92
93 def seek(self, offset, whence=0):
93 def seek(self, offset, whence=0):
94 '''virtual file offset spans real file and data'''
94 '''virtual file offset spans real file and data'''
95 if whence == 0:
95 if whence == 0:
96 self.offset = offset
96 self.offset = offset
97 elif whence == 1:
97 elif whence == 1:
98 self.offset += offset
98 self.offset += offset
99 elif whence == 2:
99 elif whence == 2:
100 self.offset = self.end() + offset
100 self.offset = self.end() + offset
101 if self.offset < self.size:
101 if self.offset < self.size:
102 self.fp.seek(self.offset)
102 self.fp.seek(self.offset)
103
103
104 def read(self, count=-1):
104 def read(self, count=-1):
105 '''only trick here is reads that span real file and data'''
105 '''only trick here is reads that span real file and data'''
106 ret = ""
106 ret = ""
107 if self.offset < self.size:
107 if self.offset < self.size:
108 s = self.fp.read(count)
108 s = self.fp.read(count)
109 ret = s
109 ret = s
110 self.offset += len(s)
110 self.offset += len(s)
111 if count > 0:
111 if count > 0:
112 count -= len(s)
112 count -= len(s)
113 if count != 0:
113 if count != 0:
114 doff = self.offset - self.size
114 doff = self.offset - self.size
115 self.data.insert(0, "".join(self.data))
115 self.data.insert(0, "".join(self.data))
116 del self.data[1:]
116 del self.data[1:]
117 s = self.data[0][doff:doff + count]
117 s = self.data[0][doff:doff + count]
118 self.offset += len(s)
118 self.offset += len(s)
119 ret += s
119 ret += s
120 return ret
120 return ret
121
121
122 def write(self, s):
122 def write(self, s):
123 self.data.append(bytes(s))
123 self.data.append(bytes(s))
124 self.offset += len(s)
124 self.offset += len(s)
125 self._end += len(s)
125 self._end += len(s)
126
126
127 def _divertopener(opener, target):
127 def _divertopener(opener, target):
128 """build an opener that writes in 'target.a' instead of 'target'"""
128 """build an opener that writes in 'target.a' instead of 'target'"""
129 def _divert(name, mode='r', checkambig=False):
129 def _divert(name, mode='r', checkambig=False):
130 if name != target:
130 if name != target:
131 return opener(name, mode)
131 return opener(name, mode)
132 return opener(name + ".a", mode)
132 return opener(name + ".a", mode)
133 return _divert
133 return _divert
134
134
135 def _delayopener(opener, target, buf):
135 def _delayopener(opener, target, buf):
136 """build an opener that stores chunks in 'buf' instead of 'target'"""
136 """build an opener that stores chunks in 'buf' instead of 'target'"""
137 def _delay(name, mode='r', checkambig=False):
137 def _delay(name, mode='r', checkambig=False):
138 if name != target:
138 if name != target:
139 return opener(name, mode)
139 return opener(name, mode)
140 return appender(opener, name, mode, buf)
140 return appender(opener, name, mode, buf)
141 return _delay
141 return _delay
142
142
143 _changelogrevision = collections.namedtuple(u'changelogrevision',
143 _changelogrevision = collections.namedtuple(u'changelogrevision',
144 (u'manifest', u'user', u'date',
144 (u'manifest', u'user', u'date',
145 u'files', u'description',
145 u'files', u'description',
146 u'extra'))
146 u'extra'))
147
147
148 class changelogrevision(object):
148 class changelogrevision(object):
149 """Holds results of a parsed changelog revision.
149 """Holds results of a parsed changelog revision.
150
150
151 Changelog revisions consist of multiple pieces of data, including
151 Changelog revisions consist of multiple pieces of data, including
152 the manifest node, user, and date. This object exposes a view into
152 the manifest node, user, and date. This object exposes a view into
153 the parsed object.
153 the parsed object.
154 """
154 """
155
155
156 __slots__ = (
156 __slots__ = (
157 u'_offsets',
157 u'_offsets',
158 u'_text',
158 u'_text',
159 )
159 )
160
160
161 def __new__(cls, text):
161 def __new__(cls, text):
162 if not text:
162 if not text:
163 return _changelogrevision(
163 return _changelogrevision(
164 manifest=nullid,
164 manifest=nullid,
165 user='',
165 user='',
166 date=(0, 0),
166 date=(0, 0),
167 files=[],
167 files=[],
168 description='',
168 description='',
169 extra=_defaultextra,
169 extra=_defaultextra,
170 )
170 )
171
171
172 self = super(changelogrevision, cls).__new__(cls)
172 self = super(changelogrevision, cls).__new__(cls)
173 # We could return here and implement the following as an __init__.
173 # We could return here and implement the following as an __init__.
174 # But doing it here is equivalent and saves an extra function call.
174 # But doing it here is equivalent and saves an extra function call.
175
175
176 # format used:
176 # format used:
177 # nodeid\n : manifest node in ascii
177 # nodeid\n : manifest node in ascii
178 # user\n : user, no \n or \r allowed
178 # user\n : user, no \n or \r allowed
179 # time tz extra\n : date (time is int or float, timezone is int)
179 # time tz extra\n : date (time is int or float, timezone is int)
180 # : extra is metadata, encoded and separated by '\0'
180 # : extra is metadata, encoded and separated by '\0'
181 # : older versions ignore it
181 # : older versions ignore it
182 # files\n\n : files modified by the cset, no \n or \r allowed
182 # files\n\n : files modified by the cset, no \n or \r allowed
183 # (.*) : comment (free text, ideally utf-8)
183 # (.*) : comment (free text, ideally utf-8)
184 #
184 #
185 # changelog v0 doesn't use extra
185 # changelog v0 doesn't use extra
186
186
187 nl1 = text.index('\n')
187 nl1 = text.index('\n')
188 nl2 = text.index('\n', nl1 + 1)
188 nl2 = text.index('\n', nl1 + 1)
189 nl3 = text.index('\n', nl2 + 1)
189 nl3 = text.index('\n', nl2 + 1)
190
190
191 # The list of files may be empty. Which means nl3 is the first of the
191 # The list of files may be empty. Which means nl3 is the first of the
192 # double newline that precedes the description.
192 # double newline that precedes the description.
193 if text[nl3 + 1:nl3 + 2] == '\n':
193 if text[nl3 + 1:nl3 + 2] == '\n':
194 doublenl = nl3
194 doublenl = nl3
195 else:
195 else:
196 doublenl = text.index('\n\n', nl3 + 1)
196 doublenl = text.index('\n\n', nl3 + 1)
197
197
198 self._offsets = (nl1, nl2, nl3, doublenl)
198 self._offsets = (nl1, nl2, nl3, doublenl)
199 self._text = text
199 self._text = text
200
200
201 return self
201 return self
202
202
203 @property
203 @property
204 def manifest(self):
204 def manifest(self):
205 return bin(self._text[0:self._offsets[0]])
205 return bin(self._text[0:self._offsets[0]])
206
206
207 @property
207 @property
208 def user(self):
208 def user(self):
209 off = self._offsets
209 off = self._offsets
210 return encoding.tolocal(self._text[off[0] + 1:off[1]])
210 return encoding.tolocal(self._text[off[0] + 1:off[1]])
211
211
212 @property
212 @property
213 def _rawdate(self):
213 def _rawdate(self):
214 off = self._offsets
214 off = self._offsets
215 dateextra = self._text[off[1] + 1:off[2]]
215 dateextra = self._text[off[1] + 1:off[2]]
216 return dateextra.split(' ', 2)[0:2]
216 return dateextra.split(' ', 2)[0:2]
217
217
218 @property
218 @property
219 def _rawextra(self):
219 def _rawextra(self):
220 off = self._offsets
220 off = self._offsets
221 dateextra = self._text[off[1] + 1:off[2]]
221 dateextra = self._text[off[1] + 1:off[2]]
222 fields = dateextra.split(' ', 2)
222 fields = dateextra.split(' ', 2)
223 if len(fields) != 3:
223 if len(fields) != 3:
224 return None
224 return None
225
225
226 return fields[2]
226 return fields[2]
227
227
228 @property
228 @property
229 def date(self):
229 def date(self):
230 raw = self._rawdate
230 raw = self._rawdate
231 time = float(raw[0])
231 time = float(raw[0])
232 # Various tools did silly things with the timezone.
232 # Various tools did silly things with the timezone.
233 try:
233 try:
234 timezone = int(raw[1])
234 timezone = int(raw[1])
235 except ValueError:
235 except ValueError:
236 timezone = 0
236 timezone = 0
237
237
238 return time, timezone
238 return time, timezone
239
239
240 @property
240 @property
241 def extra(self):
241 def extra(self):
242 raw = self._rawextra
242 raw = self._rawextra
243 if raw is None:
243 if raw is None:
244 return _defaultextra
244 return _defaultextra
245
245
246 return decodeextra(raw)
246 return decodeextra(raw)
247
247
248 @property
248 @property
249 def files(self):
249 def files(self):
250 off = self._offsets
250 off = self._offsets
251 if off[2] == off[3]:
251 if off[2] == off[3]:
252 return []
252 return []
253
253
254 return self._text[off[2] + 1:off[3]].split('\n')
254 return self._text[off[2] + 1:off[3]].split('\n')
255
255
256 @property
256 @property
257 def description(self):
257 def description(self):
258 return encoding.tolocal(self._text[self._offsets[3] + 2:])
258 return encoding.tolocal(self._text[self._offsets[3] + 2:])
259
259
260 class changelog(revlog.revlog):
260 class changelog(revlog.revlog):
261 def __init__(self, opener):
261 def __init__(self, opener, trypending=False):
262 revlog.revlog.__init__(self, opener, "00changelog.i",
262 """Load a changelog revlog using an opener.
263 checkambig=True)
263
264 If ``trypending`` is true, we attempt to load the index from a
265 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
266 The ``00changelog.i.a`` file contains index (and possibly inline
267 revision) data for a transaction that hasn't been finalized yet.
268 It exists in a separate file to facilitate readers (such as
269 hooks processes) accessing data before a transaction is finalized.
270 """
271 if trypending and opener.exists('00changelog.i.a'):
272 indexfile = '00changelog.i.a'
273 else:
274 indexfile = '00changelog.i'
275
276 revlog.revlog.__init__(self, opener, indexfile, checkambig=True)
277
264 if self._initempty:
278 if self._initempty:
265 # changelogs don't benefit from generaldelta
279 # changelogs don't benefit from generaldelta
266 self.version &= ~revlog.REVLOGGENERALDELTA
280 self.version &= ~revlog.REVLOGGENERALDELTA
267 self._generaldelta = False
281 self._generaldelta = False
268
282
269 # Delta chains for changelogs tend to be very small because entries
283 # Delta chains for changelogs tend to be very small because entries
270 # tend to be small and don't delta well with each. So disable delta
284 # tend to be small and don't delta well with each. So disable delta
271 # chains.
285 # chains.
272 self.storedeltachains = False
286 self.storedeltachains = False
273
287
274 self._realopener = opener
288 self._realopener = opener
275 self._delayed = False
289 self._delayed = False
276 self._delaybuf = None
290 self._delaybuf = None
277 self._divert = False
291 self._divert = False
278 self.filteredrevs = frozenset()
292 self.filteredrevs = frozenset()
279
293
280 def tip(self):
294 def tip(self):
281 """filtered version of revlog.tip"""
295 """filtered version of revlog.tip"""
282 for i in xrange(len(self) -1, -2, -1):
296 for i in xrange(len(self) -1, -2, -1):
283 if i not in self.filteredrevs:
297 if i not in self.filteredrevs:
284 return self.node(i)
298 return self.node(i)
285
299
286 def __contains__(self, rev):
300 def __contains__(self, rev):
287 """filtered version of revlog.__contains__"""
301 """filtered version of revlog.__contains__"""
288 return (0 <= rev < len(self)
302 return (0 <= rev < len(self)
289 and rev not in self.filteredrevs)
303 and rev not in self.filteredrevs)
290
304
291 def __iter__(self):
305 def __iter__(self):
292 """filtered version of revlog.__iter__"""
306 """filtered version of revlog.__iter__"""
293 if len(self.filteredrevs) == 0:
307 if len(self.filteredrevs) == 0:
294 return revlog.revlog.__iter__(self)
308 return revlog.revlog.__iter__(self)
295
309
296 def filterediter():
310 def filterediter():
297 for i in xrange(len(self)):
311 for i in xrange(len(self)):
298 if i not in self.filteredrevs:
312 if i not in self.filteredrevs:
299 yield i
313 yield i
300
314
301 return filterediter()
315 return filterediter()
302
316
303 def revs(self, start=0, stop=None):
317 def revs(self, start=0, stop=None):
304 """filtered version of revlog.revs"""
318 """filtered version of revlog.revs"""
305 for i in super(changelog, self).revs(start, stop):
319 for i in super(changelog, self).revs(start, stop):
306 if i not in self.filteredrevs:
320 if i not in self.filteredrevs:
307 yield i
321 yield i
308
322
309 @util.propertycache
323 @util.propertycache
310 def nodemap(self):
324 def nodemap(self):
311 # XXX need filtering too
325 # XXX need filtering too
312 self.rev(self.node(0))
326 self.rev(self.node(0))
313 return self._nodecache
327 return self._nodecache
314
328
315 def reachableroots(self, minroot, heads, roots, includepath=False):
329 def reachableroots(self, minroot, heads, roots, includepath=False):
316 return self.index.reachableroots2(minroot, heads, roots, includepath)
330 return self.index.reachableroots2(minroot, heads, roots, includepath)
317
331
318 def headrevs(self):
332 def headrevs(self):
319 if self.filteredrevs:
333 if self.filteredrevs:
320 try:
334 try:
321 return self.index.headrevsfiltered(self.filteredrevs)
335 return self.index.headrevsfiltered(self.filteredrevs)
322 # AttributeError covers non-c-extension environments and
336 # AttributeError covers non-c-extension environments and
323 # old c extensions without filter handling.
337 # old c extensions without filter handling.
324 except AttributeError:
338 except AttributeError:
325 return self._headrevs()
339 return self._headrevs()
326
340
327 return super(changelog, self).headrevs()
341 return super(changelog, self).headrevs()
328
342
329 def strip(self, *args, **kwargs):
343 def strip(self, *args, **kwargs):
330 # XXX make something better than assert
344 # XXX make something better than assert
331 # We can't expect proper strip behavior if we are filtered.
345 # We can't expect proper strip behavior if we are filtered.
332 assert not self.filteredrevs
346 assert not self.filteredrevs
333 super(changelog, self).strip(*args, **kwargs)
347 super(changelog, self).strip(*args, **kwargs)
334
348
335 def rev(self, node):
349 def rev(self, node):
336 """filtered version of revlog.rev"""
350 """filtered version of revlog.rev"""
337 r = super(changelog, self).rev(node)
351 r = super(changelog, self).rev(node)
338 if r in self.filteredrevs:
352 if r in self.filteredrevs:
339 raise error.FilteredLookupError(hex(node), self.indexfile,
353 raise error.FilteredLookupError(hex(node), self.indexfile,
340 _('filtered node'))
354 _('filtered node'))
341 return r
355 return r
342
356
343 def node(self, rev):
357 def node(self, rev):
344 """filtered version of revlog.node"""
358 """filtered version of revlog.node"""
345 if rev in self.filteredrevs:
359 if rev in self.filteredrevs:
346 raise error.FilteredIndexError(rev)
360 raise error.FilteredIndexError(rev)
347 return super(changelog, self).node(rev)
361 return super(changelog, self).node(rev)
348
362
349 def linkrev(self, rev):
363 def linkrev(self, rev):
350 """filtered version of revlog.linkrev"""
364 """filtered version of revlog.linkrev"""
351 if rev in self.filteredrevs:
365 if rev in self.filteredrevs:
352 raise error.FilteredIndexError(rev)
366 raise error.FilteredIndexError(rev)
353 return super(changelog, self).linkrev(rev)
367 return super(changelog, self).linkrev(rev)
354
368
355 def parentrevs(self, rev):
369 def parentrevs(self, rev):
356 """filtered version of revlog.parentrevs"""
370 """filtered version of revlog.parentrevs"""
357 if rev in self.filteredrevs:
371 if rev in self.filteredrevs:
358 raise error.FilteredIndexError(rev)
372 raise error.FilteredIndexError(rev)
359 return super(changelog, self).parentrevs(rev)
373 return super(changelog, self).parentrevs(rev)
360
374
361 def flags(self, rev):
375 def flags(self, rev):
362 """filtered version of revlog.flags"""
376 """filtered version of revlog.flags"""
363 if rev in self.filteredrevs:
377 if rev in self.filteredrevs:
364 raise error.FilteredIndexError(rev)
378 raise error.FilteredIndexError(rev)
365 return super(changelog, self).flags(rev)
379 return super(changelog, self).flags(rev)
366
380
367 def delayupdate(self, tr):
381 def delayupdate(self, tr):
368 "delay visibility of index updates to other readers"
382 "delay visibility of index updates to other readers"
369
383
370 if not self._delayed:
384 if not self._delayed:
371 if len(self) == 0:
385 if len(self) == 0:
372 self._divert = True
386 self._divert = True
373 if self._realopener.exists(self.indexfile + '.a'):
387 if self._realopener.exists(self.indexfile + '.a'):
374 self._realopener.unlink(self.indexfile + '.a')
388 self._realopener.unlink(self.indexfile + '.a')
375 self.opener = _divertopener(self._realopener, self.indexfile)
389 self.opener = _divertopener(self._realopener, self.indexfile)
376 else:
390 else:
377 self._delaybuf = []
391 self._delaybuf = []
378 self.opener = _delayopener(self._realopener, self.indexfile,
392 self.opener = _delayopener(self._realopener, self.indexfile,
379 self._delaybuf)
393 self._delaybuf)
380 self._delayed = True
394 self._delayed = True
381 tr.addpending('cl-%i' % id(self), self._writepending)
395 tr.addpending('cl-%i' % id(self), self._writepending)
382 tr.addfinalize('cl-%i' % id(self), self._finalize)
396 tr.addfinalize('cl-%i' % id(self), self._finalize)
383
397
384 def _finalize(self, tr):
398 def _finalize(self, tr):
385 "finalize index updates"
399 "finalize index updates"
386 self._delayed = False
400 self._delayed = False
387 self.opener = self._realopener
401 self.opener = self._realopener
388 # move redirected index data back into place
402 # move redirected index data back into place
389 if self._divert:
403 if self._divert:
390 assert not self._delaybuf
404 assert not self._delaybuf
391 tmpname = self.indexfile + ".a"
405 tmpname = self.indexfile + ".a"
392 nfile = self.opener.open(tmpname)
406 nfile = self.opener.open(tmpname)
393 nfile.close()
407 nfile.close()
394 self.opener.rename(tmpname, self.indexfile, checkambig=True)
408 self.opener.rename(tmpname, self.indexfile, checkambig=True)
395 elif self._delaybuf:
409 elif self._delaybuf:
396 fp = self.opener(self.indexfile, 'a', checkambig=True)
410 fp = self.opener(self.indexfile, 'a', checkambig=True)
397 fp.write("".join(self._delaybuf))
411 fp.write("".join(self._delaybuf))
398 fp.close()
412 fp.close()
399 self._delaybuf = None
413 self._delaybuf = None
400 self._divert = False
414 self._divert = False
401 # split when we're done
415 # split when we're done
402 self.checkinlinesize(tr)
416 self.checkinlinesize(tr)
403
417
404 def readpending(self, file):
405 """read index data from a "pending" file
406
407 During a transaction, the actual changeset data is already stored in the
408 main file, but not yet finalized in the on-disk index. Instead, a
409 "pending" index is written by the transaction logic. If this function
410 is running, we are likely in a subprocess invoked in a hook. The
411 subprocess is informed that it is within a transaction and needs to
412 access its content.
413
414 This function will read all the index data out of the pending file and
415 overwrite the main index."""
416
417 if not self.opener.exists(file):
418 return # no pending data for changelog
419 r = revlog.revlog(self.opener, file)
420 self.index = r.index
421 self.nodemap = r.nodemap
422 self._nodecache = r._nodecache
423 self._chunkcache = r._chunkcache
424
425 def _writepending(self, tr):
418 def _writepending(self, tr):
426 "create a file containing the unfinalized state for pretxnchangegroup"
419 "create a file containing the unfinalized state for pretxnchangegroup"
427 if self._delaybuf:
420 if self._delaybuf:
428 # make a temporary copy of the index
421 # make a temporary copy of the index
429 fp1 = self._realopener(self.indexfile)
422 fp1 = self._realopener(self.indexfile)
430 pendingfilename = self.indexfile + ".a"
423 pendingfilename = self.indexfile + ".a"
431 # register as a temp file to ensure cleanup on failure
424 # register as a temp file to ensure cleanup on failure
432 tr.registertmp(pendingfilename)
425 tr.registertmp(pendingfilename)
433 # write existing data
426 # write existing data
434 fp2 = self._realopener(pendingfilename, "w")
427 fp2 = self._realopener(pendingfilename, "w")
435 fp2.write(fp1.read())
428 fp2.write(fp1.read())
436 # add pending data
429 # add pending data
437 fp2.write("".join(self._delaybuf))
430 fp2.write("".join(self._delaybuf))
438 fp2.close()
431 fp2.close()
439 # switch modes so finalize can simply rename
432 # switch modes so finalize can simply rename
440 self._delaybuf = None
433 self._delaybuf = None
441 self._divert = True
434 self._divert = True
442 self.opener = _divertopener(self._realopener, self.indexfile)
435 self.opener = _divertopener(self._realopener, self.indexfile)
443
436
444 if self._divert:
437 if self._divert:
445 return True
438 return True
446
439
447 return False
440 return False
448
441
449 def checkinlinesize(self, tr, fp=None):
442 def checkinlinesize(self, tr, fp=None):
450 if not self._delayed:
443 if not self._delayed:
451 revlog.revlog.checkinlinesize(self, tr, fp)
444 revlog.revlog.checkinlinesize(self, tr, fp)
452
445
453 def read(self, node):
446 def read(self, node):
454 """Obtain data from a parsed changelog revision.
447 """Obtain data from a parsed changelog revision.
455
448
456 Returns a 6-tuple of:
449 Returns a 6-tuple of:
457
450
458 - manifest node in binary
451 - manifest node in binary
459 - author/user as a localstr
452 - author/user as a localstr
460 - date as a 2-tuple of (time, timezone)
453 - date as a 2-tuple of (time, timezone)
461 - list of files
454 - list of files
462 - commit message as a localstr
455 - commit message as a localstr
463 - dict of extra metadata
456 - dict of extra metadata
464
457
465 Unless you need to access all fields, consider calling
458 Unless you need to access all fields, consider calling
466 ``changelogrevision`` instead, as it is faster for partial object
459 ``changelogrevision`` instead, as it is faster for partial object
467 access.
460 access.
468 """
461 """
469 c = changelogrevision(self.revision(node))
462 c = changelogrevision(self.revision(node))
470 return (
463 return (
471 c.manifest,
464 c.manifest,
472 c.user,
465 c.user,
473 c.date,
466 c.date,
474 c.files,
467 c.files,
475 c.description,
468 c.description,
476 c.extra
469 c.extra
477 )
470 )
478
471
479 def changelogrevision(self, nodeorrev):
472 def changelogrevision(self, nodeorrev):
480 """Obtain a ``changelogrevision`` for a node or revision."""
473 """Obtain a ``changelogrevision`` for a node or revision."""
481 return changelogrevision(self.revision(nodeorrev))
474 return changelogrevision(self.revision(nodeorrev))
482
475
483 def readfiles(self, node):
476 def readfiles(self, node):
484 """
477 """
485 short version of read that only returns the files modified by the cset
478 short version of read that only returns the files modified by the cset
486 """
479 """
487 text = self.revision(node)
480 text = self.revision(node)
488 if not text:
481 if not text:
489 return []
482 return []
490 last = text.index("\n\n")
483 last = text.index("\n\n")
491 l = text[:last].split('\n')
484 l = text[:last].split('\n')
492 return l[3:]
485 return l[3:]
493
486
494 def add(self, manifest, files, desc, transaction, p1, p2,
487 def add(self, manifest, files, desc, transaction, p1, p2,
495 user, date=None, extra=None):
488 user, date=None, extra=None):
496 # Convert to UTF-8 encoded bytestrings as the very first
489 # Convert to UTF-8 encoded bytestrings as the very first
497 # thing: calling any method on a localstr object will turn it
490 # thing: calling any method on a localstr object will turn it
498 # into a str object and the cached UTF-8 string is thus lost.
491 # into a str object and the cached UTF-8 string is thus lost.
499 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
492 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
500
493
501 user = user.strip()
494 user = user.strip()
502 # An empty username or a username with a "\n" will make the
495 # An empty username or a username with a "\n" will make the
503 # revision text contain two "\n\n" sequences -> corrupt
496 # revision text contain two "\n\n" sequences -> corrupt
504 # repository since read cannot unpack the revision.
497 # repository since read cannot unpack the revision.
505 if not user:
498 if not user:
506 raise error.RevlogError(_("empty username"))
499 raise error.RevlogError(_("empty username"))
507 if "\n" in user:
500 if "\n" in user:
508 raise error.RevlogError(_("username %s contains a newline")
501 raise error.RevlogError(_("username %s contains a newline")
509 % repr(user))
502 % repr(user))
510
503
511 desc = stripdesc(desc)
504 desc = stripdesc(desc)
512
505
513 if date:
506 if date:
514 parseddate = "%d %d" % util.parsedate(date)
507 parseddate = "%d %d" % util.parsedate(date)
515 else:
508 else:
516 parseddate = "%d %d" % util.makedate()
509 parseddate = "%d %d" % util.makedate()
517 if extra:
510 if extra:
518 branch = extra.get("branch")
511 branch = extra.get("branch")
519 if branch in ("default", ""):
512 if branch in ("default", ""):
520 del extra["branch"]
513 del extra["branch"]
521 elif branch in (".", "null", "tip"):
514 elif branch in (".", "null", "tip"):
522 raise error.RevlogError(_('the name \'%s\' is reserved')
515 raise error.RevlogError(_('the name \'%s\' is reserved')
523 % branch)
516 % branch)
524 if extra:
517 if extra:
525 extra = encodeextra(extra)
518 extra = encodeextra(extra)
526 parseddate = "%s %s" % (parseddate, extra)
519 parseddate = "%s %s" % (parseddate, extra)
527 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
520 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
528 text = "\n".join(l)
521 text = "\n".join(l)
529 return self.addrevision(text, transaction, len(self), p1, p2)
522 return self.addrevision(text, transaction, len(self), p1, p2)
530
523
531 def branchinfo(self, rev):
524 def branchinfo(self, rev):
532 """return the branch name and open/close state of a revision
525 """return the branch name and open/close state of a revision
533
526
534 This function exists because creating a changectx object
527 This function exists because creating a changectx object
535 just to access this is costly."""
528 just to access this is costly."""
536 extra = self.read(rev)[5]
529 extra = self.read(rev)[5]
537 return encoding.tolocal(extra.get("branch")), 'close' in extra
530 return encoding.tolocal(extra.get("branch")), 'close' in extra
538
531
539 def _addrevision(self, node, rawtext, transaction, *args, **kwargs):
532 def _addrevision(self, node, rawtext, transaction, *args, **kwargs):
540 # overlay over the standard revlog._addrevision to track the new
533 # overlay over the standard revlog._addrevision to track the new
541 # revision on the transaction.
534 # revision on the transaction.
542 rev = len(self)
535 rev = len(self)
543 node = super(changelog, self)._addrevision(node, rawtext, transaction,
536 node = super(changelog, self)._addrevision(node, rawtext, transaction,
544 *args, **kwargs)
537 *args, **kwargs)
545 revs = transaction.changes.get('revs')
538 revs = transaction.changes.get('revs')
546 if revs is not None:
539 if revs is not None:
547 revs.add(rev)
540 revs.add(rev)
548 return node
541 return node
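The localrepo.py hunk that follows shows only leading context; the caller-side wiring for trypending is not visible in this excerpt. Presumably the repository now passes trypending when constructing its changelog so that hook subprocesses spawned during a transaction can see pending data. A hypothetical sketch of such a caller, under the assumption that some predicate (here mayhavepending, driven by something like the HG_PENDING environment variable) tells the repository whether pending data should be visible; none of these caller-side names are confirmed by this excerpt:

    from mercurial import changelog

    def openchangelog(svfs, root, mayhavepending):
        # mayhavepending is an assumed predicate: is this process running
        # inside (or on behalf of) an open transaction on this repository?
        trypending = mayhavepending(root)
        return changelog.changelog(svfs, trypending=trypending)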
@@ -1,2050 +1,2048 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repoview,
53 repoview,
54 revset,
54 revset,
55 revsetlang,
55 revsetlang,
56 scmutil,
56 scmutil,
57 store,
57 store,
58 subrepo,
58 subrepo,
59 tags as tagsmod,
59 tags as tagsmod,
60 transaction,
60 transaction,
61 txnutil,
61 txnutil,
62 util,
62 util,
63 vfs as vfsmod,
63 vfs as vfsmod,
64 )
64 )
65
65
66 release = lockmod.release
66 release = lockmod.release
67 urlerr = util.urlerr
67 urlerr = util.urlerr
68 urlreq = util.urlreq
68 urlreq = util.urlreq
69
69
70 class repofilecache(scmutil.filecache):
70 class repofilecache(scmutil.filecache):
71 """All filecache usage on repo are done for logic that should be unfiltered
71 """All filecache usage on repo are done for logic that should be unfiltered
72 """
72 """
73
73
74 def join(self, obj, fname):
74 def join(self, obj, fname):
75 return obj.vfs.join(fname)
75 return obj.vfs.join(fname)
76 def __get__(self, repo, type=None):
76 def __get__(self, repo, type=None):
77 if repo is None:
77 if repo is None:
78 return self
78 return self
79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
80 def __set__(self, repo, value):
80 def __set__(self, repo, value):
81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
82 def __delete__(self, repo):
82 def __delete__(self, repo):
83 return super(repofilecache, self).__delete__(repo.unfiltered())
83 return super(repofilecache, self).__delete__(repo.unfiltered())
84
84
85 class storecache(repofilecache):
85 class storecache(repofilecache):
86 """filecache for files in the store"""
86 """filecache for files in the store"""
87 def join(self, obj, fname):
87 def join(self, obj, fname):
88 return obj.sjoin(fname)
88 return obj.sjoin(fname)
89
89
90 class unfilteredpropertycache(util.propertycache):
90 class unfilteredpropertycache(util.propertycache):
91 """propertycache that apply to unfiltered repo only"""
91 """propertycache that apply to unfiltered repo only"""
92
92
93 def __get__(self, repo, type=None):
93 def __get__(self, repo, type=None):
94 unfi = repo.unfiltered()
94 unfi = repo.unfiltered()
95 if unfi is repo:
95 if unfi is repo:
96 return super(unfilteredpropertycache, self).__get__(unfi)
96 return super(unfilteredpropertycache, self).__get__(unfi)
97 return getattr(unfi, self.name)
97 return getattr(unfi, self.name)
98
98
99 class filteredpropertycache(util.propertycache):
99 class filteredpropertycache(util.propertycache):
100 """propertycache that must take filtering in account"""
100 """propertycache that must take filtering in account"""
101
101
102 def cachevalue(self, obj, value):
102 def cachevalue(self, obj, value):
103 object.__setattr__(obj, self.name, value)
103 object.__setattr__(obj, self.name, value)
104
104
105
105
106 def hasunfilteredcache(repo, name):
106 def hasunfilteredcache(repo, name):
107 """check if a repo has an unfilteredpropertycache value for <name>"""
107 """check if a repo has an unfilteredpropertycache value for <name>"""
108 return name in vars(repo.unfiltered())
108 return name in vars(repo.unfiltered())
109
109
110 def unfilteredmethod(orig):
110 def unfilteredmethod(orig):
111 """decorate method that always need to be run on unfiltered version"""
111 """decorate method that always need to be run on unfiltered version"""
112 def wrapper(repo, *args, **kwargs):
112 def wrapper(repo, *args, **kwargs):
113 return orig(repo.unfiltered(), *args, **kwargs)
113 return orig(repo.unfiltered(), *args, **kwargs)
114 return wrapper
114 return wrapper
115
115
116 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
116 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117 'unbundle'}
117 'unbundle'}
118 legacycaps = moderncaps.union({'changegroupsubset'})
118 legacycaps = moderncaps.union({'changegroupsubset'})
119
119
120 class localpeer(peer.peerrepository):
120 class localpeer(peer.peerrepository):
121 '''peer for a local repo; reflects only the most recent API'''
121 '''peer for a local repo; reflects only the most recent API'''
122
122
123 def __init__(self, repo, caps=None):
123 def __init__(self, repo, caps=None):
124 if caps is None:
124 if caps is None:
125 caps = moderncaps.copy()
125 caps = moderncaps.copy()
126 peer.peerrepository.__init__(self)
126 peer.peerrepository.__init__(self)
127 self._repo = repo.filtered('served')
127 self._repo = repo.filtered('served')
128 self.ui = repo.ui
128 self.ui = repo.ui
129 self._caps = repo._restrictcapabilities(caps)
129 self._caps = repo._restrictcapabilities(caps)
130 self.requirements = repo.requirements
130 self.requirements = repo.requirements
131 self.supportedformats = repo.supportedformats
131 self.supportedformats = repo.supportedformats
132
132
133 def close(self):
133 def close(self):
134 self._repo.close()
134 self._repo.close()
135
135
136 def _capabilities(self):
136 def _capabilities(self):
137 return self._caps
137 return self._caps
138
138
139 def local(self):
139 def local(self):
140 return self._repo
140 return self._repo
141
141
142 def canpush(self):
142 def canpush(self):
143 return True
143 return True
144
144
145 def url(self):
145 def url(self):
146 return self._repo.url()
146 return self._repo.url()
147
147
148 def lookup(self, key):
148 def lookup(self, key):
149 return self._repo.lookup(key)
149 return self._repo.lookup(key)
150
150
151 def branchmap(self):
151 def branchmap(self):
152 return self._repo.branchmap()
152 return self._repo.branchmap()
153
153
154 def heads(self):
154 def heads(self):
155 return self._repo.heads()
155 return self._repo.heads()
156
156
157 def known(self, nodes):
157 def known(self, nodes):
158 return self._repo.known(nodes)
158 return self._repo.known(nodes)
159
159
160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161 **kwargs):
161 **kwargs):
162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163 common=common, bundlecaps=bundlecaps,
163 common=common, bundlecaps=bundlecaps,
164 **kwargs)
164 **kwargs)
165 cb = util.chunkbuffer(chunks)
165 cb = util.chunkbuffer(chunks)
166
166
167 if exchange.bundle2requested(bundlecaps):
167 if exchange.bundle2requested(bundlecaps):
168 # When requesting a bundle2, getbundle returns a stream to make the
168 # When requesting a bundle2, getbundle returns a stream to make the
169 # wire level function happier. We need to build a proper object
169 # wire level function happier. We need to build a proper object
170 # from it in local peer.
170 # from it in local peer.
171 return bundle2.getunbundler(self.ui, cb)
171 return bundle2.getunbundler(self.ui, cb)
172 else:
172 else:
173 return changegroup.getunbundler('01', cb, None)
173 return changegroup.getunbundler('01', cb, None)
174
174
175 # TODO We might want to move the next two calls into legacypeer and add
175 # TODO We might want to move the next two calls into legacypeer and add
176 # unbundle instead.
176 # unbundle instead.
177
177
178 def unbundle(self, cg, heads, url):
178 def unbundle(self, cg, heads, url):
179 """apply a bundle on a repo
179 """apply a bundle on a repo
180
180
181 This function handles the repo locking itself."""
181 This function handles the repo locking itself."""
182 try:
182 try:
183 try:
183 try:
184 cg = exchange.readbundle(self.ui, cg, None)
184 cg = exchange.readbundle(self.ui, cg, None)
185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186 if util.safehasattr(ret, 'getchunks'):
186 if util.safehasattr(ret, 'getchunks'):
187 # This is a bundle20 object, turn it into an unbundler.
187 # This is a bundle20 object, turn it into an unbundler.
188 # This little dance should be dropped eventually when the
188 # This little dance should be dropped eventually when the
189 # API is finally improved.
189 # API is finally improved.
190 stream = util.chunkbuffer(ret.getchunks())
190 stream = util.chunkbuffer(ret.getchunks())
191 ret = bundle2.getunbundler(self.ui, stream)
191 ret = bundle2.getunbundler(self.ui, stream)
192 return ret
192 return ret
193 except Exception as exc:
193 except Exception as exc:
194 # If the exception contains output salvaged from a bundle2
194 # If the exception contains output salvaged from a bundle2
195 # reply, we need to make sure it is printed before continuing
195 # reply, we need to make sure it is printed before continuing
196 # to fail. So we build a bundle2 with such output and consume
196 # to fail. So we build a bundle2 with such output and consume
197 # it directly.
197 # it directly.
198 #
198 #
199 # This is not very elegant but allows a "simple" solution for
199 # This is not very elegant but allows a "simple" solution for
200 # issue4594
200 # issue4594
201 output = getattr(exc, '_bundle2salvagedoutput', ())
201 output = getattr(exc, '_bundle2salvagedoutput', ())
202 if output:
202 if output:
203 bundler = bundle2.bundle20(self._repo.ui)
203 bundler = bundle2.bundle20(self._repo.ui)
204 for out in output:
204 for out in output:
205 bundler.addpart(out)
205 bundler.addpart(out)
206 stream = util.chunkbuffer(bundler.getchunks())
206 stream = util.chunkbuffer(bundler.getchunks())
207 b = bundle2.getunbundler(self.ui, stream)
207 b = bundle2.getunbundler(self.ui, stream)
208 bundle2.processbundle(self._repo, b)
208 bundle2.processbundle(self._repo, b)
209 raise
209 raise
210 except error.PushRaced as exc:
210 except error.PushRaced as exc:
211 raise error.ResponseError(_('push failed:'), str(exc))
211 raise error.ResponseError(_('push failed:'), str(exc))
212
212
213 def lock(self):
213 def lock(self):
214 return self._repo.lock()
214 return self._repo.lock()
215
215
216 def addchangegroup(self, cg, source, url):
216 def addchangegroup(self, cg, source, url):
217 return cg.apply(self._repo, source, url)
217 return cg.apply(self._repo, source, url)
218
218
219 def pushkey(self, namespace, key, old, new):
219 def pushkey(self, namespace, key, old, new):
220 return self._repo.pushkey(namespace, key, old, new)
220 return self._repo.pushkey(namespace, key, old, new)
221
221
222 def listkeys(self, namespace):
222 def listkeys(self, namespace):
223 return self._repo.listkeys(namespace)
223 return self._repo.listkeys(namespace)
224
224
225 def debugwireargs(self, one, two, three=None, four=None, five=None):
225 def debugwireargs(self, one, two, three=None, four=None, five=None):
226 '''used to test argument passing over the wire'''
226 '''used to test argument passing over the wire'''
227 return "%s %s %s %s %s" % (one, two, three, four, five)
227 return "%s %s %s %s %s" % (one, two, three, four, five)
228
228
229 class locallegacypeer(localpeer):
229 class locallegacypeer(localpeer):
230 '''peer extension which implements legacy methods too; used for tests with
230 '''peer extension which implements legacy methods too; used for tests with
231 restricted capabilities'''
231 restricted capabilities'''
232
232
233 def __init__(self, repo):
233 def __init__(self, repo):
234 localpeer.__init__(self, repo, caps=legacycaps)
234 localpeer.__init__(self, repo, caps=legacycaps)
235
235
236 def branches(self, nodes):
236 def branches(self, nodes):
237 return self._repo.branches(nodes)
237 return self._repo.branches(nodes)
238
238
239 def between(self, pairs):
239 def between(self, pairs):
240 return self._repo.between(pairs)
240 return self._repo.between(pairs)
241
241
242 def changegroup(self, basenodes, source):
242 def changegroup(self, basenodes, source):
243 return changegroup.changegroup(self._repo, basenodes, source)
243 return changegroup.changegroup(self._repo, basenodes, source)
244
244
245 def changegroupsubset(self, bases, heads, source):
245 def changegroupsubset(self, bases, heads, source):
246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
247
247
248 class localrepository(object):
248 class localrepository(object):
249
249
250 supportedformats = {'revlogv1', 'generaldelta', 'treemanifest',
250 supportedformats = {'revlogv1', 'generaldelta', 'treemanifest',
251 'manifestv2'}
251 'manifestv2'}
252 _basesupported = supportedformats | {'store', 'fncache', 'shared',
252 _basesupported = supportedformats | {'store', 'fncache', 'shared',
253 'relshared', 'dotencode'}
253 'relshared', 'dotencode'}
254 openerreqs = {'revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'}
254 openerreqs = {'revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'}
255 filtername = None
255 filtername = None
256
256
257 # a list of (ui, featureset) functions.
257 # a list of (ui, featureset) functions.
258 # only functions defined in module of enabled extensions are invoked
258 # only functions defined in module of enabled extensions are invoked
259 featuresetupfuncs = set()
259 featuresetupfuncs = set()
260
260
261 def __init__(self, baseui, path, create=False):
261 def __init__(self, baseui, path, create=False):
262 self.requirements = set()
262 self.requirements = set()
263 # wvfs: rooted at the repository root, used to access the working copy
263 # wvfs: rooted at the repository root, used to access the working copy
264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
266 self.vfs = None
266 self.vfs = None
267 # svfs: usually rooted at .hg/store, used to access repository history
267 # svfs: usually rooted at .hg/store, used to access repository history
268 # If this is a shared repository, this vfs may point to another
268 # If this is a shared repository, this vfs may point to another
269 # repository's .hg/store directory.
269 # repository's .hg/store directory.
270 self.svfs = None
270 self.svfs = None
271 self.root = self.wvfs.base
271 self.root = self.wvfs.base
272 self.path = self.wvfs.join(".hg")
272 self.path = self.wvfs.join(".hg")
273 self.origroot = path
273 self.origroot = path
274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
276 realfs=False)
276 realfs=False)
277 self.vfs = vfsmod.vfs(self.path)
277 self.vfs = vfsmod.vfs(self.path)
278 self.baseui = baseui
278 self.baseui = baseui
279 self.ui = baseui.copy()
279 self.ui = baseui.copy()
280 self.ui.copy = baseui.copy # prevent copying repo configuration
280 self.ui.copy = baseui.copy # prevent copying repo configuration
281 # A list of callback to shape the phase if no data were found.
281 # A list of callback to shape the phase if no data were found.
282 # Callback are in the form: func(repo, roots) --> processed root.
282 # Callback are in the form: func(repo, roots) --> processed root.
283 # This list it to be filled by extension during repo setup
283 # This list it to be filled by extension during repo setup
284 self._phasedefaults = []
284 self._phasedefaults = []
285 try:
285 try:
286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
287 self._loadextensions()
287 self._loadextensions()
288 except IOError:
288 except IOError:
289 pass
289 pass
290
290
291 if self.featuresetupfuncs:
291 if self.featuresetupfuncs:
292 self.supported = set(self._basesupported) # use private copy
292 self.supported = set(self._basesupported) # use private copy
293 extmods = set(m.__name__ for n, m
293 extmods = set(m.__name__ for n, m
294 in extensions.extensions(self.ui))
294 in extensions.extensions(self.ui))
295 for setupfunc in self.featuresetupfuncs:
295 for setupfunc in self.featuresetupfuncs:
296 if setupfunc.__module__ in extmods:
296 if setupfunc.__module__ in extmods:
297 setupfunc(self.ui, self.supported)
297 setupfunc(self.ui, self.supported)
298 else:
298 else:
299 self.supported = self._basesupported
299 self.supported = self._basesupported
300 color.setup(self.ui)
300 color.setup(self.ui)
301
301
302 # Add compression engines.
302 # Add compression engines.
303 for name in util.compengines:
303 for name in util.compengines:
304 engine = util.compengines[name]
304 engine = util.compengines[name]
305 if engine.revlogheader():
305 if engine.revlogheader():
306 self.supported.add('exp-compression-%s' % name)
306 self.supported.add('exp-compression-%s' % name)
307
307
308 if not self.vfs.isdir():
308 if not self.vfs.isdir():
309 if create:
309 if create:
310 self.requirements = newreporequirements(self)
310 self.requirements = newreporequirements(self)
311
311
312 if not self.wvfs.exists():
312 if not self.wvfs.exists():
313 self.wvfs.makedirs()
313 self.wvfs.makedirs()
314 self.vfs.makedir(notindexed=True)
314 self.vfs.makedir(notindexed=True)
315
315
316 if 'store' in self.requirements:
316 if 'store' in self.requirements:
317 self.vfs.mkdir("store")
317 self.vfs.mkdir("store")
318
318
319 # create an invalid changelog
319 # create an invalid changelog
320 self.vfs.append(
320 self.vfs.append(
321 "00changelog.i",
321 "00changelog.i",
322 '\0\0\0\2' # represents revlogv2
322 '\0\0\0\2' # represents revlogv2
323 ' dummy changelog to prevent using the old repo layout'
323 ' dummy changelog to prevent using the old repo layout'
324 )
324 )
325 else:
325 else:
326 raise error.RepoError(_("repository %s not found") % path)
326 raise error.RepoError(_("repository %s not found") % path)
327 elif create:
327 elif create:
328 raise error.RepoError(_("repository %s already exists") % path)
328 raise error.RepoError(_("repository %s already exists") % path)
329 else:
329 else:
330 try:
330 try:
331 self.requirements = scmutil.readrequires(
331 self.requirements = scmutil.readrequires(
332 self.vfs, self.supported)
332 self.vfs, self.supported)
333 except IOError as inst:
333 except IOError as inst:
334 if inst.errno != errno.ENOENT:
334 if inst.errno != errno.ENOENT:
335 raise
335 raise
336
336
337 self.sharedpath = self.path
337 self.sharedpath = self.path
338 try:
338 try:
339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
340 if 'relshared' in self.requirements:
340 if 'relshared' in self.requirements:
341 sharedpath = self.vfs.join(sharedpath)
341 sharedpath = self.vfs.join(sharedpath)
342 vfs = vfsmod.vfs(sharedpath, realpath=True)
342 vfs = vfsmod.vfs(sharedpath, realpath=True)
343 s = vfs.base
343 s = vfs.base
344 if not vfs.exists():
344 if not vfs.exists():
345 raise error.RepoError(
345 raise error.RepoError(
346 _('.hg/sharedpath points to nonexistent directory %s') % s)
346 _('.hg/sharedpath points to nonexistent directory %s') % s)
347 self.sharedpath = s
347 self.sharedpath = s
348 except IOError as inst:
348 except IOError as inst:
349 if inst.errno != errno.ENOENT:
349 if inst.errno != errno.ENOENT:
350 raise
350 raise
351
351
352 self.store = store.store(
352 self.store = store.store(
353 self.requirements, self.sharedpath, vfsmod.vfs)
353 self.requirements, self.sharedpath, vfsmod.vfs)
354 self.spath = self.store.path
354 self.spath = self.store.path
355 self.svfs = self.store.vfs
355 self.svfs = self.store.vfs
356 self.sjoin = self.store.join
356 self.sjoin = self.store.join
357 self.vfs.createmode = self.store.createmode
357 self.vfs.createmode = self.store.createmode
358 self._applyopenerreqs()
358 self._applyopenerreqs()
359 if create:
359 if create:
360 self._writerequirements()
360 self._writerequirements()
361
361
362 self._dirstatevalidatewarned = False
362 self._dirstatevalidatewarned = False
363
363
364 self._branchcaches = {}
364 self._branchcaches = {}
365 self._revbranchcache = None
365 self._revbranchcache = None
366 self.filterpats = {}
366 self.filterpats = {}
367 self._datafilters = {}
367 self._datafilters = {}
368 self._transref = self._lockref = self._wlockref = None
368 self._transref = self._lockref = self._wlockref = None
369
369
370 # A cache for various files under .hg/ that tracks file changes,
370 # A cache for various files under .hg/ that tracks file changes,
371 # (used by the filecache decorator)
371 # (used by the filecache decorator)
372 #
372 #
373 # Maps a property name to its util.filecacheentry
373 # Maps a property name to its util.filecacheentry
374 self._filecache = {}
374 self._filecache = {}
375
375
376 # hold sets of revision to be filtered
376 # hold sets of revision to be filtered
377 # should be cleared when something might have changed the filter value:
377 # should be cleared when something might have changed the filter value:
378 # - new changesets,
378 # - new changesets,
379 # - phase change,
379 # - phase change,
380 # - new obsolescence marker,
380 # - new obsolescence marker,
381 # - working directory parent change,
381 # - working directory parent change,
382 # - bookmark changes
382 # - bookmark changes
383 self.filteredrevcache = {}
383 self.filteredrevcache = {}
384
384
385 # generic mapping between names and nodes
385 # generic mapping between names and nodes
386 self.names = namespaces.namespaces()
386 self.names = namespaces.namespaces()
387
387
388 def close(self):
388 def close(self):
389 self._writecaches()
389 self._writecaches()
390
390
391 def _loadextensions(self):
391 def _loadextensions(self):
392 extensions.loadall(self.ui)
392 extensions.loadall(self.ui)
393
393
394 def _writecaches(self):
394 def _writecaches(self):
395 if self._revbranchcache:
395 if self._revbranchcache:
396 self._revbranchcache.write()
396 self._revbranchcache.write()
397
397
398 def _restrictcapabilities(self, caps):
398 def _restrictcapabilities(self, caps):
399 if self.ui.configbool('experimental', 'bundle2-advertise', True):
399 if self.ui.configbool('experimental', 'bundle2-advertise', True):
400 caps = set(caps)
400 caps = set(caps)
401 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
401 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
402 caps.add('bundle2=' + urlreq.quote(capsblob))
402 caps.add('bundle2=' + urlreq.quote(capsblob))
403 return caps
403 return caps
404
404
405 def _applyopenerreqs(self):
405 def _applyopenerreqs(self):
406 self.svfs.options = dict((r, 1) for r in self.requirements
406 self.svfs.options = dict((r, 1) for r in self.requirements
407 if r in self.openerreqs)
407 if r in self.openerreqs)
408 # experimental config: format.chunkcachesize
408 # experimental config: format.chunkcachesize
409 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
409 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
410 if chunkcachesize is not None:
410 if chunkcachesize is not None:
411 self.svfs.options['chunkcachesize'] = chunkcachesize
411 self.svfs.options['chunkcachesize'] = chunkcachesize
412 # experimental config: format.maxchainlen
412 # experimental config: format.maxchainlen
413 maxchainlen = self.ui.configint('format', 'maxchainlen')
413 maxchainlen = self.ui.configint('format', 'maxchainlen')
414 if maxchainlen is not None:
414 if maxchainlen is not None:
415 self.svfs.options['maxchainlen'] = maxchainlen
415 self.svfs.options['maxchainlen'] = maxchainlen
416 # experimental config: format.manifestcachesize
416 # experimental config: format.manifestcachesize
417 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
417 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
418 if manifestcachesize is not None:
418 if manifestcachesize is not None:
419 self.svfs.options['manifestcachesize'] = manifestcachesize
419 self.svfs.options['manifestcachesize'] = manifestcachesize
420 # experimental config: format.aggressivemergedeltas
420 # experimental config: format.aggressivemergedeltas
421 aggressivemergedeltas = self.ui.configbool('format',
421 aggressivemergedeltas = self.ui.configbool('format',
422 'aggressivemergedeltas', False)
422 'aggressivemergedeltas', False)
423 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
423 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
424 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
424 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
425
425
426 for r in self.requirements:
426 for r in self.requirements:
427 if r.startswith('exp-compression-'):
427 if r.startswith('exp-compression-'):
428 self.svfs.options['compengine'] = r[len('exp-compression-'):]
428 self.svfs.options['compengine'] = r[len('exp-compression-'):]
429
429
430 def _writerequirements(self):
430 def _writerequirements(self):
431 scmutil.writerequires(self.vfs, self.requirements)
431 scmutil.writerequires(self.vfs, self.requirements)
432
432
433 def _checknested(self, path):
433 def _checknested(self, path):
434 """Determine if path is a legal nested repository."""
434 """Determine if path is a legal nested repository."""
435 if not path.startswith(self.root):
435 if not path.startswith(self.root):
436 return False
436 return False
437 subpath = path[len(self.root) + 1:]
437 subpath = path[len(self.root) + 1:]
438 normsubpath = util.pconvert(subpath)
438 normsubpath = util.pconvert(subpath)
439
439
440 # XXX: Checking against the current working copy is wrong in
440 # XXX: Checking against the current working copy is wrong in
441 # the sense that it can reject things like
441 # the sense that it can reject things like
442 #
442 #
443 # $ hg cat -r 10 sub/x.txt
443 # $ hg cat -r 10 sub/x.txt
444 #
444 #
445 # if sub/ is no longer a subrepository in the working copy
445 # if sub/ is no longer a subrepository in the working copy
446 # parent revision.
446 # parent revision.
447 #
447 #
448 # However, it can of course also allow things that would have
448 # However, it can of course also allow things that would have
449 # been rejected before, such as the above cat command if sub/
449 # been rejected before, such as the above cat command if sub/
450 # is a subrepository now, but was a normal directory before.
450 # is a subrepository now, but was a normal directory before.
451 # The old path auditor would have rejected by mistake since it
451 # The old path auditor would have rejected by mistake since it
452 # panics when it sees sub/.hg/.
452 # panics when it sees sub/.hg/.
453 #
453 #
454 # All in all, checking against the working copy seems sensible
454 # All in all, checking against the working copy seems sensible
455 # since we want to prevent access to nested repositories on
455 # since we want to prevent access to nested repositories on
456 # the filesystem *now*.
456 # the filesystem *now*.
457 ctx = self[None]
457 ctx = self[None]
458 parts = util.splitpath(subpath)
458 parts = util.splitpath(subpath)
459 while parts:
459 while parts:
460 prefix = '/'.join(parts)
460 prefix = '/'.join(parts)
461 if prefix in ctx.substate:
461 if prefix in ctx.substate:
462 if prefix == normsubpath:
462 if prefix == normsubpath:
463 return True
463 return True
464 else:
464 else:
465 sub = ctx.sub(prefix)
465 sub = ctx.sub(prefix)
466 return sub.checknested(subpath[len(prefix) + 1:])
466 return sub.checknested(subpath[len(prefix) + 1:])
467 else:
467 else:
468 parts.pop()
468 parts.pop()
469 return False
469 return False
470
470
471 def peer(self):
471 def peer(self):
472 return localpeer(self) # not cached to avoid reference cycle
472 return localpeer(self) # not cached to avoid reference cycle
473
473
474 def unfiltered(self):
474 def unfiltered(self):
475 """Return unfiltered version of the repository
475 """Return unfiltered version of the repository
476
476
477 Intended to be overridden by filtered repos."""
477 Intended to be overridden by filtered repos."""
478 return self
478 return self
479
479
480 def filtered(self, name):
480 def filtered(self, name):
481 """Return a filtered version of a repository"""
481 """Return a filtered version of a repository"""
482 # build a new class with the mixin and the current class
482 # build a new class with the mixin and the current class
483 # (possibly subclass of the repo)
483 # (possibly subclass of the repo)
484 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
484 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
485 pass
485 pass
486 return filteredrepo(self, name)
486 return filteredrepo(self, name)
487
487
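A small usage sketch (hypothetical caller, not part of this file): filter names such as 'visible' and 'served' are defined by repoview, and filtering only wraps the repository in a view class, so requesting a view is cheap.

    served = repo.filtered('served')
    for rev in served.revs('head()'):
        pass  # iterates only heads that this view does not hide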
488 @repofilecache('bookmarks', 'bookmarks.current')
488 @repofilecache('bookmarks', 'bookmarks.current')
489 def _bookmarks(self):
489 def _bookmarks(self):
490 return bookmarks.bmstore(self)
490 return bookmarks.bmstore(self)
491
491
492 @property
492 @property
493 def _activebookmark(self):
493 def _activebookmark(self):
494 return self._bookmarks.active
494 return self._bookmarks.active
495
495
496 def bookmarkheads(self, bookmark):
496 def bookmarkheads(self, bookmark):
497 name = bookmark.split('@', 1)[0]
497 name = bookmark.split('@', 1)[0]
498 heads = []
498 heads = []
499 for mark, n in self._bookmarks.iteritems():
499 for mark, n in self._bookmarks.iteritems():
500 if mark.split('@', 1)[0] == name:
500 if mark.split('@', 1)[0] == name:
501 heads.append(n)
501 heads.append(n)
502 return heads
502 return heads
503
503
504 # _phaserevs and _phasesets depend on the changelog. What we need is to
504 # _phaserevs and _phasesets depend on the changelog. What we need is to
505 # call _phasecache.invalidate() if '00changelog.i' was changed, but that
505 # call _phasecache.invalidate() if '00changelog.i' was changed, but that
506 # can't easily be expressed in the filecache mechanism.
506 # can't easily be expressed in the filecache mechanism.
507 @storecache('phaseroots', '00changelog.i')
507 @storecache('phaseroots', '00changelog.i')
508 def _phasecache(self):
508 def _phasecache(self):
509 return phases.phasecache(self, self._phasedefaults)
509 return phases.phasecache(self, self._phasedefaults)
510
510
511 @storecache('obsstore')
511 @storecache('obsstore')
512 def obsstore(self):
512 def obsstore(self):
513 # read default format for new obsstore.
513 # read default format for new obsstore.
514 # developer config: format.obsstore-version
514 # developer config: format.obsstore-version
515 defaultformat = self.ui.configint('format', 'obsstore-version', None)
515 defaultformat = self.ui.configint('format', 'obsstore-version', None)
516 # rely on obsstore class default when possible.
516 # rely on obsstore class default when possible.
517 kwargs = {}
517 kwargs = {}
518 if defaultformat is not None:
518 if defaultformat is not None:
519 kwargs['defaultformat'] = defaultformat
519 kwargs['defaultformat'] = defaultformat
520 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
520 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
521 store = obsolete.obsstore(self.svfs, readonly=readonly,
521 store = obsolete.obsstore(self.svfs, readonly=readonly,
522 **kwargs)
522 **kwargs)
523 if store and readonly:
523 if store and readonly:
524 self.ui.warn(
524 self.ui.warn(
525 _('obsolete feature not enabled but %i markers found!\n')
525 _('obsolete feature not enabled but %i markers found!\n')
526 % len(list(store)))
526 % len(list(store)))
527 return store
527 return store
528
528
529 @storecache('00changelog.i')
529 @storecache('00changelog.i')
530 def changelog(self):
530 def changelog(self):
531 c = changelog.changelog(self.svfs)
531 return changelog.changelog(self.svfs,
532 if txnutil.mayhavepending(self.root):
532 trypending=txnutil.mayhavepending(self.root))
533 c.readpending('00changelog.i.a')
534 return c
535
533
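For context, the hunk above is the substance of this change: instead of the repository deciding whether to read the pending changelog file, that decision is handed to the constructor. A minimal sketch of the equivalence, assuming trypending simply makes changelog.changelog read the pending index itself (illustrative only, not part of this file):

    # before: the caller reads the pending file explicitly
    c = changelog.changelog(self.svfs)
    if txnutil.mayhavepending(self.root):
        c.readpending('00changelog.i.a')
    return c

    # after: the changelog is told it may have pending data and, when
    # trypending is True, loads '00changelog.i.a' directly on its own
    return changelog.changelog(self.svfs,
                               trypending=txnutil.mayhavepending(self.root))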
536 def _constructmanifest(self):
534 def _constructmanifest(self):
537 # This is a temporary function while we migrate from manifest to
535 # This is a temporary function while we migrate from manifest to
538 # manifestlog. It allows bundlerepo and unionrepo to intercept the
536 # manifestlog. It allows bundlerepo and unionrepo to intercept the
539 # manifest creation.
537 # manifest creation.
540 return manifest.manifestrevlog(self.svfs)
538 return manifest.manifestrevlog(self.svfs)
541
539
542 @storecache('00manifest.i')
540 @storecache('00manifest.i')
543 def manifestlog(self):
541 def manifestlog(self):
544 return manifest.manifestlog(self.svfs, self)
542 return manifest.manifestlog(self.svfs, self)
545
543
546 @repofilecache('dirstate')
544 @repofilecache('dirstate')
547 def dirstate(self):
545 def dirstate(self):
548 return dirstate.dirstate(self.vfs, self.ui, self.root,
546 return dirstate.dirstate(self.vfs, self.ui, self.root,
549 self._dirstatevalidate)
547 self._dirstatevalidate)
550
548
551 def _dirstatevalidate(self, node):
549 def _dirstatevalidate(self, node):
552 try:
550 try:
553 self.changelog.rev(node)
551 self.changelog.rev(node)
554 return node
552 return node
555 except error.LookupError:
553 except error.LookupError:
556 if not self._dirstatevalidatewarned:
554 if not self._dirstatevalidatewarned:
557 self._dirstatevalidatewarned = True
555 self._dirstatevalidatewarned = True
558 self.ui.warn(_("warning: ignoring unknown"
556 self.ui.warn(_("warning: ignoring unknown"
559 " working parent %s!\n") % short(node))
557 " working parent %s!\n") % short(node))
560 return nullid
558 return nullid
561
559
562 def __getitem__(self, changeid):
560 def __getitem__(self, changeid):
563 if changeid is None or changeid == wdirrev:
561 if changeid is None or changeid == wdirrev:
564 return context.workingctx(self)
562 return context.workingctx(self)
565 if isinstance(changeid, slice):
563 if isinstance(changeid, slice):
566 return [context.changectx(self, i)
564 return [context.changectx(self, i)
567 for i in xrange(*changeid.indices(len(self)))
565 for i in xrange(*changeid.indices(len(self)))
568 if i not in self.changelog.filteredrevs]
566 if i not in self.changelog.filteredrevs]
569 return context.changectx(self, changeid)
567 return context.changectx(self, changeid)
570
568
571 def __contains__(self, changeid):
569 def __contains__(self, changeid):
572 try:
570 try:
573 self[changeid]
571 self[changeid]
574 return True
572 return True
575 except error.RepoLookupError:
573 except error.RepoLookupError:
576 return False
574 return False
577
575
578 def __nonzero__(self):
576 def __nonzero__(self):
579 return True
577 return True
580
578
581 __bool__ = __nonzero__
579 __bool__ = __nonzero__
582
580
583 def __len__(self):
581 def __len__(self):
584 return len(self.changelog)
582 return len(self.changelog)
585
583
586 def __iter__(self):
584 def __iter__(self):
587 return iter(self.changelog)
585 return iter(self.changelog)
588
586
589 def revs(self, expr, *args):
587 def revs(self, expr, *args):
590 '''Find revisions matching a revset.
588 '''Find revisions matching a revset.
591
589
592 The revset is specified as a string ``expr`` that may contain
590 The revset is specified as a string ``expr`` that may contain
593 %-formatting to escape certain types. See ``revsetlang.formatspec``.
591 %-formatting to escape certain types. See ``revsetlang.formatspec``.
594
592
595 Revset aliases from the configuration are not expanded. To expand
593 Revset aliases from the configuration are not expanded. To expand
596 user aliases, consider calling ``scmutil.revrange()`` or
594 user aliases, consider calling ``scmutil.revrange()`` or
597 ``repo.anyrevs([expr], user=True)``.
595 ``repo.anyrevs([expr], user=True)``.
598
596
599 Returns a revset.abstractsmartset, which is a list-like interface
597 Returns a revset.abstractsmartset, which is a list-like interface
600 that contains integer revisions.
598 that contains integer revisions.
601 '''
599 '''
602 expr = revsetlang.formatspec(expr, *args)
600 expr = revsetlang.formatspec(expr, *args)
603 m = revset.match(None, expr)
601 m = revset.match(None, expr)
604 return m(self)
602 return m(self)
605
603
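A brief usage sketch (hypothetical caller, not part of this file), assuming the %-escaping described above: values are passed as arguments and quoted by revsetlang.formatspec rather than interpolated by hand.

    # '%s' escapes a string, '%ld' expands a list of integer revisions
    drafts = repo.revs('branch(%s) and draft()', 'default')
    merges = repo.revs('merge() and %ld', list(drafts))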
606 def set(self, expr, *args):
604 def set(self, expr, *args):
607 '''Find revisions matching a revset and emit changectx instances.
605 '''Find revisions matching a revset and emit changectx instances.
608
606
609 This is a convenience wrapper around ``revs()`` that iterates the
607 This is a convenience wrapper around ``revs()`` that iterates the
610 result and is a generator of changectx instances.
608 result and is a generator of changectx instances.
611
609
612 Revset aliases from the configuration are not expanded. To expand
610 Revset aliases from the configuration are not expanded. To expand
613 user aliases, consider calling ``scmutil.revrange()``.
611 user aliases, consider calling ``scmutil.revrange()``.
614 '''
612 '''
615 for r in self.revs(expr, *args):
613 for r in self.revs(expr, *args):
616 yield self[r]
614 yield self[r]
617
615
618 def anyrevs(self, specs, user=False):
616 def anyrevs(self, specs, user=False):
619 '''Find revisions matching one of the given revsets.
617 '''Find revisions matching one of the given revsets.
620
618
621 Revset aliases from the configuration are not expanded by default. To
619 Revset aliases from the configuration are not expanded by default. To
622 expand user aliases, specify ``user=True``.
620 expand user aliases, specify ``user=True``.
623 '''
621 '''
624 if user:
622 if user:
625 m = revset.matchany(self.ui, specs, repo=self)
623 m = revset.matchany(self.ui, specs, repo=self)
626 else:
624 else:
627 m = revset.matchany(None, specs)
625 m = revset.matchany(None, specs)
628 return m(self)
626 return m(self)
629
627
630 def url(self):
628 def url(self):
631 return 'file:' + self.root
629 return 'file:' + self.root
632
630
633 def hook(self, name, throw=False, **args):
631 def hook(self, name, throw=False, **args):
634 """Call a hook, passing this repo instance.
632 """Call a hook, passing this repo instance.
635
633
636 This is a convenience method to aid invoking hooks. Extensions likely
634 This is a convenience method to aid invoking hooks. Extensions likely
637 won't call this unless they have registered a custom hook or are
635 won't call this unless they have registered a custom hook or are
638 replacing code that is expected to call a hook.
636 replacing code that is expected to call a hook.
639 """
637 """
640 return hook.hook(self.ui, self, name, throw, **args)
638 return hook.hook(self.ui, self, name, throw, **args)
641
639
642 @filteredpropertycache
640 @filteredpropertycache
643 def _tagscache(self):
641 def _tagscache(self):
644 '''Returns a tagscache object that contains various tags related
642 '''Returns a tagscache object that contains various tags related
645 caches.'''
643 caches.'''
646
644
647 # This simplifies its cache management by having one decorated
645 # This simplifies its cache management by having one decorated
648 # function (this one) and the rest simply fetch things from it.
646 # function (this one) and the rest simply fetch things from it.
649 class tagscache(object):
647 class tagscache(object):
650 def __init__(self):
648 def __init__(self):
651 # These two define the set of tags for this repository. tags
649 # These two define the set of tags for this repository. tags
652 # maps tag name to node; tagtypes maps tag name to 'global' or
650 # maps tag name to node; tagtypes maps tag name to 'global' or
653 # 'local'. (Global tags are defined by .hgtags across all
651 # 'local'. (Global tags are defined by .hgtags across all
654 # heads, and local tags are defined in .hg/localtags.)
652 # heads, and local tags are defined in .hg/localtags.)
655 # They constitute the in-memory cache of tags.
653 # They constitute the in-memory cache of tags.
656 self.tags = self.tagtypes = None
654 self.tags = self.tagtypes = None
657
655
658 self.nodetagscache = self.tagslist = None
656 self.nodetagscache = self.tagslist = None
659
657
660 cache = tagscache()
658 cache = tagscache()
661 cache.tags, cache.tagtypes = self._findtags()
659 cache.tags, cache.tagtypes = self._findtags()
662
660
663 return cache
661 return cache
664
662
665 def tags(self):
663 def tags(self):
666 '''return a mapping of tag to node'''
664 '''return a mapping of tag to node'''
667 t = {}
665 t = {}
668 if self.changelog.filteredrevs:
666 if self.changelog.filteredrevs:
669 tags, tt = self._findtags()
667 tags, tt = self._findtags()
670 else:
668 else:
671 tags = self._tagscache.tags
669 tags = self._tagscache.tags
672 for k, v in tags.iteritems():
670 for k, v in tags.iteritems():
673 try:
671 try:
674 # ignore tags to unknown nodes
672 # ignore tags to unknown nodes
675 self.changelog.rev(v)
673 self.changelog.rev(v)
676 t[k] = v
674 t[k] = v
677 except (error.LookupError, ValueError):
675 except (error.LookupError, ValueError):
678 pass
676 pass
679 return t
677 return t
680
678
681 def _findtags(self):
679 def _findtags(self):
682 '''Do the hard work of finding tags. Return a pair of dicts
680 '''Do the hard work of finding tags. Return a pair of dicts
683 (tags, tagtypes) where tags maps tag name to node, and tagtypes
681 (tags, tagtypes) where tags maps tag name to node, and tagtypes
684 maps tag name to a string like \'global\' or \'local\'.
682 maps tag name to a string like \'global\' or \'local\'.
685 Subclasses or extensions are free to add their own tags, but
683 Subclasses or extensions are free to add their own tags, but
686 should be aware that the returned dicts will be retained for the
684 should be aware that the returned dicts will be retained for the
687 duration of the localrepo object.'''
685 duration of the localrepo object.'''
688
686
689 # XXX what tagtype should subclasses/extensions use? Currently
687 # XXX what tagtype should subclasses/extensions use? Currently
690 # mq and bookmarks add tags, but do not set the tagtype at all.
688 # mq and bookmarks add tags, but do not set the tagtype at all.
691 # Should each extension invent its own tag type? Should there
689 # Should each extension invent its own tag type? Should there
692 # be one tagtype for all such "virtual" tags? Or is the status
690 # be one tagtype for all such "virtual" tags? Or is the status
693 # quo fine?
691 # quo fine?
694
692
695
693
696 # map tag name to (node, hist)
694 # map tag name to (node, hist)
697 alltags = tagsmod.findglobaltags(self.ui, self)
695 alltags = tagsmod.findglobaltags(self.ui, self)
698 # map tag name to tag type
696 # map tag name to tag type
699 tagtypes = dict((tag, 'global') for tag in alltags)
697 tagtypes = dict((tag, 'global') for tag in alltags)
700
698
701 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
699 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
702
700
703 # Build the return dicts. Have to re-encode tag names because
701 # Build the return dicts. Have to re-encode tag names because
704 # the tags module always uses UTF-8 (in order not to lose info
702 # the tags module always uses UTF-8 (in order not to lose info
705 # writing to the cache), but the rest of Mercurial wants them in
703 # writing to the cache), but the rest of Mercurial wants them in
706 # local encoding.
704 # local encoding.
707 tags = {}
705 tags = {}
708 for (name, (node, hist)) in alltags.iteritems():
706 for (name, (node, hist)) in alltags.iteritems():
709 if node != nullid:
707 if node != nullid:
710 tags[encoding.tolocal(name)] = node
708 tags[encoding.tolocal(name)] = node
711 tags['tip'] = self.changelog.tip()
709 tags['tip'] = self.changelog.tip()
712 tagtypes = dict([(encoding.tolocal(name), value)
710 tagtypes = dict([(encoding.tolocal(name), value)
713 for (name, value) in tagtypes.iteritems()])
711 for (name, value) in tagtypes.iteritems()])
714 return (tags, tagtypes)
712 return (tags, tagtypes)
715
713
716 def tagtype(self, tagname):
714 def tagtype(self, tagname):
717 '''
715 '''
718 return the type of the given tag. result can be:
716 return the type of the given tag. result can be:
719
717
720 'local' : a local tag
718 'local' : a local tag
721 'global' : a global tag
719 'global' : a global tag
722 None : tag does not exist
720 None : tag does not exist
723 '''
721 '''
724
722
725 return self._tagscache.tagtypes.get(tagname)
723 return self._tagscache.tagtypes.get(tagname)
726
724
727 def tagslist(self):
725 def tagslist(self):
728 '''return a list of tags ordered by revision'''
726 '''return a list of tags ordered by revision'''
729 if not self._tagscache.tagslist:
727 if not self._tagscache.tagslist:
730 l = []
728 l = []
731 for t, n in self.tags().iteritems():
729 for t, n in self.tags().iteritems():
732 l.append((self.changelog.rev(n), t, n))
730 l.append((self.changelog.rev(n), t, n))
733 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
731 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
734
732
735 return self._tagscache.tagslist
733 return self._tagscache.tagslist
736
734
737 def nodetags(self, node):
735 def nodetags(self, node):
738 '''return the tags associated with a node'''
736 '''return the tags associated with a node'''
739 if not self._tagscache.nodetagscache:
737 if not self._tagscache.nodetagscache:
740 nodetagscache = {}
738 nodetagscache = {}
741 for t, n in self._tagscache.tags.iteritems():
739 for t, n in self._tagscache.tags.iteritems():
742 nodetagscache.setdefault(n, []).append(t)
740 nodetagscache.setdefault(n, []).append(t)
743 for tags in nodetagscache.itervalues():
741 for tags in nodetagscache.itervalues():
744 tags.sort()
742 tags.sort()
745 self._tagscache.nodetagscache = nodetagscache
743 self._tagscache.nodetagscache = nodetagscache
746 return self._tagscache.nodetagscache.get(node, [])
744 return self._tagscache.nodetagscache.get(node, [])
747
745
748 def nodebookmarks(self, node):
746 def nodebookmarks(self, node):
749 """return the list of bookmarks pointing to the specified node"""
747 """return the list of bookmarks pointing to the specified node"""
750 marks = []
748 marks = []
751 for bookmark, n in self._bookmarks.iteritems():
749 for bookmark, n in self._bookmarks.iteritems():
752 if n == node:
750 if n == node:
753 marks.append(bookmark)
751 marks.append(bookmark)
754 return sorted(marks)
752 return sorted(marks)
755
753
756 def branchmap(self):
754 def branchmap(self):
757 '''returns a dictionary {branch: [branchheads]} with branchheads
755 '''returns a dictionary {branch: [branchheads]} with branchheads
758 ordered by increasing revision number'''
756 ordered by increasing revision number'''
759 branchmap.updatecache(self)
757 branchmap.updatecache(self)
760 return self._branchcaches[self.filtername]
758 return self._branchcaches[self.filtername]
761
759
762 @unfilteredmethod
760 @unfilteredmethod
763 def revbranchcache(self):
761 def revbranchcache(self):
764 if not self._revbranchcache:
762 if not self._revbranchcache:
765 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
763 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
766 return self._revbranchcache
764 return self._revbranchcache
767
765
768 def branchtip(self, branch, ignoremissing=False):
766 def branchtip(self, branch, ignoremissing=False):
769 '''return the tip node for a given branch
767 '''return the tip node for a given branch
770
768
771 If ignoremissing is True, then this method will not raise an error.
769 If ignoremissing is True, then this method will not raise an error.
772 This is helpful for callers that only expect None for a missing branch
770 This is helpful for callers that only expect None for a missing branch
773 (e.g. namespace).
771 (e.g. namespace).
774
772
775 '''
773 '''
776 try:
774 try:
777 return self.branchmap().branchtip(branch)
775 return self.branchmap().branchtip(branch)
778 except KeyError:
776 except KeyError:
779 if not ignoremissing:
777 if not ignoremissing:
780 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
778 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
781 else:
779 else:
782 pass
780 pass
783
781
784 def lookup(self, key):
782 def lookup(self, key):
785 return self[key].node()
783 return self[key].node()
786
784
787 def lookupbranch(self, key, remote=None):
785 def lookupbranch(self, key, remote=None):
788 repo = remote or self
786 repo = remote or self
789 if key in repo.branchmap():
787 if key in repo.branchmap():
790 return key
788 return key
791
789
792 repo = (remote and remote.local()) and remote or self
790 repo = (remote and remote.local()) and remote or self
793 return repo[key].branch()
791 return repo[key].branch()
794
792
795 def known(self, nodes):
793 def known(self, nodes):
796 cl = self.changelog
794 cl = self.changelog
797 nm = cl.nodemap
795 nm = cl.nodemap
798 filtered = cl.filteredrevs
796 filtered = cl.filteredrevs
799 result = []
797 result = []
800 for n in nodes:
798 for n in nodes:
801 r = nm.get(n)
799 r = nm.get(n)
802 resp = not (r is None or r in filtered)
800 resp = not (r is None or r in filtered)
803 result.append(resp)
801 result.append(resp)
804 return result
802 return result
805
803
806 def local(self):
804 def local(self):
807 return self
805 return self
808
806
809 def publishing(self):
807 def publishing(self):
810 # it's safe (and desirable) to trust the publish flag unconditionally
808 # it's safe (and desirable) to trust the publish flag unconditionally
811 # so that we don't finalize changes shared between users via ssh or nfs
809 # so that we don't finalize changes shared between users via ssh or nfs
812 return self.ui.configbool('phases', 'publish', True, untrusted=True)
810 return self.ui.configbool('phases', 'publish', True, untrusted=True)
813
811
814 def cancopy(self):
812 def cancopy(self):
815 # so statichttprepo's override of local() works
813 # so statichttprepo's override of local() works
816 if not self.local():
814 if not self.local():
817 return False
815 return False
818 if not self.publishing():
816 if not self.publishing():
819 return True
817 return True
820 # if publishing we can't copy if there is filtered content
818 # if publishing we can't copy if there is filtered content
821 return not self.filtered('visible').changelog.filteredrevs
819 return not self.filtered('visible').changelog.filteredrevs
822
820
823 def shared(self):
821 def shared(self):
824 '''the type of shared repository (None if not shared)'''
822 '''the type of shared repository (None if not shared)'''
825 if self.sharedpath != self.path:
823 if self.sharedpath != self.path:
826 return 'store'
824 return 'store'
827 return None
825 return None
828
826
829 def wjoin(self, f, *insidef):
827 def wjoin(self, f, *insidef):
830 return self.vfs.reljoin(self.root, f, *insidef)
828 return self.vfs.reljoin(self.root, f, *insidef)
831
829
832 def file(self, f):
830 def file(self, f):
833 if f[0] == '/':
831 if f[0] == '/':
834 f = f[1:]
832 f = f[1:]
835 return filelog.filelog(self.svfs, f)
833 return filelog.filelog(self.svfs, f)
836
834
837 def changectx(self, changeid):
835 def changectx(self, changeid):
838 return self[changeid]
836 return self[changeid]
839
837
840 def setparents(self, p1, p2=nullid):
838 def setparents(self, p1, p2=nullid):
841 self.dirstate.beginparentchange()
839 self.dirstate.beginparentchange()
842 copies = self.dirstate.setparents(p1, p2)
840 copies = self.dirstate.setparents(p1, p2)
843 pctx = self[p1]
841 pctx = self[p1]
844 if copies:
842 if copies:
845 # Adjust copy records; the dirstate cannot do it, since it
843 # Adjust copy records; the dirstate cannot do it, since it
846 # requires access to the parents' manifests. Preserve them
844 # requires access to the parents' manifests. Preserve them
847 # only for entries added to the first parent.
845 # only for entries added to the first parent.
848 for f in copies:
846 for f in copies:
849 if f not in pctx and copies[f] in pctx:
847 if f not in pctx and copies[f] in pctx:
850 self.dirstate.copy(copies[f], f)
848 self.dirstate.copy(copies[f], f)
851 if p2 == nullid:
849 if p2 == nullid:
852 for f, s in sorted(self.dirstate.copies().items()):
850 for f, s in sorted(self.dirstate.copies().items()):
853 if f not in pctx and s not in pctx:
851 if f not in pctx and s not in pctx:
854 self.dirstate.copy(None, f)
852 self.dirstate.copy(None, f)
855 self.dirstate.endparentchange()
853 self.dirstate.endparentchange()
856
854
857 def filectx(self, path, changeid=None, fileid=None):
855 def filectx(self, path, changeid=None, fileid=None):
858 """changeid can be a changeset revision, node, or tag.
856 """changeid can be a changeset revision, node, or tag.
859 fileid can be a file revision or node."""
857 fileid can be a file revision or node."""
860 return context.filectx(self, path, changeid, fileid)
858 return context.filectx(self, path, changeid, fileid)
861
859
862 def getcwd(self):
860 def getcwd(self):
863 return self.dirstate.getcwd()
861 return self.dirstate.getcwd()
864
862
865 def pathto(self, f, cwd=None):
863 def pathto(self, f, cwd=None):
866 return self.dirstate.pathto(f, cwd)
864 return self.dirstate.pathto(f, cwd)
867
865
868 def _loadfilter(self, filter):
866 def _loadfilter(self, filter):
869 if filter not in self.filterpats:
867 if filter not in self.filterpats:
870 l = []
868 l = []
871 for pat, cmd in self.ui.configitems(filter):
869 for pat, cmd in self.ui.configitems(filter):
872 if cmd == '!':
870 if cmd == '!':
873 continue
871 continue
874 mf = matchmod.match(self.root, '', [pat])
872 mf = matchmod.match(self.root, '', [pat])
875 fn = None
873 fn = None
876 params = cmd
874 params = cmd
877 for name, filterfn in self._datafilters.iteritems():
875 for name, filterfn in self._datafilters.iteritems():
878 if cmd.startswith(name):
876 if cmd.startswith(name):
879 fn = filterfn
877 fn = filterfn
880 params = cmd[len(name):].lstrip()
878 params = cmd[len(name):].lstrip()
881 break
879 break
882 if not fn:
880 if not fn:
883 fn = lambda s, c, **kwargs: util.filter(s, c)
881 fn = lambda s, c, **kwargs: util.filter(s, c)
884 # Wrap old filters not supporting keyword arguments
882 # Wrap old filters not supporting keyword arguments
885 if not inspect.getargspec(fn)[2]:
883 if not inspect.getargspec(fn)[2]:
886 oldfn = fn
884 oldfn = fn
887 fn = lambda s, c, **kwargs: oldfn(s, c)
885 fn = lambda s, c, **kwargs: oldfn(s, c)
888 l.append((mf, fn, params))
886 l.append((mf, fn, params))
889 self.filterpats[filter] = l
887 self.filterpats[filter] = l
890 return self.filterpats[filter]
888 return self.filterpats[filter]
891
889
892 def _filter(self, filterpats, filename, data):
890 def _filter(self, filterpats, filename, data):
893 for mf, fn, cmd in filterpats:
891 for mf, fn, cmd in filterpats:
894 if mf(filename):
892 if mf(filename):
895 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
893 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
896 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
894 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
897 break
895 break
898
896
899 return data
897 return data
900
898
901 @unfilteredpropertycache
899 @unfilteredpropertycache
902 def _encodefilterpats(self):
900 def _encodefilterpats(self):
903 return self._loadfilter('encode')
901 return self._loadfilter('encode')
904
902
905 @unfilteredpropertycache
903 @unfilteredpropertycache
906 def _decodefilterpats(self):
904 def _decodefilterpats(self):
907 return self._loadfilter('decode')
905 return self._loadfilter('decode')
908
906
909 def adddatafilter(self, name, filter):
907 def adddatafilter(self, name, filter):
910 self._datafilters[name] = filter
908 self._datafilters[name] = filter
911
909
912 def wread(self, filename):
910 def wread(self, filename):
913 if self.wvfs.islink(filename):
911 if self.wvfs.islink(filename):
914 data = self.wvfs.readlink(filename)
912 data = self.wvfs.readlink(filename)
915 else:
913 else:
916 data = self.wvfs.read(filename)
914 data = self.wvfs.read(filename)
917 return self._filter(self._encodefilterpats, filename, data)
915 return self._filter(self._encodefilterpats, filename, data)
918
916
919 def wwrite(self, filename, data, flags, backgroundclose=False):
917 def wwrite(self, filename, data, flags, backgroundclose=False):
920 """write ``data`` into ``filename`` in the working directory
918 """write ``data`` into ``filename`` in the working directory
921
919
922 This returns the length of the written (possibly decoded) data.
920 This returns the length of the written (possibly decoded) data.
923 """
921 """
924 data = self._filter(self._decodefilterpats, filename, data)
922 data = self._filter(self._decodefilterpats, filename, data)
925 if 'l' in flags:
923 if 'l' in flags:
926 self.wvfs.symlink(data, filename)
924 self.wvfs.symlink(data, filename)
927 else:
925 else:
928 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
926 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
929 if 'x' in flags:
927 if 'x' in flags:
930 self.wvfs.setflags(filename, False, True)
928 self.wvfs.setflags(filename, False, True)
931 return len(data)
929 return len(data)
932
930
933 def wwritedata(self, filename, data):
931 def wwritedata(self, filename, data):
934 return self._filter(self._decodefilterpats, filename, data)
932 return self._filter(self._decodefilterpats, filename, data)
935
933
936 def currenttransaction(self):
934 def currenttransaction(self):
937 """return the current transaction or None if non exists"""
935 """return the current transaction or None if non exists"""
938 if self._transref:
936 if self._transref:
939 tr = self._transref()
937 tr = self._transref()
940 else:
938 else:
941 tr = None
939 tr = None
942
940
943 if tr and tr.running():
941 if tr and tr.running():
944 return tr
942 return tr
945 return None
943 return None
946
944
947 def transaction(self, desc, report=None):
945 def transaction(self, desc, report=None):
948 if (self.ui.configbool('devel', 'all-warnings')
946 if (self.ui.configbool('devel', 'all-warnings')
949 or self.ui.configbool('devel', 'check-locks')):
947 or self.ui.configbool('devel', 'check-locks')):
950 if self._currentlock(self._lockref) is None:
948 if self._currentlock(self._lockref) is None:
951 raise error.ProgrammingError('transaction requires locking')
949 raise error.ProgrammingError('transaction requires locking')
952 tr = self.currenttransaction()
950 tr = self.currenttransaction()
953 if tr is not None:
951 if tr is not None:
954 return tr.nest()
952 return tr.nest()
955
953
956 # abort here if the journal already exists
954 # abort here if the journal already exists
957 if self.svfs.exists("journal"):
955 if self.svfs.exists("journal"):
958 raise error.RepoError(
956 raise error.RepoError(
959 _("abandoned transaction found"),
957 _("abandoned transaction found"),
960 hint=_("run 'hg recover' to clean up transaction"))
958 hint=_("run 'hg recover' to clean up transaction"))
961
959
962 idbase = "%.40f#%f" % (random.random(), time.time())
960 idbase = "%.40f#%f" % (random.random(), time.time())
963 ha = hex(hashlib.sha1(idbase).digest())
961 ha = hex(hashlib.sha1(idbase).digest())
964 txnid = 'TXN:' + ha
962 txnid = 'TXN:' + ha
965 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
963 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
966
964
967 self._writejournal(desc)
965 self._writejournal(desc)
968 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
966 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
969 if report:
967 if report:
970 rp = report
968 rp = report
971 else:
969 else:
972 rp = self.ui.warn
970 rp = self.ui.warn
973 vfsmap = {'plain': self.vfs} # root of .hg/
971 vfsmap = {'plain': self.vfs} # root of .hg/
974 # we must avoid cyclic reference between repo and transaction.
972 # we must avoid cyclic reference between repo and transaction.
975 reporef = weakref.ref(self)
973 reporef = weakref.ref(self)
976 # Code to track tag movement
974 # Code to track tag movement
977 #
975 #
978 # Since tags are all handled as file content, it is actually quite hard
976 # Since tags are all handled as file content, it is actually quite hard
979 # to track these movements from a code perspective. So we fall back to
977 # to track these movements from a code perspective. So we fall back to
980 # tracking at the repository level. One could envision tracking changes
978 # tracking at the repository level. One could envision tracking changes
981 # to the '.hgtags' file through changegroup apply, but that fails to
979 # to the '.hgtags' file through changegroup apply, but that fails to
982 # cope with cases where a transaction exposes new heads without a
980 # cope with cases where a transaction exposes new heads without a
983 # changegroup being involved (e.g. phase movement).
981 # changegroup being involved (e.g. phase movement).
984 #
982 #
985 # For now, we gate the feature behind a flag since it likely comes
983 # For now, we gate the feature behind a flag since it likely comes
986 # with performance impacts. The current code runs more often than needed
984 # with performance impacts. The current code runs more often than needed
987 # and does not use caches as much as it could. The current focus is on
985 # and does not use caches as much as it could. The current focus is on
988 # the behavior of the feature so we disable it by default. The flag
986 # the behavior of the feature so we disable it by default. The flag
989 # will be removed when we are happy with the performance impact.
987 # will be removed when we are happy with the performance impact.
990 #
988 #
991 # Once this feature is no longer experimental move the following
989 # Once this feature is no longer experimental move the following
992 # documentation to the appropriate help section:
990 # documentation to the appropriate help section:
993 #
991 #
994 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
992 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
995 # tags (new or changed or deleted tags). In addition the details of
993 # tags (new or changed or deleted tags). In addition the details of
996 # these changes are made available in a file at:
994 # these changes are made available in a file at:
997 # ``REPOROOT/.hg/changes/tags.changes``.
995 # ``REPOROOT/.hg/changes/tags.changes``.
998 # Make sure you check for HG_TAG_MOVED before reading that file as it
996 # Make sure you check for HG_TAG_MOVED before reading that file as it
999 # might exist from a previous transaction even if no tags were touched
997 # might exist from a previous transaction even if no tags were touched
1000 # in this one. Changes are recorded in a line-based format::
998 # in this one. Changes are recorded in a line-based format::
1001 #
999 #
1002 # <action> <hex-node> <tag-name>\n
1000 # <action> <hex-node> <tag-name>\n
1003 #
1001 #
1004 # Actions are defined as follows:
1002 # Actions are defined as follows:
1005 # "-R": tag is removed,
1003 # "-R": tag is removed,
1006 # "+A": tag is added,
1004 # "+A": tag is added,
1007 # "-M": tag is moved (old value),
1005 # "-M": tag is moved (old value),
1008 # "+M": tag is moved (new value),
1006 # "+M": tag is moved (new value),
1009 tracktags = lambda x: None
1007 tracktags = lambda x: None
1010 # experimental config: experimental.hook-track-tags
1008 # experimental config: experimental.hook-track-tags
1011 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1009 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1012 False)
1010 False)
1013 if desc != 'strip' and shouldtracktags:
1011 if desc != 'strip' and shouldtracktags:
1014 oldheads = self.changelog.headrevs()
1012 oldheads = self.changelog.headrevs()
1015 def tracktags(tr2):
1013 def tracktags(tr2):
1016 repo = reporef()
1014 repo = reporef()
1017 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1015 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1018 newheads = repo.changelog.headrevs()
1016 newheads = repo.changelog.headrevs()
1019 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1017 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1020 # notes: we compare lists here.
1018 # notes: we compare lists here.
1021 # As we do it only once, building sets would not be cheaper.
1019 # As we do it only once, building sets would not be cheaper.
1022 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1020 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1023 if changes:
1021 if changes:
1024 tr2.hookargs['tag_moved'] = '1'
1022 tr2.hookargs['tag_moved'] = '1'
1025 with repo.vfs('changes/tags.changes', 'w',
1023 with repo.vfs('changes/tags.changes', 'w',
1026 atomictemp=True) as changesfile:
1024 atomictemp=True) as changesfile:
1027 # note: we do not register the file with the transaction
1025 # note: we do not register the file with the transaction
1028 # because we need it to still exist when the transaction
1026 # because we need it to still exist when the transaction
1029 # is closed (for txnclose hooks)
1027 # is closed (for txnclose hooks)
1030 tagsmod.writediff(changesfile, changes)
1028 tagsmod.writediff(changesfile, changes)
1031 def validate(tr2):
1029 def validate(tr2):
1032 """will run pre-closing hooks"""
1030 """will run pre-closing hooks"""
1033 # XXX the transaction API is a bit lacking here so we take a hacky
1031 # XXX the transaction API is a bit lacking here so we take a hacky
1034 # path for now
1032 # path for now
1035 #
1033 #
1036 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1034 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1037 # dict is copied before these run. In addition, we need the data
1035 # dict is copied before these run. In addition, we need the data
1038 # available to in-memory hooks too.
1036 # available to in-memory hooks too.
1039 #
1037 #
1040 # Moreover, we also need to make sure this runs before txnclose
1038 # Moreover, we also need to make sure this runs before txnclose
1041 # hooks and there is no "pending" mechanism that would execute
1039 # hooks and there is no "pending" mechanism that would execute
1042 # logic only if hooks are about to run.
1040 # logic only if hooks are about to run.
1043 #
1041 #
1044 # Fixing this limitation of the transaction is also needed to track
1042 # Fixing this limitation of the transaction is also needed to track
1045 # other families of changes (bookmarks, phases, obsolescence).
1043 # other families of changes (bookmarks, phases, obsolescence).
1046 #
1044 #
1047 # This will have to be fixed before we remove the experimental
1045 # This will have to be fixed before we remove the experimental
1048 # gating.
1046 # gating.
1049 tracktags(tr2)
1047 tracktags(tr2)
1050 reporef().hook('pretxnclose', throw=True,
1048 reporef().hook('pretxnclose', throw=True,
1051 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1049 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1052 def releasefn(tr, success):
1050 def releasefn(tr, success):
1053 repo = reporef()
1051 repo = reporef()
1054 if success:
1052 if success:
1055 # this should be explicitly invoked here, because
1053 # this should be explicitly invoked here, because
1056 # in-memory changes aren't written out at closing
1054 # in-memory changes aren't written out at closing
1057 # transaction, if tr.addfilegenerator (via
1055 # transaction, if tr.addfilegenerator (via
1058 # dirstate.write or so) isn't invoked while
1056 # dirstate.write or so) isn't invoked while
1059 # transaction running
1057 # transaction running
1060 repo.dirstate.write(None)
1058 repo.dirstate.write(None)
1061 else:
1059 else:
1062 # discard all changes (including ones already written
1060 # discard all changes (including ones already written
1063 # out) in this transaction
1061 # out) in this transaction
1064 repo.dirstate.restorebackup(None, prefix='journal.')
1062 repo.dirstate.restorebackup(None, prefix='journal.')
1065
1063
1066 repo.invalidate(clearfilecache=True)
1064 repo.invalidate(clearfilecache=True)
1067
1065
1068 tr = transaction.transaction(rp, self.svfs, vfsmap,
1066 tr = transaction.transaction(rp, self.svfs, vfsmap,
1069 "journal",
1067 "journal",
1070 "undo",
1068 "undo",
1071 aftertrans(renames),
1069 aftertrans(renames),
1072 self.store.createmode,
1070 self.store.createmode,
1073 validator=validate,
1071 validator=validate,
1074 releasefn=releasefn)
1072 releasefn=releasefn)
1075 tr.changes['revs'] = set()
1073 tr.changes['revs'] = set()
1076
1074
1077 tr.hookargs['txnid'] = txnid
1075 tr.hookargs['txnid'] = txnid
1078 # note: writing the fncache only during finalize means that the file is
1076 # note: writing the fncache only during finalize means that the file is
1079 # outdated when running hooks. As fncache is used for streaming clones,
1077 # outdated when running hooks. As fncache is used for streaming clones,
1080 # this is not expected to break anything that happens during the hooks.
1078 # this is not expected to break anything that happens during the hooks.
1081 tr.addfinalize('flush-fncache', self.store.write)
1079 tr.addfinalize('flush-fncache', self.store.write)
1082 def txnclosehook(tr2):
1080 def txnclosehook(tr2):
1083 """To be run if transaction is successful, will schedule a hook run
1081 """To be run if transaction is successful, will schedule a hook run
1084 """
1082 """
1085 # Don't reference tr2 in hook() so we don't hold a reference.
1083 # Don't reference tr2 in hook() so we don't hold a reference.
1086 # This reduces memory consumption when there are multiple
1084 # This reduces memory consumption when there are multiple
1087 # transactions per lock. This can likely go away if issue5045
1085 # transactions per lock. This can likely go away if issue5045
1088 # fixes the function accumulation.
1086 # fixes the function accumulation.
1089 hookargs = tr2.hookargs
1087 hookargs = tr2.hookargs
1090
1088
1091 def hook():
1089 def hook():
1092 reporef().hook('txnclose', throw=False, txnname=desc,
1090 reporef().hook('txnclose', throw=False, txnname=desc,
1093 **pycompat.strkwargs(hookargs))
1091 **pycompat.strkwargs(hookargs))
1094 reporef()._afterlock(hook)
1092 reporef()._afterlock(hook)
1095 tr.addfinalize('txnclose-hook', txnclosehook)
1093 tr.addfinalize('txnclose-hook', txnclosehook)
1096 def warmscache(tr2):
1094 def warmscache(tr2):
1097 repo = reporef()
1095 repo = reporef()
1098 repo.updatecaches(tr2)
1096 repo.updatecaches(tr2)
1099 tr.addpostclose('warms-cache', warmscache)
1097 tr.addpostclose('warms-cache', warmscache)
1100 def txnaborthook(tr2):
1098 def txnaborthook(tr2):
1101 """To be run if transaction is aborted
1099 """To be run if transaction is aborted
1102 """
1100 """
1103 reporef().hook('txnabort', throw=False, txnname=desc,
1101 reporef().hook('txnabort', throw=False, txnname=desc,
1104 **tr2.hookargs)
1102 **tr2.hookargs)
1105 tr.addabort('txnabort-hook', txnaborthook)
1103 tr.addabort('txnabort-hook', txnaborthook)
1106 # avoid eager cache invalidation. in-memory data should be identical
1104 # avoid eager cache invalidation. in-memory data should be identical
1107 # to stored data if transaction has no error.
1105 # to stored data if transaction has no error.
1108 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1106 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1109 self._transref = weakref.ref(tr)
1107 self._transref = weakref.ref(tr)
1110 return tr
1108 return tr
1111
1109
1112 def _journalfiles(self):
1110 def _journalfiles(self):
1113 return ((self.svfs, 'journal'),
1111 return ((self.svfs, 'journal'),
1114 (self.vfs, 'journal.dirstate'),
1112 (self.vfs, 'journal.dirstate'),
1115 (self.vfs, 'journal.branch'),
1113 (self.vfs, 'journal.branch'),
1116 (self.vfs, 'journal.desc'),
1114 (self.vfs, 'journal.desc'),
1117 (self.vfs, 'journal.bookmarks'),
1115 (self.vfs, 'journal.bookmarks'),
1118 (self.svfs, 'journal.phaseroots'))
1116 (self.svfs, 'journal.phaseroots'))
1119
1117
1120 def undofiles(self):
1118 def undofiles(self):
1121 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1119 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1122
1120
1123 def _writejournal(self, desc):
1121 def _writejournal(self, desc):
1124 self.dirstate.savebackup(None, prefix='journal.')
1122 self.dirstate.savebackup(None, prefix='journal.')
1125 self.vfs.write("journal.branch",
1123 self.vfs.write("journal.branch",
1126 encoding.fromlocal(self.dirstate.branch()))
1124 encoding.fromlocal(self.dirstate.branch()))
1127 self.vfs.write("journal.desc",
1125 self.vfs.write("journal.desc",
1128 "%d\n%s\n" % (len(self), desc))
1126 "%d\n%s\n" % (len(self), desc))
1129 self.vfs.write("journal.bookmarks",
1127 self.vfs.write("journal.bookmarks",
1130 self.vfs.tryread("bookmarks"))
1128 self.vfs.tryread("bookmarks"))
1131 self.svfs.write("journal.phaseroots",
1129 self.svfs.write("journal.phaseroots",
1132 self.svfs.tryread("phaseroots"))
1130 self.svfs.tryread("phaseroots"))
1133
1131
1134 def recover(self):
1132 def recover(self):
1135 with self.lock():
1133 with self.lock():
1136 if self.svfs.exists("journal"):
1134 if self.svfs.exists("journal"):
1137 self.ui.status(_("rolling back interrupted transaction\n"))
1135 self.ui.status(_("rolling back interrupted transaction\n"))
1138 vfsmap = {'': self.svfs,
1136 vfsmap = {'': self.svfs,
1139 'plain': self.vfs,}
1137 'plain': self.vfs,}
1140 transaction.rollback(self.svfs, vfsmap, "journal",
1138 transaction.rollback(self.svfs, vfsmap, "journal",
1141 self.ui.warn)
1139 self.ui.warn)
1142 self.invalidate()
1140 self.invalidate()
1143 return True
1141 return True
1144 else:
1142 else:
1145 self.ui.warn(_("no interrupted transaction available\n"))
1143 self.ui.warn(_("no interrupted transaction available\n"))
1146 return False
1144 return False
1147
1145
1148 def rollback(self, dryrun=False, force=False):
1146 def rollback(self, dryrun=False, force=False):
1149 wlock = lock = dsguard = None
1147 wlock = lock = dsguard = None
1150 try:
1148 try:
1151 wlock = self.wlock()
1149 wlock = self.wlock()
1152 lock = self.lock()
1150 lock = self.lock()
1153 if self.svfs.exists("undo"):
1151 if self.svfs.exists("undo"):
1154 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1152 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1155
1153
1156 return self._rollback(dryrun, force, dsguard)
1154 return self._rollback(dryrun, force, dsguard)
1157 else:
1155 else:
1158 self.ui.warn(_("no rollback information available\n"))
1156 self.ui.warn(_("no rollback information available\n"))
1159 return 1
1157 return 1
1160 finally:
1158 finally:
1161 release(dsguard, lock, wlock)
1159 release(dsguard, lock, wlock)
1162
1160
1163 @unfilteredmethod # Until we get smarter cache management
1161 @unfilteredmethod # Until we get smarter cache management
1164 def _rollback(self, dryrun, force, dsguard):
1162 def _rollback(self, dryrun, force, dsguard):
1165 ui = self.ui
1163 ui = self.ui
1166 try:
1164 try:
1167 args = self.vfs.read('undo.desc').splitlines()
1165 args = self.vfs.read('undo.desc').splitlines()
1168 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1166 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1169 if len(args) >= 3:
1167 if len(args) >= 3:
1170 detail = args[2]
1168 detail = args[2]
1171 oldtip = oldlen - 1
1169 oldtip = oldlen - 1
1172
1170
1173 if detail and ui.verbose:
1171 if detail and ui.verbose:
1174 msg = (_('repository tip rolled back to revision %s'
1172 msg = (_('repository tip rolled back to revision %s'
1175 ' (undo %s: %s)\n')
1173 ' (undo %s: %s)\n')
1176 % (oldtip, desc, detail))
1174 % (oldtip, desc, detail))
1177 else:
1175 else:
1178 msg = (_('repository tip rolled back to revision %s'
1176 msg = (_('repository tip rolled back to revision %s'
1179 ' (undo %s)\n')
1177 ' (undo %s)\n')
1180 % (oldtip, desc))
1178 % (oldtip, desc))
1181 except IOError:
1179 except IOError:
1182 msg = _('rolling back unknown transaction\n')
1180 msg = _('rolling back unknown transaction\n')
1183 desc = None
1181 desc = None
1184
1182
1185 if not force and self['.'] != self['tip'] and desc == 'commit':
1183 if not force and self['.'] != self['tip'] and desc == 'commit':
1186 raise error.Abort(
1184 raise error.Abort(
1187 _('rollback of last commit while not checked out '
1185 _('rollback of last commit while not checked out '
1188 'may lose data'), hint=_('use -f to force'))
1186 'may lose data'), hint=_('use -f to force'))
1189
1187
1190 ui.status(msg)
1188 ui.status(msg)
1191 if dryrun:
1189 if dryrun:
1192 return 0
1190 return 0
1193
1191
1194 parents = self.dirstate.parents()
1192 parents = self.dirstate.parents()
1195 self.destroying()
1193 self.destroying()
1196 vfsmap = {'plain': self.vfs, '': self.svfs}
1194 vfsmap = {'plain': self.vfs, '': self.svfs}
1197 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1195 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1198 if self.vfs.exists('undo.bookmarks'):
1196 if self.vfs.exists('undo.bookmarks'):
1199 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1197 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1200 if self.svfs.exists('undo.phaseroots'):
1198 if self.svfs.exists('undo.phaseroots'):
1201 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1199 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1202 self.invalidate()
1200 self.invalidate()
1203
1201
1204 parentgone = (parents[0] not in self.changelog.nodemap or
1202 parentgone = (parents[0] not in self.changelog.nodemap or
1205 parents[1] not in self.changelog.nodemap)
1203 parents[1] not in self.changelog.nodemap)
1206 if parentgone:
1204 if parentgone:
1207 # prevent dirstateguard from overwriting already restored one
1205 # prevent dirstateguard from overwriting already restored one
1208 dsguard.close()
1206 dsguard.close()
1209
1207
1210 self.dirstate.restorebackup(None, prefix='undo.')
1208 self.dirstate.restorebackup(None, prefix='undo.')
1211 try:
1209 try:
1212 branch = self.vfs.read('undo.branch')
1210 branch = self.vfs.read('undo.branch')
1213 self.dirstate.setbranch(encoding.tolocal(branch))
1211 self.dirstate.setbranch(encoding.tolocal(branch))
1214 except IOError:
1212 except IOError:
1215 ui.warn(_('named branch could not be reset: '
1213 ui.warn(_('named branch could not be reset: '
1216 'current branch is still \'%s\'\n')
1214 'current branch is still \'%s\'\n')
1217 % self.dirstate.branch())
1215 % self.dirstate.branch())
1218
1216
1219 parents = tuple([p.rev() for p in self[None].parents()])
1217 parents = tuple([p.rev() for p in self[None].parents()])
1220 if len(parents) > 1:
1218 if len(parents) > 1:
1221 ui.status(_('working directory now based on '
1219 ui.status(_('working directory now based on '
1222 'revisions %d and %d\n') % parents)
1220 'revisions %d and %d\n') % parents)
1223 else:
1221 else:
1224 ui.status(_('working directory now based on '
1222 ui.status(_('working directory now based on '
1225 'revision %d\n') % parents)
1223 'revision %d\n') % parents)
1226 mergemod.mergestate.clean(self, self['.'].node())
1224 mergemod.mergestate.clean(self, self['.'].node())
1227
1225
1228 # TODO: if we know which new heads may result from this rollback, pass
1226 # TODO: if we know which new heads may result from this rollback, pass
1229 # them to destroy(), which will prevent the branchhead cache from being
1227 # them to destroy(), which will prevent the branchhead cache from being
1230 # invalidated.
1228 # invalidated.
1231 self.destroyed()
1229 self.destroyed()
1232 return 0
1230 return 0
1233
1231
1234 @unfilteredmethod
1232 @unfilteredmethod
1235 def updatecaches(self, tr=None):
1233 def updatecaches(self, tr=None):
1236 """warm appropriate caches
1234 """warm appropriate caches
1237
1235
1238 If this function is called after a transaction closed. The transaction
1236 If this function is called after a transaction closed. The transaction
1239 will be available in the 'tr' argument. This can be used to selectively
1237 will be available in the 'tr' argument. This can be used to selectively
1240 update caches relevant to the changes in that transaction.
1238 update caches relevant to the changes in that transaction.
1241 """
1239 """
1242 if tr is not None and tr.hookargs.get('source') == 'strip':
1240 if tr is not None and tr.hookargs.get('source') == 'strip':
1243 # During strip, many caches are invalid but
1241 # During strip, many caches are invalid but
1244 # later call to `destroyed` will refresh them.
1242 # later call to `destroyed` will refresh them.
1245 return
1243 return
1246
1244
1247 if tr is None or tr.changes['revs']:
1245 if tr is None or tr.changes['revs']:
1248 # updating the unfiltered branchmap should refresh all the others,
1246 # updating the unfiltered branchmap should refresh all the others,
1249 self.ui.debug('updating the branch cache\n')
1247 self.ui.debug('updating the branch cache\n')
1250 branchmap.updatecache(self.filtered('served'))
1248 branchmap.updatecache(self.filtered('served'))
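# Illustrative sketch, not part of the original source: an extension could
# wrap updatecaches() to warm an additional cache after each transaction.
# The attribute name 'mycache' and the module name are hypothetical;
# extensions.wrapfunction() is the standard wrapping helper.
#
#   from mercurial import extensions, localrepo
#
#   def _updatecaches(orig, repo, tr=None):
#       orig(repo, tr=tr)
#       if tr is None or tr.changes['revs']:
#           repo.mycache.update()
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo.localrepository, 'updatecaches',
#                               _updatecaches)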
1251
1249
1252 def invalidatecaches(self):
1250 def invalidatecaches(self):
1253
1251
1254 if '_tagscache' in vars(self):
1252 if '_tagscache' in vars(self):
1255 # can't use delattr on proxy
1253 # can't use delattr on proxy
1256 del self.__dict__['_tagscache']
1254 del self.__dict__['_tagscache']
1257
1255
1258 self.unfiltered()._branchcaches.clear()
1256 self.unfiltered()._branchcaches.clear()
1259 self.invalidatevolatilesets()
1257 self.invalidatevolatilesets()
1260
1258
1261 def invalidatevolatilesets(self):
1259 def invalidatevolatilesets(self):
1262 self.filteredrevcache.clear()
1260 self.filteredrevcache.clear()
1263 obsolete.clearobscaches(self)
1261 obsolete.clearobscaches(self)
1264
1262
1265 def invalidatedirstate(self):
1263 def invalidatedirstate(self):
'''Invalidates the dirstate, causing the next call to dirstate
to check if it was modified since the last time it was read,
rereading it if it has.

This differs from dirstate.invalidate() in that it does not always
reread the dirstate. Use dirstate.invalidate() if you want to
explicitly reread the dirstate (i.e. restore it to a previously
known good state).'''
1274 if hasunfilteredcache(self, 'dirstate'):
1272 if hasunfilteredcache(self, 'dirstate'):
1275 for k in self.dirstate._filecache:
1273 for k in self.dirstate._filecache:
1276 try:
1274 try:
1277 delattr(self.dirstate, k)
1275 delattr(self.dirstate, k)
1278 except AttributeError:
1276 except AttributeError:
1279 pass
1277 pass
1280 delattr(self.unfiltered(), 'dirstate')
1278 delattr(self.unfiltered(), 'dirstate')
1281
1279
1282 def invalidate(self, clearfilecache=False):
1280 def invalidate(self, clearfilecache=False):
1283 '''Invalidates both store and non-store parts other than dirstate
1281 '''Invalidates both store and non-store parts other than dirstate
1284
1282
1285 If a transaction is running, invalidation of store is omitted,
1283 If a transaction is running, invalidation of store is omitted,
1286 because discarding in-memory changes might cause inconsistency
1284 because discarding in-memory changes might cause inconsistency
1287 (e.g. incomplete fncache causes unintentional failure, but
1285 (e.g. incomplete fncache causes unintentional failure, but
1288 redundant one doesn't).
1286 redundant one doesn't).
1289 '''
1287 '''
1290 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1288 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1291 for k in list(self._filecache.keys()):
1289 for k in list(self._filecache.keys()):
1292 # dirstate is invalidated separately in invalidatedirstate()
1290 # dirstate is invalidated separately in invalidatedirstate()
1293 if k == 'dirstate':
1291 if k == 'dirstate':
1294 continue
1292 continue
1295
1293
1296 if clearfilecache:
1294 if clearfilecache:
1297 del self._filecache[k]
1295 del self._filecache[k]
1298 try:
1296 try:
1299 delattr(unfiltered, k)
1297 delattr(unfiltered, k)
1300 except AttributeError:
1298 except AttributeError:
1301 pass
1299 pass
1302 self.invalidatecaches()
1300 self.invalidatecaches()
1303 if not self.currenttransaction():
1301 if not self.currenttransaction():
1304 # TODO: Changing contents of store outside transaction
1302 # TODO: Changing contents of store outside transaction
1305 # causes inconsistency. We should make in-memory store
1303 # causes inconsistency. We should make in-memory store
1306 # changes detectable, and abort if changed.
1304 # changes detectable, and abort if changed.
1307 self.store.invalidatecaches()
1305 self.store.invalidatecaches()
1308
1306
1309 def invalidateall(self):
1307 def invalidateall(self):
1310 '''Fully invalidates both store and non-store parts, causing the
1308 '''Fully invalidates both store and non-store parts, causing the
1311 subsequent operation to reread any outside changes.'''
1309 subsequent operation to reread any outside changes.'''
1312 # extension should hook this to invalidate its caches
1310 # extension should hook this to invalidate its caches
1313 self.invalidate()
1311 self.invalidate()
1314 self.invalidatedirstate()
1312 self.invalidatedirstate()
1315
1313
1316 @unfilteredmethod
1314 @unfilteredmethod
1317 def _refreshfilecachestats(self, tr):
1315 def _refreshfilecachestats(self, tr):
1318 """Reload stats of cached files so that they are flagged as valid"""
1316 """Reload stats of cached files so that they are flagged as valid"""
1319 for k, ce in self._filecache.items():
1317 for k, ce in self._filecache.items():
1320 if k == 'dirstate' or k not in self.__dict__:
1318 if k == 'dirstate' or k not in self.__dict__:
1321 continue
1319 continue
1322 ce.refresh()
1320 ce.refresh()
1323
1321
1324 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1322 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1325 inheritchecker=None, parentenvvar=None):
1323 inheritchecker=None, parentenvvar=None):
1326 parentlock = None
1324 parentlock = None
1327 # the contents of parentenvvar are used by the underlying lock to
1325 # the contents of parentenvvar are used by the underlying lock to
1328 # determine whether it can be inherited
1326 # determine whether it can be inherited
1329 if parentenvvar is not None:
1327 if parentenvvar is not None:
1330 parentlock = encoding.environ.get(parentenvvar)
1328 parentlock = encoding.environ.get(parentenvvar)
1331 try:
1329 try:
1332 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1330 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1333 acquirefn=acquirefn, desc=desc,
1331 acquirefn=acquirefn, desc=desc,
1334 inheritchecker=inheritchecker,
1332 inheritchecker=inheritchecker,
1335 parentlock=parentlock)
1333 parentlock=parentlock)
1336 except error.LockHeld as inst:
1334 except error.LockHeld as inst:
1337 if not wait:
1335 if not wait:
1338 raise
1336 raise
1339 # show more details for new-style locks
1337 # show more details for new-style locks
1340 if ':' in inst.locker:
1338 if ':' in inst.locker:
1341 host, pid = inst.locker.split(":", 1)
1339 host, pid = inst.locker.split(":", 1)
1342 self.ui.warn(
1340 self.ui.warn(
1343 _("waiting for lock on %s held by process %r "
1341 _("waiting for lock on %s held by process %r "
1344 "on host %r\n") % (desc, pid, host))
1342 "on host %r\n") % (desc, pid, host))
1345 else:
1343 else:
1346 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1344 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1347 (desc, inst.locker))
1345 (desc, inst.locker))
1348 # default to 600 seconds timeout
1346 # default to 600 seconds timeout
1349 l = lockmod.lock(vfs, lockname,
1347 l = lockmod.lock(vfs, lockname,
1350 int(self.ui.config("ui", "timeout", "600")),
1348 int(self.ui.config("ui", "timeout", "600")),
1351 releasefn=releasefn, acquirefn=acquirefn,
1349 releasefn=releasefn, acquirefn=acquirefn,
1352 desc=desc)
1350 desc=desc)
1353 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1351 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1354 return l
1352 return l
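# Illustrative sketch, not part of the original source: the 600 second
# fallback above corresponds to the ui.timeout setting, which users can
# lower in their configuration, e.g.
#
#   [ui]
#   timeout = 30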
1355
1353
1356 def _afterlock(self, callback):
1354 def _afterlock(self, callback):
1357 """add a callback to be run when the repository is fully unlocked
1355 """add a callback to be run when the repository is fully unlocked
1358
1356
1359 The callback will be executed when the outermost lock is released
1357 The callback will be executed when the outermost lock is released
1360 (with wlock being higher level than 'lock')."""
1358 (with wlock being higher level than 'lock')."""
1361 for ref in (self._wlockref, self._lockref):
1359 for ref in (self._wlockref, self._lockref):
1362 l = ref and ref()
1360 l = ref and ref()
1363 if l and l.held:
1361 if l and l.held:
1364 l.postrelease.append(callback)
1362 l.postrelease.append(callback)
1365 break
1363 break
else: # no lock has been found.
1367 callback()
1365 callback()
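# Illustrative sketch, not part of the original source: _afterlock() is how
# this file defers the 'commit' and 'pushkey' hooks until every lock is
# released (see commithook and runhook below). A caller holding the locks
# could defer its own work the same way; notifyexternaltool is a
# hypothetical callback name.
#
#   def notifyexternaltool():
#       repo.ui.status('locks released, safe to notify\n')
#   repo._afterlock(notifyexternaltool)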
1368
1366
1369 def lock(self, wait=True):
1367 def lock(self, wait=True):
'''Lock the repository store (.hg/store) and return a weak reference
to the lock. Use this before modifying the store (e.g. committing or
stripping). If you are opening a transaction, get a lock as well.

If both 'lock' and 'wlock' must be acquired, ensure you always acquire
'wlock' first to avoid a deadlock hazard.'''
1376 l = self._currentlock(self._lockref)
1374 l = self._currentlock(self._lockref)
1377 if l is not None:
1375 if l is not None:
1378 l.lock()
1376 l.lock()
1379 return l
1377 return l
1380
1378
1381 l = self._lock(self.svfs, "lock", wait, None,
1379 l = self._lock(self.svfs, "lock", wait, None,
1382 self.invalidate, _('repository %s') % self.origroot)
1380 self.invalidate, _('repository %s') % self.origroot)
1383 self._lockref = weakref.ref(l)
1381 self._lockref = weakref.ref(l)
1384 return l
1382 return l
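# Illustrative sketch, not part of the original source: the acquisition
# order documented above (wlock before lock) is the same pattern commit()
# uses later in this file.
#
#   wlock = lock = None
#   try:
#       wlock = repo.wlock()
#       lock = repo.lock()
#       # ... modify the store ...
#   finally:
#       lockmod.release(lock, wlock)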
1385
1383
1386 def _wlockchecktransaction(self):
1384 def _wlockchecktransaction(self):
1387 if self.currenttransaction() is not None:
1385 if self.currenttransaction() is not None:
1388 raise error.LockInheritanceContractViolation(
1386 raise error.LockInheritanceContractViolation(
1389 'wlock cannot be inherited in the middle of a transaction')
1387 'wlock cannot be inherited in the middle of a transaction')
1390
1388
1391 def wlock(self, wait=True):
1389 def wlock(self, wait=True):
'''Lock the non-store parts of the repository (everything under
.hg except .hg/store) and return a weak reference to the lock.

Use this before modifying files in .hg.

If both 'lock' and 'wlock' must be acquired, ensure you always acquire
'wlock' first to avoid a deadlock hazard.'''
1399 l = self._wlockref and self._wlockref()
1397 l = self._wlockref and self._wlockref()
1400 if l is not None and l.held:
1398 if l is not None and l.held:
1401 l.lock()
1399 l.lock()
1402 return l
1400 return l
1403
1401
# We do not need to check for non-waiting lock acquisition. Such an
# acquisition would not cause a dead-lock: it would simply fail.
1406 if wait and (self.ui.configbool('devel', 'all-warnings')
1404 if wait and (self.ui.configbool('devel', 'all-warnings')
1407 or self.ui.configbool('devel', 'check-locks')):
1405 or self.ui.configbool('devel', 'check-locks')):
1408 if self._currentlock(self._lockref) is not None:
1406 if self._currentlock(self._lockref) is not None:
1409 self.ui.develwarn('"wlock" acquired after "lock"')
1407 self.ui.develwarn('"wlock" acquired after "lock"')
1410
1408
1411 def unlock():
1409 def unlock():
1412 if self.dirstate.pendingparentchange():
1410 if self.dirstate.pendingparentchange():
1413 self.dirstate.invalidate()
1411 self.dirstate.invalidate()
1414 else:
1412 else:
1415 self.dirstate.write(None)
1413 self.dirstate.write(None)
1416
1414
1417 self._filecache['dirstate'].refresh()
1415 self._filecache['dirstate'].refresh()
1418
1416
1419 l = self._lock(self.vfs, "wlock", wait, unlock,
1417 l = self._lock(self.vfs, "wlock", wait, unlock,
1420 self.invalidatedirstate, _('working directory of %s') %
1418 self.invalidatedirstate, _('working directory of %s') %
1421 self.origroot,
1419 self.origroot,
1422 inheritchecker=self._wlockchecktransaction,
1420 inheritchecker=self._wlockchecktransaction,
1423 parentenvvar='HG_WLOCK_LOCKER')
1421 parentenvvar='HG_WLOCK_LOCKER')
1424 self._wlockref = weakref.ref(l)
1422 self._wlockref = weakref.ref(l)
1425 return l
1423 return l
1426
1424
1427 def _currentlock(self, lockref):
1425 def _currentlock(self, lockref):
1428 """Returns the lock if it's held, or None if it's not."""
1426 """Returns the lock if it's held, or None if it's not."""
1429 if lockref is None:
1427 if lockref is None:
1430 return None
1428 return None
1431 l = lockref()
1429 l = lockref()
1432 if l is None or not l.held:
1430 if l is None or not l.held:
1433 return None
1431 return None
1434 return l
1432 return l
1435
1433
1436 def currentwlock(self):
1434 def currentwlock(self):
1437 """Returns the wlock if it's held, or None if it's not."""
1435 """Returns the wlock if it's held, or None if it's not."""
1438 return self._currentlock(self._wlockref)
1436 return self._currentlock(self._wlockref)
1439
1437
1440 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1438 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1441 """
1439 """
1442 commit an individual file as part of a larger transaction
1440 commit an individual file as part of a larger transaction
1443 """
1441 """
1444
1442
1445 fname = fctx.path()
1443 fname = fctx.path()
1446 fparent1 = manifest1.get(fname, nullid)
1444 fparent1 = manifest1.get(fname, nullid)
1447 fparent2 = manifest2.get(fname, nullid)
1445 fparent2 = manifest2.get(fname, nullid)
1448 if isinstance(fctx, context.filectx):
1446 if isinstance(fctx, context.filectx):
1449 node = fctx.filenode()
1447 node = fctx.filenode()
1450 if node in [fparent1, fparent2]:
1448 if node in [fparent1, fparent2]:
1451 self.ui.debug('reusing %s filelog entry\n' % fname)
1449 self.ui.debug('reusing %s filelog entry\n' % fname)
1452 if manifest1.flags(fname) != fctx.flags():
1450 if manifest1.flags(fname) != fctx.flags():
1453 changelist.append(fname)
1451 changelist.append(fname)
1454 return node
1452 return node
1455
1453
1456 flog = self.file(fname)
1454 flog = self.file(fname)
1457 meta = {}
1455 meta = {}
1458 copy = fctx.renamed()
1456 copy = fctx.renamed()
1459 if copy and copy[0] != fname:
1457 if copy and copy[0] != fname:
1460 # Mark the new revision of this file as a copy of another
1458 # Mark the new revision of this file as a copy of another
1461 # file. This copy data will effectively act as a parent
1459 # file. This copy data will effectively act as a parent
1462 # of this new revision. If this is a merge, the first
1460 # of this new revision. If this is a merge, the first
1463 # parent will be the nullid (meaning "look up the copy data")
1461 # parent will be the nullid (meaning "look up the copy data")
1464 # and the second one will be the other parent. For example:
1462 # and the second one will be the other parent. For example:
1465 #
1463 #
1466 # 0 --- 1 --- 3 rev1 changes file foo
1464 # 0 --- 1 --- 3 rev1 changes file foo
1467 # \ / rev2 renames foo to bar and changes it
1465 # \ / rev2 renames foo to bar and changes it
1468 # \- 2 -/ rev3 should have bar with all changes and
1466 # \- 2 -/ rev3 should have bar with all changes and
1469 # should record that bar descends from
1467 # should record that bar descends from
1470 # bar in rev2 and foo in rev1
1468 # bar in rev2 and foo in rev1
1471 #
1469 #
1472 # this allows this merge to succeed:
1470 # this allows this merge to succeed:
1473 #
1471 #
1474 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1472 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1475 # \ / merging rev3 and rev4 should use bar@rev2
1473 # \ / merging rev3 and rev4 should use bar@rev2
1476 # \- 2 --- 4 as the merge base
1474 # \- 2 --- 4 as the merge base
1477 #
1475 #
1478
1476
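# For illustration, not part of the original source: with the rename
# example above, the filelog entry written for 'bar' in rev2 would carry
# copy metadata roughly like
#   meta = {'copy': 'foo', 'copyrev': hex(<node of foo in rev1>)}
# and its first parent is set to nullid so that readers know to follow
# the copy metadata instead.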
1479 cfname = copy[0]
1477 cfname = copy[0]
1480 crev = manifest1.get(cfname)
1478 crev = manifest1.get(cfname)
1481 newfparent = fparent2
1479 newfparent = fparent2
1482
1480
1483 if manifest2: # branch merge
1481 if manifest2: # branch merge
1484 if fparent2 == nullid or crev is None: # copied on remote side
1482 if fparent2 == nullid or crev is None: # copied on remote side
1485 if cfname in manifest2:
1483 if cfname in manifest2:
1486 crev = manifest2[cfname]
1484 crev = manifest2[cfname]
1487 newfparent = fparent1
1485 newfparent = fparent1
1488
1486
1489 # Here, we used to search backwards through history to try to find
1487 # Here, we used to search backwards through history to try to find
1490 # where the file copy came from if the source of a copy was not in
1488 # where the file copy came from if the source of a copy was not in
1491 # the parent directory. However, this doesn't actually make sense to
1489 # the parent directory. However, this doesn't actually make sense to
1492 # do (what does a copy from something not in your working copy even
1490 # do (what does a copy from something not in your working copy even
1493 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1491 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1494 # the user that copy information was dropped, so if they didn't
1492 # the user that copy information was dropped, so if they didn't
1495 # expect this outcome it can be fixed, but this is the correct
1493 # expect this outcome it can be fixed, but this is the correct
1496 # behavior in this circumstance.
1494 # behavior in this circumstance.
1497
1495
1498 if crev:
1496 if crev:
1499 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1497 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1500 meta["copy"] = cfname
1498 meta["copy"] = cfname
1501 meta["copyrev"] = hex(crev)
1499 meta["copyrev"] = hex(crev)
1502 fparent1, fparent2 = nullid, newfparent
1500 fparent1, fparent2 = nullid, newfparent
1503 else:
1501 else:
1504 self.ui.warn(_("warning: can't find ancestor for '%s' "
1502 self.ui.warn(_("warning: can't find ancestor for '%s' "
1505 "copied from '%s'!\n") % (fname, cfname))
1503 "copied from '%s'!\n") % (fname, cfname))
1506
1504
1507 elif fparent1 == nullid:
1505 elif fparent1 == nullid:
1508 fparent1, fparent2 = fparent2, nullid
1506 fparent1, fparent2 = fparent2, nullid
1509 elif fparent2 != nullid:
1507 elif fparent2 != nullid:
1510 # is one parent an ancestor of the other?
1508 # is one parent an ancestor of the other?
1511 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1509 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1512 if fparent1 in fparentancestors:
1510 if fparent1 in fparentancestors:
1513 fparent1, fparent2 = fparent2, nullid
1511 fparent1, fparent2 = fparent2, nullid
1514 elif fparent2 in fparentancestors:
1512 elif fparent2 in fparentancestors:
1515 fparent2 = nullid
1513 fparent2 = nullid
1516
1514
1517 # is the file changed?
1515 # is the file changed?
1518 text = fctx.data()
1516 text = fctx.data()
1519 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1517 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1520 changelist.append(fname)
1518 changelist.append(fname)
1521 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1519 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1522 # are just the flags changed during merge?
1520 # are just the flags changed during merge?
1523 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1521 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1524 changelist.append(fname)
1522 changelist.append(fname)
1525
1523
1526 return fparent1
1524 return fparent1
1527
1525
1528 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1526 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1529 """check for commit arguments that aren't committable"""
1527 """check for commit arguments that aren't committable"""
1530 if match.isexact() or match.prefix():
1528 if match.isexact() or match.prefix():
1531 matched = set(status.modified + status.added + status.removed)
1529 matched = set(status.modified + status.added + status.removed)
1532
1530
1533 for f in match.files():
1531 for f in match.files():
1534 f = self.dirstate.normalize(f)
1532 f = self.dirstate.normalize(f)
1535 if f == '.' or f in matched or f in wctx.substate:
1533 if f == '.' or f in matched or f in wctx.substate:
1536 continue
1534 continue
1537 if f in status.deleted:
1535 if f in status.deleted:
1538 fail(f, _('file not found!'))
1536 fail(f, _('file not found!'))
1539 if f in vdirs: # visited directory
1537 if f in vdirs: # visited directory
1540 d = f + '/'
1538 d = f + '/'
1541 for mf in matched:
1539 for mf in matched:
1542 if mf.startswith(d):
1540 if mf.startswith(d):
1543 break
1541 break
1544 else:
1542 else:
1545 fail(f, _("no match under directory!"))
1543 fail(f, _("no match under directory!"))
1546 elif f not in self.dirstate:
1544 elif f not in self.dirstate:
1547 fail(f, _("file not tracked!"))
1545 fail(f, _("file not tracked!"))
1548
1546
1549 @unfilteredmethod
1547 @unfilteredmethod
1550 def commit(self, text="", user=None, date=None, match=None, force=False,
1548 def commit(self, text="", user=None, date=None, match=None, force=False,
1551 editor=False, extra=None):
1549 editor=False, extra=None):
1552 """Add a new revision to current repository.
1550 """Add a new revision to current repository.
1553
1551
Revision information is gathered from the working directory;
match can be used to filter the committed files. If editor is
supplied, it is called to get a commit message.
1557 """
1555 """
1558 if extra is None:
1556 if extra is None:
1559 extra = {}
1557 extra = {}
1560
1558
1561 def fail(f, msg):
1559 def fail(f, msg):
1562 raise error.Abort('%s: %s' % (f, msg))
1560 raise error.Abort('%s: %s' % (f, msg))
1563
1561
1564 if not match:
1562 if not match:
1565 match = matchmod.always(self.root, '')
1563 match = matchmod.always(self.root, '')
1566
1564
1567 if not force:
1565 if not force:
1568 vdirs = []
1566 vdirs = []
1569 match.explicitdir = vdirs.append
1567 match.explicitdir = vdirs.append
1570 match.bad = fail
1568 match.bad = fail
1571
1569
1572 wlock = lock = tr = None
1570 wlock = lock = tr = None
1573 try:
1571 try:
1574 wlock = self.wlock()
1572 wlock = self.wlock()
1575 lock = self.lock() # for recent changelog (see issue4368)
1573 lock = self.lock() # for recent changelog (see issue4368)
1576
1574
1577 wctx = self[None]
1575 wctx = self[None]
1578 merge = len(wctx.parents()) > 1
1576 merge = len(wctx.parents()) > 1
1579
1577
1580 if not force and merge and match.ispartial():
1578 if not force and merge and match.ispartial():
1581 raise error.Abort(_('cannot partially commit a merge '
1579 raise error.Abort(_('cannot partially commit a merge '
1582 '(do not specify files or patterns)'))
1580 '(do not specify files or patterns)'))
1583
1581
1584 status = self.status(match=match, clean=force)
1582 status = self.status(match=match, clean=force)
1585 if force:
1583 if force:
1586 status.modified.extend(status.clean) # mq may commit clean files
1584 status.modified.extend(status.clean) # mq may commit clean files
1587
1585
1588 # check subrepos
1586 # check subrepos
1589 subs = []
1587 subs = []
1590 commitsubs = set()
1588 commitsubs = set()
1591 newstate = wctx.substate.copy()
1589 newstate = wctx.substate.copy()
1592 # only manage subrepos and .hgsubstate if .hgsub is present
1590 # only manage subrepos and .hgsubstate if .hgsub is present
1593 if '.hgsub' in wctx:
1591 if '.hgsub' in wctx:
1594 # we'll decide whether to track this ourselves, thanks
1592 # we'll decide whether to track this ourselves, thanks
1595 for c in status.modified, status.added, status.removed:
1593 for c in status.modified, status.added, status.removed:
1596 if '.hgsubstate' in c:
1594 if '.hgsubstate' in c:
1597 c.remove('.hgsubstate')
1595 c.remove('.hgsubstate')
1598
1596
1599 # compare current state to last committed state
1597 # compare current state to last committed state
1600 # build new substate based on last committed state
1598 # build new substate based on last committed state
1601 oldstate = wctx.p1().substate
1599 oldstate = wctx.p1().substate
1602 for s in sorted(newstate.keys()):
1600 for s in sorted(newstate.keys()):
1603 if not match(s):
1601 if not match(s):
1604 # ignore working copy, use old state if present
1602 # ignore working copy, use old state if present
1605 if s in oldstate:
1603 if s in oldstate:
1606 newstate[s] = oldstate[s]
1604 newstate[s] = oldstate[s]
1607 continue
1605 continue
1608 if not force:
1606 if not force:
1609 raise error.Abort(
1607 raise error.Abort(
1610 _("commit with new subrepo %s excluded") % s)
1608 _("commit with new subrepo %s excluded") % s)
1611 dirtyreason = wctx.sub(s).dirtyreason(True)
1609 dirtyreason = wctx.sub(s).dirtyreason(True)
1612 if dirtyreason:
1610 if dirtyreason:
1613 if not self.ui.configbool('ui', 'commitsubrepos'):
1611 if not self.ui.configbool('ui', 'commitsubrepos'):
1614 raise error.Abort(dirtyreason,
1612 raise error.Abort(dirtyreason,
1615 hint=_("use --subrepos for recursive commit"))
1613 hint=_("use --subrepos for recursive commit"))
1616 subs.append(s)
1614 subs.append(s)
1617 commitsubs.add(s)
1615 commitsubs.add(s)
1618 else:
1616 else:
1619 bs = wctx.sub(s).basestate()
1617 bs = wctx.sub(s).basestate()
1620 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1618 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1621 if oldstate.get(s, (None, None, None))[1] != bs:
1619 if oldstate.get(s, (None, None, None))[1] != bs:
1622 subs.append(s)
1620 subs.append(s)
1623
1621
1624 # check for removed subrepos
1622 # check for removed subrepos
1625 for p in wctx.parents():
1623 for p in wctx.parents():
1626 r = [s for s in p.substate if s not in newstate]
1624 r = [s for s in p.substate if s not in newstate]
1627 subs += [s for s in r if match(s)]
1625 subs += [s for s in r if match(s)]
1628 if subs:
1626 if subs:
1629 if (not match('.hgsub') and
1627 if (not match('.hgsub') and
1630 '.hgsub' in (wctx.modified() + wctx.added())):
1628 '.hgsub' in (wctx.modified() + wctx.added())):
1631 raise error.Abort(
1629 raise error.Abort(
1632 _("can't commit subrepos without .hgsub"))
1630 _("can't commit subrepos without .hgsub"))
1633 status.modified.insert(0, '.hgsubstate')
1631 status.modified.insert(0, '.hgsubstate')
1634
1632
1635 elif '.hgsub' in status.removed:
1633 elif '.hgsub' in status.removed:
1636 # clean up .hgsubstate when .hgsub is removed
1634 # clean up .hgsubstate when .hgsub is removed
1637 if ('.hgsubstate' in wctx and
1635 if ('.hgsubstate' in wctx and
1638 '.hgsubstate' not in (status.modified + status.added +
1636 '.hgsubstate' not in (status.modified + status.added +
1639 status.removed)):
1637 status.removed)):
1640 status.removed.insert(0, '.hgsubstate')
1638 status.removed.insert(0, '.hgsubstate')
1641
1639
1642 # make sure all explicit patterns are matched
1640 # make sure all explicit patterns are matched
1643 if not force:
1641 if not force:
1644 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1642 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1645
1643
1646 cctx = context.workingcommitctx(self, status,
1644 cctx = context.workingcommitctx(self, status,
1647 text, user, date, extra)
1645 text, user, date, extra)
1648
1646
1649 # internal config: ui.allowemptycommit
1647 # internal config: ui.allowemptycommit
1650 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1648 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1651 or extra.get('close') or merge or cctx.files()
1649 or extra.get('close') or merge or cctx.files()
1652 or self.ui.configbool('ui', 'allowemptycommit'))
1650 or self.ui.configbool('ui', 'allowemptycommit'))
1653 if not allowemptycommit:
1651 if not allowemptycommit:
1654 return None
1652 return None
1655
1653
1656 if merge and cctx.deleted():
1654 if merge and cctx.deleted():
1657 raise error.Abort(_("cannot commit merge with missing files"))
1655 raise error.Abort(_("cannot commit merge with missing files"))
1658
1656
1659 ms = mergemod.mergestate.read(self)
1657 ms = mergemod.mergestate.read(self)
1660 mergeutil.checkunresolved(ms)
1658 mergeutil.checkunresolved(ms)
1661
1659
1662 if editor:
1660 if editor:
1663 cctx._text = editor(self, cctx, subs)
1661 cctx._text = editor(self, cctx, subs)
1664 edited = (text != cctx._text)
1662 edited = (text != cctx._text)
1665
1663
1666 # Save commit message in case this transaction gets rolled back
1664 # Save commit message in case this transaction gets rolled back
1667 # (e.g. by a pretxncommit hook). Leave the content alone on
1665 # (e.g. by a pretxncommit hook). Leave the content alone on
1668 # the assumption that the user will use the same editor again.
1666 # the assumption that the user will use the same editor again.
1669 msgfn = self.savecommitmessage(cctx._text)
1667 msgfn = self.savecommitmessage(cctx._text)
1670
1668
1671 # commit subs and write new state
1669 # commit subs and write new state
1672 if subs:
1670 if subs:
1673 for s in sorted(commitsubs):
1671 for s in sorted(commitsubs):
1674 sub = wctx.sub(s)
1672 sub = wctx.sub(s)
1675 self.ui.status(_('committing subrepository %s\n') %
1673 self.ui.status(_('committing subrepository %s\n') %
1676 subrepo.subrelpath(sub))
1674 subrepo.subrelpath(sub))
1677 sr = sub.commit(cctx._text, user, date)
1675 sr = sub.commit(cctx._text, user, date)
1678 newstate[s] = (newstate[s][0], sr)
1676 newstate[s] = (newstate[s][0], sr)
1679 subrepo.writestate(self, newstate)
1677 subrepo.writestate(self, newstate)
1680
1678
1681 p1, p2 = self.dirstate.parents()
1679 p1, p2 = self.dirstate.parents()
1682 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1680 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1683 try:
1681 try:
1684 self.hook("precommit", throw=True, parent1=hookp1,
1682 self.hook("precommit", throw=True, parent1=hookp1,
1685 parent2=hookp2)
1683 parent2=hookp2)
1686 tr = self.transaction('commit')
1684 tr = self.transaction('commit')
1687 ret = self.commitctx(cctx, True)
1685 ret = self.commitctx(cctx, True)
1688 except: # re-raises
1686 except: # re-raises
1689 if edited:
1687 if edited:
1690 self.ui.write(
1688 self.ui.write(
1691 _('note: commit message saved in %s\n') % msgfn)
1689 _('note: commit message saved in %s\n') % msgfn)
1692 raise
1690 raise
1693 # update bookmarks, dirstate and mergestate
1691 # update bookmarks, dirstate and mergestate
1694 bookmarks.update(self, [p1, p2], ret)
1692 bookmarks.update(self, [p1, p2], ret)
1695 cctx.markcommitted(ret)
1693 cctx.markcommitted(ret)
1696 ms.reset()
1694 ms.reset()
1697 tr.close()
1695 tr.close()
1698
1696
1699 finally:
1697 finally:
1700 lockmod.release(tr, lock, wlock)
1698 lockmod.release(tr, lock, wlock)
1701
1699
1702 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1700 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
# hack for commands that use a temporary commit (e.g. histedit):
# the temporary commit may already have been stripped before the
# hook is released
1705 if self.changelog.hasnode(ret):
1703 if self.changelog.hasnode(ret):
1706 self.hook("commit", node=node, parent1=parent1,
1704 self.hook("commit", node=node, parent1=parent1,
1707 parent2=parent2)
1705 parent2=parent2)
1708 self._afterlock(commithook)
1706 self._afterlock(commithook)
1709 return ret
1707 return ret
1710
1708
1711 @unfilteredmethod
1709 @unfilteredmethod
1712 def commitctx(self, ctx, error=False):
1710 def commitctx(self, ctx, error=False):
1713 """Add a new revision to current repository.
1711 """Add a new revision to current repository.
1714 Revision information is passed via the context argument.
1712 Revision information is passed via the context argument.
1715 """
1713 """
1716
1714
1717 tr = None
1715 tr = None
1718 p1, p2 = ctx.p1(), ctx.p2()
1716 p1, p2 = ctx.p1(), ctx.p2()
1719 user = ctx.user()
1717 user = ctx.user()
1720
1718
1721 lock = self.lock()
1719 lock = self.lock()
1722 try:
1720 try:
1723 tr = self.transaction("commit")
1721 tr = self.transaction("commit")
1724 trp = weakref.proxy(tr)
1722 trp = weakref.proxy(tr)
1725
1723
1726 if ctx.manifestnode():
1724 if ctx.manifestnode():
1727 # reuse an existing manifest revision
1725 # reuse an existing manifest revision
1728 mn = ctx.manifestnode()
1726 mn = ctx.manifestnode()
1729 files = ctx.files()
1727 files = ctx.files()
1730 elif ctx.files():
1728 elif ctx.files():
1731 m1ctx = p1.manifestctx()
1729 m1ctx = p1.manifestctx()
1732 m2ctx = p2.manifestctx()
1730 m2ctx = p2.manifestctx()
1733 mctx = m1ctx.copy()
1731 mctx = m1ctx.copy()
1734
1732
1735 m = mctx.read()
1733 m = mctx.read()
1736 m1 = m1ctx.read()
1734 m1 = m1ctx.read()
1737 m2 = m2ctx.read()
1735 m2 = m2ctx.read()
1738
1736
1739 # check in files
1737 # check in files
1740 added = []
1738 added = []
1741 changed = []
1739 changed = []
1742 removed = list(ctx.removed())
1740 removed = list(ctx.removed())
1743 linkrev = len(self)
1741 linkrev = len(self)
1744 self.ui.note(_("committing files:\n"))
1742 self.ui.note(_("committing files:\n"))
1745 for f in sorted(ctx.modified() + ctx.added()):
1743 for f in sorted(ctx.modified() + ctx.added()):
1746 self.ui.note(f + "\n")
1744 self.ui.note(f + "\n")
1747 try:
1745 try:
1748 fctx = ctx[f]
1746 fctx = ctx[f]
1749 if fctx is None:
1747 if fctx is None:
1750 removed.append(f)
1748 removed.append(f)
1751 else:
1749 else:
1752 added.append(f)
1750 added.append(f)
1753 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1751 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1754 trp, changed)
1752 trp, changed)
1755 m.setflag(f, fctx.flags())
1753 m.setflag(f, fctx.flags())
1756 except OSError as inst:
1754 except OSError as inst:
1757 self.ui.warn(_("trouble committing %s!\n") % f)
1755 self.ui.warn(_("trouble committing %s!\n") % f)
1758 raise
1756 raise
1759 except IOError as inst:
1757 except IOError as inst:
1760 errcode = getattr(inst, 'errno', errno.ENOENT)
1758 errcode = getattr(inst, 'errno', errno.ENOENT)
1761 if error or errcode and errcode != errno.ENOENT:
1759 if error or errcode and errcode != errno.ENOENT:
1762 self.ui.warn(_("trouble committing %s!\n") % f)
1760 self.ui.warn(_("trouble committing %s!\n") % f)
1763 raise
1761 raise
1764
1762
1765 # update manifest
1763 # update manifest
1766 self.ui.note(_("committing manifest\n"))
1764 self.ui.note(_("committing manifest\n"))
1767 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1765 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1768 drop = [f for f in removed if f in m]
1766 drop = [f for f in removed if f in m]
1769 for f in drop:
1767 for f in drop:
1770 del m[f]
1768 del m[f]
1771 mn = mctx.write(trp, linkrev,
1769 mn = mctx.write(trp, linkrev,
1772 p1.manifestnode(), p2.manifestnode(),
1770 p1.manifestnode(), p2.manifestnode(),
1773 added, drop)
1771 added, drop)
1774 files = changed + removed
1772 files = changed + removed
1775 else:
1773 else:
1776 mn = p1.manifestnode()
1774 mn = p1.manifestnode()
1777 files = []
1775 files = []
1778
1776
1779 # update changelog
1777 # update changelog
1780 self.ui.note(_("committing changelog\n"))
1778 self.ui.note(_("committing changelog\n"))
1781 self.changelog.delayupdate(tr)
1779 self.changelog.delayupdate(tr)
1782 n = self.changelog.add(mn, files, ctx.description(),
1780 n = self.changelog.add(mn, files, ctx.description(),
1783 trp, p1.node(), p2.node(),
1781 trp, p1.node(), p2.node(),
1784 user, ctx.date(), ctx.extra().copy())
1782 user, ctx.date(), ctx.extra().copy())
1785 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1783 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1786 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1784 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1787 parent2=xp2)
1785 parent2=xp2)
# set the new commit in its proper phase
1789 targetphase = subrepo.newcommitphase(self.ui, ctx)
1787 targetphase = subrepo.newcommitphase(self.ui, ctx)
1790 if targetphase:
1788 if targetphase:
# retracting the boundary does not alter parent changesets.
# if a parent has a higher phase, the resulting phase will
# be compliant anyway
#
# if the minimal phase was 0 we don't need to retract anything
1796 phases.retractboundary(self, tr, targetphase, [n])
1794 phases.retractboundary(self, tr, targetphase, [n])
1797 tr.close()
1795 tr.close()
1798 return n
1796 return n
1799 finally:
1797 finally:
1800 if tr:
1798 if tr:
1801 tr.release()
1799 tr.release()
1802 lock.release()
1800 lock.release()
1803
1801
1804 @unfilteredmethod
1802 @unfilteredmethod
1805 def destroying(self):
1803 def destroying(self):
1806 '''Inform the repository that nodes are about to be destroyed.
1804 '''Inform the repository that nodes are about to be destroyed.
1807 Intended for use by strip and rollback, so there's a common
1805 Intended for use by strip and rollback, so there's a common
1808 place for anything that has to be done before destroying history.
1806 place for anything that has to be done before destroying history.
1809
1807
This is mostly useful for saving state that is in memory and waiting
to be flushed when the current lock is released. Because a call to
destroyed is imminent, the repo will be invalidated, causing those
changes either to stay in memory (waiting for the next unlock) or to
vanish completely.
1815 '''
1813 '''
1816 # When using the same lock to commit and strip, the phasecache is left
1814 # When using the same lock to commit and strip, the phasecache is left
1817 # dirty after committing. Then when we strip, the repo is invalidated,
1815 # dirty after committing. Then when we strip, the repo is invalidated,
1818 # causing those changes to disappear.
1816 # causing those changes to disappear.
1819 if '_phasecache' in vars(self):
1817 if '_phasecache' in vars(self):
1820 self._phasecache.write()
1818 self._phasecache.write()
1821
1819
1822 @unfilteredmethod
1820 @unfilteredmethod
1823 def destroyed(self):
1821 def destroyed(self):
1824 '''Inform the repository that nodes have been destroyed.
1822 '''Inform the repository that nodes have been destroyed.
1825 Intended for use by strip and rollback, so there's a common
1823 Intended for use by strip and rollback, so there's a common
1826 place for anything that has to be done after destroying history.
1824 place for anything that has to be done after destroying history.
1827 '''
1825 '''
1828 # When one tries to:
1826 # When one tries to:
1829 # 1) destroy nodes thus calling this method (e.g. strip)
1827 # 1) destroy nodes thus calling this method (e.g. strip)
1830 # 2) use phasecache somewhere (e.g. commit)
1828 # 2) use phasecache somewhere (e.g. commit)
1831 #
1829 #
1832 # then 2) will fail because the phasecache contains nodes that were
1830 # then 2) will fail because the phasecache contains nodes that were
1833 # removed. We can either remove phasecache from the filecache,
1831 # removed. We can either remove phasecache from the filecache,
1834 # causing it to reload next time it is accessed, or simply filter
1832 # causing it to reload next time it is accessed, or simply filter
1835 # the removed nodes now and write the updated cache.
1833 # the removed nodes now and write the updated cache.
1836 self._phasecache.filterunknown(self)
1834 self._phasecache.filterunknown(self)
1837 self._phasecache.write()
1835 self._phasecache.write()
1838
1836
1839 # refresh all repository caches
1837 # refresh all repository caches
1840 self.updatecaches()
1838 self.updatecaches()
1841
1839
1842 # Ensure the persistent tag cache is updated. Doing it now
1840 # Ensure the persistent tag cache is updated. Doing it now
1843 # means that the tag cache only has to worry about destroyed
1841 # means that the tag cache only has to worry about destroyed
1844 # heads immediately after a strip/rollback. That in turn
1842 # heads immediately after a strip/rollback. That in turn
1845 # guarantees that "cachetip == currenttip" (comparing both rev
1843 # guarantees that "cachetip == currenttip" (comparing both rev
1846 # and node) always means no nodes have been added or destroyed.
1844 # and node) always means no nodes have been added or destroyed.
1847
1845
1848 # XXX this is suboptimal when qrefresh'ing: we strip the current
1846 # XXX this is suboptimal when qrefresh'ing: we strip the current
1849 # head, refresh the tag cache, then immediately add a new head.
1847 # head, refresh the tag cache, then immediately add a new head.
1850 # But I think doing it this way is necessary for the "instant
1848 # But I think doing it this way is necessary for the "instant
1851 # tag cache retrieval" case to work.
1849 # tag cache retrieval" case to work.
1852 self.invalidate()
1850 self.invalidate()
1853
1851
1854 def walk(self, match, node=None):
1852 def walk(self, match, node=None):
1855 '''
1853 '''
1856 walk recursively through the directory tree or a given
1854 walk recursively through the directory tree or a given
1857 changeset, finding all files matched by the match
1855 changeset, finding all files matched by the match
1858 function
1856 function
1859 '''
1857 '''
1860 return self[node].walk(match)
1858 return self[node].walk(match)
1861
1859
1862 def status(self, node1='.', node2=None, match=None,
1860 def status(self, node1='.', node2=None, match=None,
1863 ignored=False, clean=False, unknown=False,
1861 ignored=False, clean=False, unknown=False,
1864 listsubrepos=False):
1862 listsubrepos=False):
1865 '''a convenience method that calls node1.status(node2)'''
1863 '''a convenience method that calls node1.status(node2)'''
1866 return self[node1].status(node2, match, ignored, clean, unknown,
1864 return self[node1].status(node2, match, ignored, clean, unknown,
1867 listsubrepos)
1865 listsubrepos)
1868
1866
1869 def heads(self, start=None):
1867 def heads(self, start=None):
1870 if start is None:
1868 if start is None:
1871 cl = self.changelog
1869 cl = self.changelog
1872 headrevs = reversed(cl.headrevs())
1870 headrevs = reversed(cl.headrevs())
1873 return [cl.node(rev) for rev in headrevs]
1871 return [cl.node(rev) for rev in headrevs]
1874
1872
1875 heads = self.changelog.heads(start)
1873 heads = self.changelog.heads(start)
1876 # sort the output in rev descending order
1874 # sort the output in rev descending order
1877 return sorted(heads, key=self.changelog.rev, reverse=True)
1875 return sorted(heads, key=self.changelog.rev, reverse=True)
1878
1876
1879 def branchheads(self, branch=None, start=None, closed=False):
1877 def branchheads(self, branch=None, start=None, closed=False):
1880 '''return a (possibly filtered) list of heads for the given branch
1878 '''return a (possibly filtered) list of heads for the given branch
1881
1879
1882 Heads are returned in topological order, from newest to oldest.
1880 Heads are returned in topological order, from newest to oldest.
1883 If branch is None, use the dirstate branch.
1881 If branch is None, use the dirstate branch.
1884 If start is not None, return only heads reachable from start.
1882 If start is not None, return only heads reachable from start.
1885 If closed is True, return heads that are marked as closed as well.
1883 If closed is True, return heads that are marked as closed as well.
1886 '''
1884 '''
1887 if branch is None:
1885 if branch is None:
1888 branch = self[None].branch()
1886 branch = self[None].branch()
1889 branches = self.branchmap()
1887 branches = self.branchmap()
1890 if branch not in branches:
1888 if branch not in branches:
1891 return []
1889 return []
1892 # the cache returns heads ordered lowest to highest
1890 # the cache returns heads ordered lowest to highest
1893 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1891 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1894 if start is not None:
1892 if start is not None:
1895 # filter out the heads that cannot be reached from startrev
1893 # filter out the heads that cannot be reached from startrev
1896 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1894 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1897 bheads = [h for h in bheads if h in fbheads]
1895 bheads = [h for h in bheads if h in fbheads]
1898 return bheads
1896 return bheads
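# Illustrative sketch, not part of the original source: branchheads()
# returns nodes newest-first, e.g.
#
#   heads = repo.branchheads('default', closed=True)
#   # list of head nodes on 'default', including heads marked as closed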
1899
1897
1900 def branches(self, nodes):
1898 def branches(self, nodes):
1901 if not nodes:
1899 if not nodes:
1902 nodes = [self.changelog.tip()]
1900 nodes = [self.changelog.tip()]
1903 b = []
1901 b = []
1904 for n in nodes:
1902 for n in nodes:
1905 t = n
1903 t = n
1906 while True:
1904 while True:
1907 p = self.changelog.parents(n)
1905 p = self.changelog.parents(n)
1908 if p[1] != nullid or p[0] == nullid:
1906 if p[1] != nullid or p[0] == nullid:
1909 b.append((t, n, p[0], p[1]))
1907 b.append((t, n, p[0], p[1]))
1910 break
1908 break
1911 n = p[0]
1909 n = p[0]
1912 return b
1910 return b
1913
1911
1914 def between(self, pairs):
1912 def between(self, pairs):
1915 r = []
1913 r = []
1916
1914
1917 for top, bottom in pairs:
1915 for top, bottom in pairs:
1918 n, l, i = top, [], 0
1916 n, l, i = top, [], 0
1919 f = 1
1917 f = 1
1920
1918
1921 while n != bottom and n != nullid:
1919 while n != bottom and n != nullid:
1922 p = self.changelog.parents(n)[0]
1920 p = self.changelog.parents(n)[0]
1923 if i == f:
1921 if i == f:
1924 l.append(n)
1922 l.append(n)
1925 f = f * 2
1923 f = f * 2
1926 n = p
1924 n = p
1927 i += 1
1925 i += 1
1928
1926
1929 r.append(l)
1927 r.append(l)
1930
1928
1931 return r
1929 return r
1932
1930
1933 def checkpush(self, pushop):
1931 def checkpush(self, pushop):
1934 """Extensions can override this function if additional checks have
1932 """Extensions can override this function if additional checks have
1935 to be performed before pushing, or call it if they override push
1933 to be performed before pushing, or call it if they override push
1936 command.
1934 command.
1937 """
1935 """
1938 pass
1936 pass
1939
1937
1940 @unfilteredpropertycache
1938 @unfilteredpropertycache
1941 def prepushoutgoinghooks(self):
1939 def prepushoutgoinghooks(self):
1942 """Return util.hooks consists of a pushop with repo, remote, outgoing
1940 """Return util.hooks consists of a pushop with repo, remote, outgoing
1943 methods, which are called before pushing changesets.
1941 methods, which are called before pushing changesets.
1944 """
1942 """
1945 return util.hooks()
1943 return util.hooks()
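# Illustrative sketch, not part of the original source: extensions
# register callbacks on this hook point with util.hooks.add(); the
# callback receives the pushop. 'myext' and checkoutgoing are made-up
# names.
#
#   def checkoutgoing(pushop):
#       if len(pushop.outgoing.missing) > 100:
#           pushop.repo.ui.warn('pushing more than 100 changesets\n')
#   repo.prepushoutgoinghooks.add('myext', checkoutgoing)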
1946
1944
1947 def pushkey(self, namespace, key, old, new):
1945 def pushkey(self, namespace, key, old, new):
1948 try:
1946 try:
1949 tr = self.currenttransaction()
1947 tr = self.currenttransaction()
1950 hookargs = {}
1948 hookargs = {}
1951 if tr is not None:
1949 if tr is not None:
1952 hookargs.update(tr.hookargs)
1950 hookargs.update(tr.hookargs)
1953 hookargs['namespace'] = namespace
1951 hookargs['namespace'] = namespace
1954 hookargs['key'] = key
1952 hookargs['key'] = key
1955 hookargs['old'] = old
1953 hookargs['old'] = old
1956 hookargs['new'] = new
1954 hookargs['new'] = new
1957 self.hook('prepushkey', throw=True, **hookargs)
1955 self.hook('prepushkey', throw=True, **hookargs)
1958 except error.HookAbort as exc:
1956 except error.HookAbort as exc:
1959 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1957 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1960 if exc.hint:
1958 if exc.hint:
1961 self.ui.write_err(_("(%s)\n") % exc.hint)
1959 self.ui.write_err(_("(%s)\n") % exc.hint)
1962 return False
1960 return False
1963 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1961 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1964 ret = pushkey.push(self, namespace, key, old, new)
1962 ret = pushkey.push(self, namespace, key, old, new)
1965 def runhook():
1963 def runhook():
1966 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1964 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1967 ret=ret)
1965 ret=ret)
1968 self._afterlock(runhook)
1966 self._afterlock(runhook)
1969 return ret
1967 return ret
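# Illustrative sketch, not part of the original source: pushkey() is the
# generic key/value push entry point; 'bookmarks' and 'phases' are the
# usual namespaces. The bookmark name and node below are made up.
#
#   ok = repo.pushkey('bookmarks', 'my-bookmark', '', hex(newnode))
#   # ok is False if a prepushkey hook aborted the update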
1970
1968
1971 def listkeys(self, namespace):
1969 def listkeys(self, namespace):
1972 self.hook('prelistkeys', throw=True, namespace=namespace)
1970 self.hook('prelistkeys', throw=True, namespace=namespace)
1973 self.ui.debug('listing keys for "%s"\n' % namespace)
1971 self.ui.debug('listing keys for "%s"\n' % namespace)
1974 values = pushkey.list(self, namespace)
1972 values = pushkey.list(self, namespace)
1975 self.hook('listkeys', namespace=namespace, values=values)
1973 self.hook('listkeys', namespace=namespace, values=values)
1976 return values
1974 return values
1977
1975
1978 def debugwireargs(self, one, two, three=None, four=None, five=None):
1976 def debugwireargs(self, one, two, three=None, four=None, five=None):
1979 '''used to test argument passing over the wire'''
1977 '''used to test argument passing over the wire'''
1980 return "%s %s %s %s %s" % (one, two, three, four, five)
1978 return "%s %s %s %s %s" % (one, two, three, four, five)
1981
1979
1982 def savecommitmessage(self, text):
1980 def savecommitmessage(self, text):
1983 fp = self.vfs('last-message.txt', 'wb')
1981 fp = self.vfs('last-message.txt', 'wb')
1984 try:
1982 try:
1985 fp.write(text)
1983 fp.write(text)
1986 finally:
1984 finally:
1987 fp.close()
1985 fp.close()
1988 return self.pathto(fp.name[len(self.root) + 1:])
1986 return self.pathto(fp.name[len(self.root) + 1:])
1989
1987
1990 # used to avoid circular references so destructors work
1988 # used to avoid circular references so destructors work
1991 def aftertrans(files):
1989 def aftertrans(files):
1992 renamefiles = [tuple(t) for t in files]
1990 renamefiles = [tuple(t) for t in files]
1993 def a():
1991 def a():
1994 for vfs, src, dest in renamefiles:
1992 for vfs, src, dest in renamefiles:
# if src and dest refer to the same file, vfs.rename is a no-op,
# leaving both src and dest on disk. delete dest to make sure
# the rename cannot be such a no-op.
1998 vfs.tryunlink(dest)
1996 vfs.tryunlink(dest)
1999 try:
1997 try:
2000 vfs.rename(src, dest)
1998 vfs.rename(src, dest)
2001 except OSError: # journal file does not yet exist
1999 except OSError: # journal file does not yet exist
2002 pass
2000 pass
2003 return a
2001 return a
2004
2002
2005 def undoname(fn):
2003 def undoname(fn):
2006 base, name = os.path.split(fn)
2004 base, name = os.path.split(fn)
2007 assert name.startswith('journal')
2005 assert name.startswith('journal')
2008 return os.path.join(base, name.replace('journal', 'undo', 1))
2006 return os.path.join(base, name.replace('journal', 'undo', 1))
2009
2007
2010 def instance(ui, path, create):
2008 def instance(ui, path, create):
2011 return localrepository(ui, util.urllocalpath(path), create)
2009 return localrepository(ui, util.urllocalpath(path), create)
2012
2010
2013 def islocal(path):
2011 def islocal(path):
2014 return True
2012 return True
2015
2013
2016 def newreporequirements(repo):
2014 def newreporequirements(repo):
2017 """Determine the set of requirements for a new local repository.
2015 """Determine the set of requirements for a new local repository.
2018
2016
2019 Extensions can wrap this function to specify custom requirements for
2017 Extensions can wrap this function to specify custom requirements for
2020 new repositories.
2018 new repositories.
2021 """
2019 """
2022 ui = repo.ui
2020 ui = repo.ui
2023 requirements = {'revlogv1'}
2021 requirements = {'revlogv1'}
2024 if ui.configbool('format', 'usestore', True):
2022 if ui.configbool('format', 'usestore', True):
2025 requirements.add('store')
2023 requirements.add('store')
2026 if ui.configbool('format', 'usefncache', True):
2024 if ui.configbool('format', 'usefncache', True):
2027 requirements.add('fncache')
2025 requirements.add('fncache')
2028 if ui.configbool('format', 'dotencode', True):
2026 if ui.configbool('format', 'dotencode', True):
2029 requirements.add('dotencode')
2027 requirements.add('dotencode')
2030
2028
2031 compengine = ui.config('experimental', 'format.compression', 'zlib')
2029 compengine = ui.config('experimental', 'format.compression', 'zlib')
2032 if compengine not in util.compengines:
2030 if compengine not in util.compengines:
2033 raise error.Abort(_('compression engine %s defined by '
2031 raise error.Abort(_('compression engine %s defined by '
2034 'experimental.format.compression not available') %
2032 'experimental.format.compression not available') %
2035 compengine,
2033 compengine,
2036 hint=_('run "hg debuginstall" to list available '
2034 hint=_('run "hg debuginstall" to list available '
2037 'compression engines'))
2035 'compression engines'))
2038
2036
2039 # zlib is the historical default and doesn't need an explicit requirement.
2037 # zlib is the historical default and doesn't need an explicit requirement.
2040 if compengine != 'zlib':
2038 if compengine != 'zlib':
2041 requirements.add('exp-compression-%s' % compengine)
2039 requirements.add('exp-compression-%s' % compengine)
2042
2040
2043 if scmutil.gdinitconfig(ui):
2041 if scmutil.gdinitconfig(ui):
2044 requirements.add('generaldelta')
2042 requirements.add('generaldelta')
2045 if ui.configbool('experimental', 'treemanifest', False):
2043 if ui.configbool('experimental', 'treemanifest', False):
2046 requirements.add('treemanifest')
2044 requirements.add('treemanifest')
2047 if ui.configbool('experimental', 'manifestv2', False):
2045 if ui.configbool('experimental', 'manifestv2', False):
2048 requirements.add('manifestv2')
2046 requirements.add('manifestv2')
2049
2047
2050 return requirements
2048 return requirements
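# Illustrative sketch, not part of the original source: as the docstring
# above notes, extensions can wrap newreporequirements() to add their own
# requirement to new repositories. The requirement string 'exp-myfeature'
# and the 'myfeature' config section are hypothetical.
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myfeature', 'enabled'):
#           reqs.add('exp-myfeature')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)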