##// END OF EJS Templates
py3: slice over bytes to prevent getting ascii values
Pulkit Goyal -
r32153:6f173560 default
parent child Browse files
Show More
@@ -1,537 +1,537 b''
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 bin,
14 bin,
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 encoding,
20 encoding,
21 error,
21 error,
22 revlog,
22 revlog,
23 util,
23 util,
24 )
24 )
25
25
26 _defaultextra = {'branch': 'default'}
26 _defaultextra = {'branch': 'default'}
27
27
28 def _string_escape(text):
28 def _string_escape(text):
29 """
29 """
30 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
30 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
31 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
31 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
32 >>> s
32 >>> s
33 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
33 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
34 >>> res = _string_escape(s)
34 >>> res = _string_escape(s)
35 >>> s == util.unescapestr(res)
35 >>> s == util.unescapestr(res)
36 True
36 True
37 """
37 """
38 # subset of the string_escape codec
38 # subset of the string_escape codec
39 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
39 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
40 return text.replace('\0', '\\0')
40 return text.replace('\0', '\\0')
41
41
def decodeextra(text):
    """Decode an encoded extra-metadata blob into a dict.

    ``text`` is a NUL-separated sequence of escaped ``key:value`` items as
    produced by encodeextra(). The result always contains the defaults from
    ``_defaultextra`` (a fresh copy) overlaid with the decoded items.
    """
    extra = _defaultextra.copy()
    for item in text.split('\0'):
        if not item:
            continue
        if '\\0' in item:
            # fix up \0 without getting into trouble with \\0
            item = item.replace('\\\\', '\\\\\n')
            item = item.replace('\\0', '\0')
            item = item.replace('\n', '')
        key, value = util.unescapestr(item).split(':', 1)
        extra[key] = value
    return extra
63
63
def encodeextra(d):
    """Serialize the extra-metadata dict ``d`` as NUL-joined, escaped
    ``key:value`` pairs suitable for embedding in a changelog entry."""
    # keys must be sorted to produce a deterministic changelog entry
    pairs = ('%s:%s' % (key, d[key]) for key in sorted(d))
    return "\0".join(_string_escape(pair) for pair in pairs)
68
68
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    cleaned = [line.rstrip() for line in desc.splitlines()]
    return '\n'.join(cleaned).strip('\n')
72
72
class appender(object):
    """File-like object that buffers appended data in memory.

    The changelog index must be updated last on disk, so this class is used
    to delay writes to it: reads are served from the real file for offsets
    already on disk and from the in-memory buffer beyond that, while writes
    only ever extend the buffer.
    """

    def __init__(self, vfs, name, mode, buf):
        self.data = buf                 # pending chunks, not yet on disk
        self.fp = vfs(name, mode)
        self.offset = self.fp.tell()    # current virtual position
        self.size = vfs.fstat(self.fp).st_size  # bytes actually on disk
        self._end = self.size           # virtual end of file

    def end(self):
        return self._end

    def tell(self):
        return self.offset

    def flush(self):
        # nothing to flush: buffered data is written elsewhere
        pass

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 1:
            # relative to the current position
            self.offset += offset
        elif whence == 2:
            # relative to the virtual end (disk + buffer)
            self.offset = self.end() + offset
        elif whence == 0:
            # absolute
            self.offset = offset
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = ""
        if self.offset < self.size:
            chunk = self.fp.read(count)
            ret = chunk
            self.offset += len(chunk)
            if count > 0:
                count -= len(chunk)
        if count != 0:
            # collapse the buffer into a single chunk so it can be sliced
            self.data[:] = ["".join(self.data)]
            bufoff = self.offset - self.size
            chunk = self.data[0][bufoff:bufoff + count]
            self.offset += len(chunk)
            ret += chunk
        return ret

    def write(self, s):
        # writes never touch the real file; they only grow the buffer
        self.data.append(bytes(s))
        self.offset += len(s)
        self._end += len(s)
126
126
127 def _divertopener(opener, target):
127 def _divertopener(opener, target):
128 """build an opener that writes in 'target.a' instead of 'target'"""
128 """build an opener that writes in 'target.a' instead of 'target'"""
129 def _divert(name, mode='r', checkambig=False):
129 def _divert(name, mode='r', checkambig=False):
130 if name != target:
130 if name != target:
131 return opener(name, mode)
131 return opener(name, mode)
132 return opener(name + ".a", mode)
132 return opener(name + ".a", mode)
133 return _divert
133 return _divert
134
134
135 def _delayopener(opener, target, buf):
135 def _delayopener(opener, target, buf):
136 """build an opener that stores chunks in 'buf' instead of 'target'"""
136 """build an opener that stores chunks in 'buf' instead of 'target'"""
137 def _delay(name, mode='r', checkambig=False):
137 def _delay(name, mode='r', checkambig=False):
138 if name != target:
138 if name != target:
139 return opener(name, mode)
139 return opener(name, mode)
140 return appender(opener, name, mode, buf)
140 return appender(opener, name, mode, buf)
141 return _delay
141 return _delay
142
142
143 _changelogrevision = collections.namedtuple(u'changelogrevision',
143 _changelogrevision = collections.namedtuple(u'changelogrevision',
144 (u'manifest', u'user', u'date',
144 (u'manifest', u'user', u'date',
145 u'files', u'description',
145 u'files', u'description',
146 u'extra'))
146 u'extra'))
147
147
class changelogrevision(object):
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object.
    """

    __slots__ = (
        u'_offsets',
        u'_text',
    )

    def __new__(cls, text):
        if not text:
            return _changelogrevision(
                manifest=nullid,
                user='',
                date=(0, 0),
                files=[],
                description='',
                extra=_defaultextra,
            )

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index('\n')
        nl2 = text.index('\n', nl1 + 1)
        nl3 = text.index('\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        #
        # py3 fix: slice instead of index. Indexing a bytes object on
        # Python 3 yields an int, so ``text[nl3 + 1] == '\n'`` would always
        # be False; the one-byte slice compares correctly on both py2/py3.
        if text[nl3 + 1:nl3 + 2] == '\n':
            doublenl = nl3
        else:
            doublenl = text.index('\n\n', nl3 + 1)

        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text

        return self

    @property
    def manifest(self):
        # manifest node is stored in hex on the first line
        return bin(self._text[0:self._offsets[0]])

    @property
    def user(self):
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1:off[1]])

    @property
    def _rawdate(self):
        # first two space-separated fields of the date line: time and tz
        off = self._offsets
        dateextra = self._text[off[1] + 1:off[2]]
        return dateextra.split(' ', 2)[0:2]

    @property
    def _rawextra(self):
        # optional third field of the date line: encoded extra metadata
        off = self._offsets
        dateextra = self._text[off[1] + 1:off[2]]
        fields = dateextra.split(' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def files(self):
        off = self._offsets
        # equal offsets mean the files section is empty
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1:off[3]].split('\n')

    @property
    def description(self):
        # skip the double newline that terminates the files section
        return encoding.tolocal(self._text[self._offsets[3] + 2:])
259
259
class changelog(revlog.revlog):
    """Revlog holding changeset entries, with two capabilities layered on
    top of a plain revlog:

    - revision filtering: revisions listed in ``filteredrevs`` are hidden
      from lookup APIs and raise Filtered* errors when accessed directly
    - delayed writes: during a transaction, index updates are diverted to
      a side file or buffered in memory so other readers don't see them
      until finalization
    """

    def __init__(self, opener):
        super(changelog, self).__init__(opener, "00changelog.i",
                                        checkambig=True)
        if self._initempty:
            # changelogs don't benefit from generaldelta
            self.version &= ~revlog.REVLOGGENERALDELTA
            self._generaldelta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each. So disable delta
        # chains.
        self.storedeltachains = False

        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        self.filteredrevs = frozenset()

    def tip(self):
        """filtered version of revlog.tip"""
        # walk backwards from the last revision down to nullrev (-1)
        for rev in xrange(len(self) - 1, -2, -1):
            if rev not in self.filteredrevs:
                return self.node(rev)

    def __contains__(self, rev):
        """filtered version of revlog.__contains__"""
        if rev < 0 or rev >= len(self):
            return False
        return rev not in self.filteredrevs

    def __iter__(self):
        """filtered version of revlog.__iter__"""
        if not self.filteredrevs:
            return super(changelog, self).__iter__()
        return (rev for rev in xrange(len(self))
                if rev not in self.filteredrevs)

    def revs(self, start=0, stop=None):
        """filtered version of revlog.revs"""
        for rev in super(changelog, self).revs(start, stop):
            if rev not in self.filteredrevs:
                yield rev

    @util.propertycache
    def nodemap(self):
        # XXX need filtering too
        # force the revlog to populate its node cache, then expose it
        self.rev(self.node(0))
        return self._nodecache

    def reachableroots(self, minroot, heads, roots, includepath=False):
        return self.index.reachableroots2(minroot, heads, roots, includepath)

    def headrevs(self):
        if not self.filteredrevs:
            return super(changelog, self).headrevs()
        try:
            return self.index.headrevsfiltered(self.filteredrevs)
        # AttributeError covers non-c-extension environments and
        # old c extensions without filter handling.
        except AttributeError:
            return self._headrevs()

    def strip(self, *args, **kwargs):
        # XXX make something better than assert
        # We can't expect proper strip behavior if we are filtered.
        assert not self.filteredrevs
        super(changelog, self).strip(*args, **kwargs)

    def rev(self, node):
        """filtered version of revlog.rev"""
        revnum = super(changelog, self).rev(node)
        if revnum in self.filteredrevs:
            raise error.FilteredLookupError(hex(node), self.indexfile,
                                            _('filtered node'))
        return revnum

    def node(self, rev):
        """filtered version of revlog.node"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).node(rev)

    def linkrev(self, rev):
        """filtered version of revlog.linkrev"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).linkrev(rev)

    def parentrevs(self, rev):
        """filtered version of revlog.parentrevs"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).parentrevs(rev)

    def flags(self, rev):
        """filtered version of revlog.flags"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).flags(rev)

    def delayupdate(self, tr):
        "delay visibility of index updates to other readers"
        if not self._delayed:
            if len(self) == 0:
                # brand new changelog: write to a diverted '.a' file and
                # rename it into place at finalization
                self._divert = True
                if self._realopener.exists(self.indexfile + '.a'):
                    self._realopener.unlink(self.indexfile + '.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                # existing changelog: buffer appended data in memory
                self._delaybuf = []
                self.opener = _delayopener(self._realopener, self.indexfile,
                                           self._delaybuf)
        self._delayed = True
        key = 'cl-%i' % id(self)
        tr.addpending(key, self._writepending)
        tr.addfinalize(key, self._finalize)

    def _finalize(self, tr):
        "finalize index updates"
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + ".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile, checkambig=True)
        elif self._delaybuf:
            fp = self.opener(self.indexfile, 'a', checkambig=True)
            fp.write("".join(self._delaybuf))
            fp.close()
        self._delaybuf = None
        self._divert = False
        # split when we're done
        self.checkinlinesize(tr)

    def readpending(self, file):
        """read index data from a "pending" file

        During a transaction, the actual changeset data is already stored in
        the main file, but not yet finalized in the on-disk index. Instead, a
        "pending" index is written by the transaction logic. If this function
        is running, we are likely in a subprocess invoked in a hook. The
        subprocess is informed that it is within a transaction and needs to
        access its content.

        This function will read all the index data out of the pending file and
        overwrite the main index."""
        if not self.opener.exists(file):
            return # no pending data for changelog
        pending = revlog.revlog(self.opener, file)
        self.index = pending.index
        self.nodemap = pending.nodemap
        self._nodecache = pending._nodecache
        self._chunkcache = pending._chunkcache

    def _writepending(self, tr):
        "create a file containing the unfinalized state for pretxnchangegroup"
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            pendingfilename = self.indexfile + ".a"
            # register as a temp file to ensure cleanup on failure
            tr.registertmp(pendingfilename)
            # write existing data
            fp2 = self._realopener(pendingfilename, "w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write("".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)
        return bool(self._divert)

    def checkinlinesize(self, tr, fp=None):
        # suppress inline->non-inline conversion while writes are delayed
        if not self._delayed:
            revlog.revlog.checkinlinesize(self, tr, fp)

    def read(self, node):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

        - manifest node in binary
        - author/user as a localstr
        - date as a 2-tuple of (time, timezone)
        - list of files
        - commit message as a localstr
        - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        c = changelogrevision(self.revision(node))
        return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        return changelogrevision(self.revision(nodeorrev))

    def readfiles(self, node):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(node)
        if not text:
            return []
        # files are lines 4..end of the header (before the double newline)
        last = text.index("\n\n")
        return text[:last].split('\n')[3:]

    def add(self, manifest, files, desc, transaction, p1, p2,
            user, date=None, extra=None):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.RevlogError(_("empty username"))
        if "\n" in user:
            raise error.RevlogError(_("username %s contains a newline")
                                    % repr(user))

        desc = stripdesc(desc)

        if date:
            parseddate = "%d %d" % util.parsedate(date)
        else:
            parseddate = "%d %d" % util.makedate()
        if extra:
            branch = extra.get("branch")
            if branch in ("default", ""):
                # the default branch is implicit and never stored
                del extra["branch"]
            elif branch in (".", "null", "tip"):
                raise error.RevlogError(_('the name \'%s\' is reserved')
                                        % branch)
        if extra:
            extra = encodeextra(extra)
            parseddate = "%s %s" % (parseddate, extra)
        lines = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
        text = "\n".join(lines)
        return self.addrevision(text, transaction, len(self), p1, p2)

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        extra = self.read(rev)[5]
        return encoding.tolocal(extra.get("branch")), 'close' in extra
@@ -1,3486 +1,3486 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import itertools
11 import itertools
12 import os
12 import os
13 import re
13 import re
14 import tempfile
14 import tempfile
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 bin,
18 bin,
19 hex,
19 hex,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 )
23 )
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 changelog,
27 changelog,
28 copies,
28 copies,
29 crecord as crecordmod,
29 crecord as crecordmod,
30 encoding,
30 encoding,
31 error,
31 error,
32 formatter,
32 formatter,
33 graphmod,
33 graphmod,
34 lock as lockmod,
34 lock as lockmod,
35 match as matchmod,
35 match as matchmod,
36 obsolete,
36 obsolete,
37 patch,
37 patch,
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 pycompat,
40 pycompat,
41 repair,
41 repair,
42 revlog,
42 revlog,
43 revset,
43 revset,
44 scmutil,
44 scmutil,
45 smartset,
45 smartset,
46 templatekw,
46 templatekw,
47 templater,
47 templater,
48 util,
48 util,
49 vfs as vfsmod,
49 vfs as vfsmod,
50 )
50 )
51 stringio = util.stringio
51 stringio = util.stringio
52
52
53 # special string such that everything below this line will be ingored in the
53 # special string such that everything below this line will be ingored in the
54 # editor text
54 # editor text
55 _linebelow = "^HG: ------------------------ >8 ------------------------$"
55 _linebelow = "^HG: ------------------------ >8 ------------------------$"
56
56
57 def ishunk(x):
57 def ishunk(x):
58 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
58 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
59 return isinstance(x, hunkclasses)
59 return isinstance(x, hunkclasses)
60
60
61 def newandmodified(chunks, originalchunks):
61 def newandmodified(chunks, originalchunks):
62 newlyaddedandmodifiedfiles = set()
62 newlyaddedandmodifiedfiles = set()
63 for chunk in chunks:
63 for chunk in chunks:
64 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
64 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
65 originalchunks:
65 originalchunks:
66 newlyaddedandmodifiedfiles.add(chunk.header.filename())
66 newlyaddedandmodifiedfiles.add(chunk.header.filename())
67 return newlyaddedandmodifiedfiles
67 return newlyaddedandmodifiedfiles
68
68
69 def parsealiases(cmd):
69 def parsealiases(cmd):
70 return cmd.lstrip("^").split("|")
70 return cmd.lstrip("^").split("|")
71
71
72 def setupwrapcolorwrite(ui):
72 def setupwrapcolorwrite(ui):
73 # wrap ui.write so diff output can be labeled/colorized
73 # wrap ui.write so diff output can be labeled/colorized
74 def wrapwrite(orig, *args, **kw):
74 def wrapwrite(orig, *args, **kw):
75 label = kw.pop('label', '')
75 label = kw.pop('label', '')
76 for chunk, l in patch.difflabel(lambda: args):
76 for chunk, l in patch.difflabel(lambda: args):
77 orig(chunk, label=label + l)
77 orig(chunk, label=label + l)
78
78
79 oldwrite = ui.write
79 oldwrite = ui.write
80 def wrap(*args, **kwargs):
80 def wrap(*args, **kwargs):
81 return wrapwrite(oldwrite, *args, **kwargs)
81 return wrapwrite(oldwrite, *args, **kwargs)
82 setattr(ui, 'write', wrap)
82 setattr(ui, 'write', wrap)
83 return oldwrite
83 return oldwrite
84
84
85 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
85 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
86 if usecurses:
86 if usecurses:
87 if testfile:
87 if testfile:
88 recordfn = crecordmod.testdecorator(testfile,
88 recordfn = crecordmod.testdecorator(testfile,
89 crecordmod.testchunkselector)
89 crecordmod.testchunkselector)
90 else:
90 else:
91 recordfn = crecordmod.chunkselector
91 recordfn = crecordmod.chunkselector
92
92
93 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
93 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
94
94
95 else:
95 else:
96 return patch.filterpatch(ui, originalhunks, operation)
96 return patch.filterpatch(ui, originalhunks, operation)
97
97
98 def recordfilter(ui, originalhunks, operation=None):
98 def recordfilter(ui, originalhunks, operation=None):
99 """ Prompts the user to filter the originalhunks and return a list of
99 """ Prompts the user to filter the originalhunks and return a list of
100 selected hunks.
100 selected hunks.
101 *operation* is used for to build ui messages to indicate the user what
101 *operation* is used for to build ui messages to indicate the user what
102 kind of filtering they are doing: reverting, committing, shelving, etc.
102 kind of filtering they are doing: reverting, committing, shelving, etc.
103 (see patch.filterpatch).
103 (see patch.filterpatch).
104 """
104 """
105 usecurses = crecordmod.checkcurses(ui)
105 usecurses = crecordmod.checkcurses(ui)
106 testfile = ui.config('experimental', 'crecordtest', None)
106 testfile = ui.config('experimental', 'crecordtest', None)
107 oldwrite = setupwrapcolorwrite(ui)
107 oldwrite = setupwrapcolorwrite(ui)
108 try:
108 try:
109 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
109 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
110 testfile, operation)
110 testfile, operation)
111 finally:
111 finally:
112 ui.write = oldwrite
112 ui.write = oldwrite
113 return newchunks, newopts
113 return newchunks, newopts
114
114
115 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
115 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
116 filterfn, *pats, **opts):
116 filterfn, *pats, **opts):
117 from . import merge as mergemod
117 from . import merge as mergemod
118 opts = pycompat.byteskwargs(opts)
118 opts = pycompat.byteskwargs(opts)
119 if not ui.interactive():
119 if not ui.interactive():
120 if cmdsuggest:
120 if cmdsuggest:
121 msg = _('running non-interactively, use %s instead') % cmdsuggest
121 msg = _('running non-interactively, use %s instead') % cmdsuggest
122 else:
122 else:
123 msg = _('running non-interactively')
123 msg = _('running non-interactively')
124 raise error.Abort(msg)
124 raise error.Abort(msg)
125
125
126 # make sure username is set before going interactive
126 # make sure username is set before going interactive
127 if not opts.get('user'):
127 if not opts.get('user'):
128 ui.username() # raise exception, username not provided
128 ui.username() # raise exception, username not provided
129
129
130 def recordfunc(ui, repo, message, match, opts):
130 def recordfunc(ui, repo, message, match, opts):
131 """This is generic record driver.
131 """This is generic record driver.
132
132
133 Its job is to interactively filter local changes, and
133 Its job is to interactively filter local changes, and
134 accordingly prepare working directory into a state in which the
134 accordingly prepare working directory into a state in which the
135 job can be delegated to a non-interactive commit command such as
135 job can be delegated to a non-interactive commit command such as
136 'commit' or 'qrefresh'.
136 'commit' or 'qrefresh'.
137
137
138 After the actual job is done by non-interactive command, the
138 After the actual job is done by non-interactive command, the
139 working directory is restored to its original state.
139 working directory is restored to its original state.
140
140
141 In the end we'll record interesting changes, and everything else
141 In the end we'll record interesting changes, and everything else
142 will be left in place, so the user can continue working.
142 will be left in place, so the user can continue working.
143 """
143 """
144
144
145 checkunfinished(repo, commit=True)
145 checkunfinished(repo, commit=True)
146 wctx = repo[None]
146 wctx = repo[None]
147 merge = len(wctx.parents()) > 1
147 merge = len(wctx.parents()) > 1
148 if merge:
148 if merge:
149 raise error.Abort(_('cannot partially commit a merge '
149 raise error.Abort(_('cannot partially commit a merge '
150 '(use "hg commit" instead)'))
150 '(use "hg commit" instead)'))
151
151
152 def fail(f, msg):
152 def fail(f, msg):
153 raise error.Abort('%s: %s' % (f, msg))
153 raise error.Abort('%s: %s' % (f, msg))
154
154
155 force = opts.get('force')
155 force = opts.get('force')
156 if not force:
156 if not force:
157 vdirs = []
157 vdirs = []
158 match.explicitdir = vdirs.append
158 match.explicitdir = vdirs.append
159 match.bad = fail
159 match.bad = fail
160
160
161 status = repo.status(match=match)
161 status = repo.status(match=match)
162 if not force:
162 if not force:
163 repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
163 repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
164 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
164 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
165 diffopts.nodates = True
165 diffopts.nodates = True
166 diffopts.git = True
166 diffopts.git = True
167 diffopts.showfunc = True
167 diffopts.showfunc = True
168 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
168 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
169 originalchunks = patch.parsepatch(originaldiff)
169 originalchunks = patch.parsepatch(originaldiff)
170
170
171 # 1. filter patch, since we are intending to apply subset of it
171 # 1. filter patch, since we are intending to apply subset of it
172 try:
172 try:
173 chunks, newopts = filterfn(ui, originalchunks)
173 chunks, newopts = filterfn(ui, originalchunks)
174 except patch.PatchError as err:
174 except patch.PatchError as err:
175 raise error.Abort(_('error parsing patch: %s') % err)
175 raise error.Abort(_('error parsing patch: %s') % err)
176 opts.update(newopts)
176 opts.update(newopts)
177
177
178 # We need to keep a backup of files that have been newly added and
178 # We need to keep a backup of files that have been newly added and
179 # modified during the recording process because there is a previous
179 # modified during the recording process because there is a previous
180 # version without the edit in the workdir
180 # version without the edit in the workdir
181 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
181 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
182 contenders = set()
182 contenders = set()
183 for h in chunks:
183 for h in chunks:
184 try:
184 try:
185 contenders.update(set(h.files()))
185 contenders.update(set(h.files()))
186 except AttributeError:
186 except AttributeError:
187 pass
187 pass
188
188
189 changed = status.modified + status.added + status.removed
189 changed = status.modified + status.added + status.removed
190 newfiles = [f for f in changed if f in contenders]
190 newfiles = [f for f in changed if f in contenders]
191 if not newfiles:
191 if not newfiles:
192 ui.status(_('no changes to record\n'))
192 ui.status(_('no changes to record\n'))
193 return 0
193 return 0
194
194
195 modified = set(status.modified)
195 modified = set(status.modified)
196
196
197 # 2. backup changed files, so we can restore them in the end
197 # 2. backup changed files, so we can restore them in the end
198
198
199 if backupall:
199 if backupall:
200 tobackup = changed
200 tobackup = changed
201 else:
201 else:
202 tobackup = [f for f in newfiles if f in modified or f in \
202 tobackup = [f for f in newfiles if f in modified or f in \
203 newlyaddedandmodifiedfiles]
203 newlyaddedandmodifiedfiles]
204 backups = {}
204 backups = {}
205 if tobackup:
205 if tobackup:
206 backupdir = repo.vfs.join('record-backups')
206 backupdir = repo.vfs.join('record-backups')
207 try:
207 try:
208 os.mkdir(backupdir)
208 os.mkdir(backupdir)
209 except OSError as err:
209 except OSError as err:
210 if err.errno != errno.EEXIST:
210 if err.errno != errno.EEXIST:
211 raise
211 raise
212 try:
212 try:
213 # backup continues
213 # backup continues
214 for f in tobackup:
214 for f in tobackup:
215 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
215 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
216 dir=backupdir)
216 dir=backupdir)
217 os.close(fd)
217 os.close(fd)
218 ui.debug('backup %r as %r\n' % (f, tmpname))
218 ui.debug('backup %r as %r\n' % (f, tmpname))
219 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
219 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
220 backups[f] = tmpname
220 backups[f] = tmpname
221
221
222 fp = stringio()
222 fp = stringio()
223 for c in chunks:
223 for c in chunks:
224 fname = c.filename()
224 fname = c.filename()
225 if fname in backups:
225 if fname in backups:
226 c.write(fp)
226 c.write(fp)
227 dopatch = fp.tell()
227 dopatch = fp.tell()
228 fp.seek(0)
228 fp.seek(0)
229
229
230 # 2.5 optionally review / modify patch in text editor
230 # 2.5 optionally review / modify patch in text editor
231 if opts.get('review', False):
231 if opts.get('review', False):
232 patchtext = (crecordmod.diffhelptext
232 patchtext = (crecordmod.diffhelptext
233 + crecordmod.patchhelptext
233 + crecordmod.patchhelptext
234 + fp.read())
234 + fp.read())
235 reviewedpatch = ui.edit(patchtext, "",
235 reviewedpatch = ui.edit(patchtext, "",
236 extra={"suffix": ".diff"},
236 extra={"suffix": ".diff"},
237 repopath=repo.path)
237 repopath=repo.path)
238 fp.truncate(0)
238 fp.truncate(0)
239 fp.write(reviewedpatch)
239 fp.write(reviewedpatch)
240 fp.seek(0)
240 fp.seek(0)
241
241
242 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
242 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
243 # 3a. apply filtered patch to clean repo (clean)
243 # 3a. apply filtered patch to clean repo (clean)
244 if backups:
244 if backups:
245 # Equivalent to hg.revert
245 # Equivalent to hg.revert
246 m = scmutil.matchfiles(repo, backups.keys())
246 m = scmutil.matchfiles(repo, backups.keys())
247 mergemod.update(repo, repo.dirstate.p1(),
247 mergemod.update(repo, repo.dirstate.p1(),
248 False, True, matcher=m)
248 False, True, matcher=m)
249
249
250 # 3b. (apply)
250 # 3b. (apply)
251 if dopatch:
251 if dopatch:
252 try:
252 try:
253 ui.debug('applying patch\n')
253 ui.debug('applying patch\n')
254 ui.debug(fp.getvalue())
254 ui.debug(fp.getvalue())
255 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
255 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
256 except patch.PatchError as err:
256 except patch.PatchError as err:
257 raise error.Abort(str(err))
257 raise error.Abort(str(err))
258 del fp
258 del fp
259
259
260 # 4. We prepared working directory according to filtered
260 # 4. We prepared working directory according to filtered
261 # patch. Now is the time to delegate the job to
261 # patch. Now is the time to delegate the job to
262 # commit/qrefresh or the like!
262 # commit/qrefresh or the like!
263
263
264 # Make all of the pathnames absolute.
264 # Make all of the pathnames absolute.
265 newfiles = [repo.wjoin(nf) for nf in newfiles]
265 newfiles = [repo.wjoin(nf) for nf in newfiles]
266 return commitfunc(ui, repo, *newfiles, **opts)
266 return commitfunc(ui, repo, *newfiles, **opts)
267 finally:
267 finally:
268 # 5. finally restore backed-up files
268 # 5. finally restore backed-up files
269 try:
269 try:
270 dirstate = repo.dirstate
270 dirstate = repo.dirstate
271 for realname, tmpname in backups.iteritems():
271 for realname, tmpname in backups.iteritems():
272 ui.debug('restoring %r to %r\n' % (tmpname, realname))
272 ui.debug('restoring %r to %r\n' % (tmpname, realname))
273
273
274 if dirstate[realname] == 'n':
274 if dirstate[realname] == 'n':
275 # without normallookup, restoring timestamp
275 # without normallookup, restoring timestamp
276 # may cause partially committed files
276 # may cause partially committed files
277 # to be treated as unmodified
277 # to be treated as unmodified
278 dirstate.normallookup(realname)
278 dirstate.normallookup(realname)
279
279
280 # copystat=True here and above are a hack to trick any
280 # copystat=True here and above are a hack to trick any
281 # editors that have f open that we haven't modified them.
281 # editors that have f open that we haven't modified them.
282 #
282 #
283 # Also note that this racy as an editor could notice the
283 # Also note that this racy as an editor could notice the
284 # file's mtime before we've finished writing it.
284 # file's mtime before we've finished writing it.
285 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
285 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
286 os.unlink(tmpname)
286 os.unlink(tmpname)
287 if tobackup:
287 if tobackup:
288 os.rmdir(backupdir)
288 os.rmdir(backupdir)
289 except OSError:
289 except OSError:
290 pass
290 pass
291
291
292 def recordinwlock(ui, repo, message, match, opts):
292 def recordinwlock(ui, repo, message, match, opts):
293 with repo.wlock():
293 with repo.wlock():
294 return recordfunc(ui, repo, message, match, opts)
294 return recordfunc(ui, repo, message, match, opts)
295
295
296 return commit(ui, repo, recordinwlock, pats, opts)
296 return commit(ui, repo, recordinwlock, pats, opts)
297
297
298 def findpossible(cmd, table, strict=False):
298 def findpossible(cmd, table, strict=False):
299 """
299 """
300 Return cmd -> (aliases, command table entry)
300 Return cmd -> (aliases, command table entry)
301 for each matching command.
301 for each matching command.
302 Return debug commands (or their aliases) only if no normal command matches.
302 Return debug commands (or their aliases) only if no normal command matches.
303 """
303 """
304 choice = {}
304 choice = {}
305 debugchoice = {}
305 debugchoice = {}
306
306
307 if cmd in table:
307 if cmd in table:
308 # short-circuit exact matches, "log" alias beats "^log|history"
308 # short-circuit exact matches, "log" alias beats "^log|history"
309 keys = [cmd]
309 keys = [cmd]
310 else:
310 else:
311 keys = table.keys()
311 keys = table.keys()
312
312
313 allcmds = []
313 allcmds = []
314 for e in keys:
314 for e in keys:
315 aliases = parsealiases(e)
315 aliases = parsealiases(e)
316 allcmds.extend(aliases)
316 allcmds.extend(aliases)
317 found = None
317 found = None
318 if cmd in aliases:
318 if cmd in aliases:
319 found = cmd
319 found = cmd
320 elif not strict:
320 elif not strict:
321 for a in aliases:
321 for a in aliases:
322 if a.startswith(cmd):
322 if a.startswith(cmd):
323 found = a
323 found = a
324 break
324 break
325 if found is not None:
325 if found is not None:
326 if aliases[0].startswith("debug") or found.startswith("debug"):
326 if aliases[0].startswith("debug") or found.startswith("debug"):
327 debugchoice[found] = (aliases, table[e])
327 debugchoice[found] = (aliases, table[e])
328 else:
328 else:
329 choice[found] = (aliases, table[e])
329 choice[found] = (aliases, table[e])
330
330
331 if not choice and debugchoice:
331 if not choice and debugchoice:
332 choice = debugchoice
332 choice = debugchoice
333
333
334 return choice, allcmds
334 return choice, allcmds
335
335
336 def findcmd(cmd, table, strict=True):
336 def findcmd(cmd, table, strict=True):
337 """Return (aliases, command table entry) for command string."""
337 """Return (aliases, command table entry) for command string."""
338 choice, allcmds = findpossible(cmd, table, strict)
338 choice, allcmds = findpossible(cmd, table, strict)
339
339
340 if cmd in choice:
340 if cmd in choice:
341 return choice[cmd]
341 return choice[cmd]
342
342
343 if len(choice) > 1:
343 if len(choice) > 1:
344 clist = choice.keys()
344 clist = choice.keys()
345 clist.sort()
345 clist.sort()
346 raise error.AmbiguousCommand(cmd, clist)
346 raise error.AmbiguousCommand(cmd, clist)
347
347
348 if choice:
348 if choice:
349 return choice.values()[0]
349 return choice.values()[0]
350
350
351 raise error.UnknownCommand(cmd, allcmds)
351 raise error.UnknownCommand(cmd, allcmds)
352
352
353 def findrepo(p):
353 def findrepo(p):
354 while not os.path.isdir(os.path.join(p, ".hg")):
354 while not os.path.isdir(os.path.join(p, ".hg")):
355 oldp, p = p, os.path.dirname(p)
355 oldp, p = p, os.path.dirname(p)
356 if p == oldp:
356 if p == oldp:
357 return None
357 return None
358
358
359 return p
359 return p
360
360
361 def bailifchanged(repo, merge=True, hint=None):
361 def bailifchanged(repo, merge=True, hint=None):
362 """ enforce the precondition that working directory must be clean.
362 """ enforce the precondition that working directory must be clean.
363
363
364 'merge' can be set to false if a pending uncommitted merge should be
364 'merge' can be set to false if a pending uncommitted merge should be
365 ignored (such as when 'update --check' runs).
365 ignored (such as when 'update --check' runs).
366
366
367 'hint' is the usual hint given to Abort exception.
367 'hint' is the usual hint given to Abort exception.
368 """
368 """
369
369
370 if merge and repo.dirstate.p2() != nullid:
370 if merge and repo.dirstate.p2() != nullid:
371 raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
371 raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
372 modified, added, removed, deleted = repo.status()[:4]
372 modified, added, removed, deleted = repo.status()[:4]
373 if modified or added or removed or deleted:
373 if modified or added or removed or deleted:
374 raise error.Abort(_('uncommitted changes'), hint=hint)
374 raise error.Abort(_('uncommitted changes'), hint=hint)
375 ctx = repo[None]
375 ctx = repo[None]
376 for s in sorted(ctx.substate):
376 for s in sorted(ctx.substate):
377 ctx.sub(s).bailifchanged(hint=hint)
377 ctx.sub(s).bailifchanged(hint=hint)
378
378
379 def logmessage(ui, opts):
379 def logmessage(ui, opts):
380 """ get the log message according to -m and -l option """
380 """ get the log message according to -m and -l option """
381 message = opts.get('message')
381 message = opts.get('message')
382 logfile = opts.get('logfile')
382 logfile = opts.get('logfile')
383
383
384 if message and logfile:
384 if message and logfile:
385 raise error.Abort(_('options --message and --logfile are mutually '
385 raise error.Abort(_('options --message and --logfile are mutually '
386 'exclusive'))
386 'exclusive'))
387 if not message and logfile:
387 if not message and logfile:
388 try:
388 try:
389 if logfile == '-':
389 if logfile == '-':
390 message = ui.fin.read()
390 message = ui.fin.read()
391 else:
391 else:
392 message = '\n'.join(util.readfile(logfile).splitlines())
392 message = '\n'.join(util.readfile(logfile).splitlines())
393 except IOError as inst:
393 except IOError as inst:
394 raise error.Abort(_("can't read commit message '%s': %s") %
394 raise error.Abort(_("can't read commit message '%s': %s") %
395 (logfile, inst.strerror))
395 (logfile, inst.strerror))
396 return message
396 return message
397
397
398 def mergeeditform(ctxorbool, baseformname):
398 def mergeeditform(ctxorbool, baseformname):
399 """return appropriate editform name (referencing a committemplate)
399 """return appropriate editform name (referencing a committemplate)
400
400
401 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
401 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
402 merging is committed.
402 merging is committed.
403
403
404 This returns baseformname with '.merge' appended if it is a merge,
404 This returns baseformname with '.merge' appended if it is a merge,
405 otherwise '.normal' is appended.
405 otherwise '.normal' is appended.
406 """
406 """
407 if isinstance(ctxorbool, bool):
407 if isinstance(ctxorbool, bool):
408 if ctxorbool:
408 if ctxorbool:
409 return baseformname + ".merge"
409 return baseformname + ".merge"
410 elif 1 < len(ctxorbool.parents()):
410 elif 1 < len(ctxorbool.parents()):
411 return baseformname + ".merge"
411 return baseformname + ".merge"
412
412
413 return baseformname + ".normal"
413 return baseformname + ".normal"
414
414
415 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
415 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
416 editform='', **opts):
416 editform='', **opts):
417 """get appropriate commit message editor according to '--edit' option
417 """get appropriate commit message editor according to '--edit' option
418
418
419 'finishdesc' is a function to be called with edited commit message
419 'finishdesc' is a function to be called with edited commit message
420 (= 'description' of the new changeset) just after editing, but
420 (= 'description' of the new changeset) just after editing, but
421 before checking empty-ness. It should return actual text to be
421 before checking empty-ness. It should return actual text to be
422 stored into history. This allows to change description before
422 stored into history. This allows to change description before
423 storing.
423 storing.
424
424
425 'extramsg' is a extra message to be shown in the editor instead of
425 'extramsg' is a extra message to be shown in the editor instead of
426 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
426 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
427 is automatically added.
427 is automatically added.
428
428
429 'editform' is a dot-separated list of names, to distinguish
429 'editform' is a dot-separated list of names, to distinguish
430 the purpose of commit text editing.
430 the purpose of commit text editing.
431
431
432 'getcommiteditor' returns 'commitforceeditor' regardless of
432 'getcommiteditor' returns 'commitforceeditor' regardless of
433 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
433 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
434 they are specific for usage in MQ.
434 they are specific for usage in MQ.
435 """
435 """
436 if edit or finishdesc or extramsg:
436 if edit or finishdesc or extramsg:
437 return lambda r, c, s: commitforceeditor(r, c, s,
437 return lambda r, c, s: commitforceeditor(r, c, s,
438 finishdesc=finishdesc,
438 finishdesc=finishdesc,
439 extramsg=extramsg,
439 extramsg=extramsg,
440 editform=editform)
440 editform=editform)
441 elif editform:
441 elif editform:
442 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
442 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
443 else:
443 else:
444 return commiteditor
444 return commiteditor
445
445
446 def loglimit(opts):
446 def loglimit(opts):
447 """get the log limit according to option -l/--limit"""
447 """get the log limit according to option -l/--limit"""
448 limit = opts.get('limit')
448 limit = opts.get('limit')
449 if limit:
449 if limit:
450 try:
450 try:
451 limit = int(limit)
451 limit = int(limit)
452 except ValueError:
452 except ValueError:
453 raise error.Abort(_('limit must be a positive integer'))
453 raise error.Abort(_('limit must be a positive integer'))
454 if limit <= 0:
454 if limit <= 0:
455 raise error.Abort(_('limit must be positive'))
455 raise error.Abort(_('limit must be positive'))
456 else:
456 else:
457 limit = None
457 limit = None
458 return limit
458 return limit
459
459
460 def makefilename(repo, pat, node, desc=None,
460 def makefilename(repo, pat, node, desc=None,
461 total=None, seqno=None, revwidth=None, pathname=None):
461 total=None, seqno=None, revwidth=None, pathname=None):
462 node_expander = {
462 node_expander = {
463 'H': lambda: hex(node),
463 'H': lambda: hex(node),
464 'R': lambda: str(repo.changelog.rev(node)),
464 'R': lambda: str(repo.changelog.rev(node)),
465 'h': lambda: short(node),
465 'h': lambda: short(node),
466 'm': lambda: re.sub('[^\w]', '_', str(desc))
466 'm': lambda: re.sub('[^\w]', '_', str(desc))
467 }
467 }
468 expander = {
468 expander = {
469 '%': lambda: '%',
469 '%': lambda: '%',
470 'b': lambda: os.path.basename(repo.root),
470 'b': lambda: os.path.basename(repo.root),
471 }
471 }
472
472
473 try:
473 try:
474 if node:
474 if node:
475 expander.update(node_expander)
475 expander.update(node_expander)
476 if node:
476 if node:
477 expander['r'] = (lambda:
477 expander['r'] = (lambda:
478 str(repo.changelog.rev(node)).zfill(revwidth or 0))
478 str(repo.changelog.rev(node)).zfill(revwidth or 0))
479 if total is not None:
479 if total is not None:
480 expander['N'] = lambda: str(total)
480 expander['N'] = lambda: str(total)
481 if seqno is not None:
481 if seqno is not None:
482 expander['n'] = lambda: str(seqno)
482 expander['n'] = lambda: str(seqno)
483 if total is not None and seqno is not None:
483 if total is not None and seqno is not None:
484 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
484 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
485 if pathname is not None:
485 if pathname is not None:
486 expander['s'] = lambda: os.path.basename(pathname)
486 expander['s'] = lambda: os.path.basename(pathname)
487 expander['d'] = lambda: os.path.dirname(pathname) or '.'
487 expander['d'] = lambda: os.path.dirname(pathname) or '.'
488 expander['p'] = lambda: pathname
488 expander['p'] = lambda: pathname
489
489
490 newname = []
490 newname = []
491 patlen = len(pat)
491 patlen = len(pat)
492 i = 0
492 i = 0
493 while i < patlen:
493 while i < patlen:
494 c = pat[i]
494 c = pat[i:i + 1]
495 if c == '%':
495 if c == '%':
496 i += 1
496 i += 1
497 c = pat[i]
497 c = pat[i:i + 1]
498 c = expander[c]()
498 c = expander[c]()
499 newname.append(c)
499 newname.append(c)
500 i += 1
500 i += 1
501 return ''.join(newname)
501 return ''.join(newname)
502 except KeyError as inst:
502 except KeyError as inst:
503 raise error.Abort(_("invalid format spec '%%%s' in output filename") %
503 raise error.Abort(_("invalid format spec '%%%s' in output filename") %
504 inst.args[0])
504 inst.args[0])
505
505
506 class _unclosablefile(object):
506 class _unclosablefile(object):
507 def __init__(self, fp):
507 def __init__(self, fp):
508 self._fp = fp
508 self._fp = fp
509
509
510 def close(self):
510 def close(self):
511 pass
511 pass
512
512
513 def __iter__(self):
513 def __iter__(self):
514 return iter(self._fp)
514 return iter(self._fp)
515
515
516 def __getattr__(self, attr):
516 def __getattr__(self, attr):
517 return getattr(self._fp, attr)
517 return getattr(self._fp, attr)
518
518
519 def __enter__(self):
519 def __enter__(self):
520 return self
520 return self
521
521
522 def __exit__(self, exc_type, exc_value, exc_tb):
522 def __exit__(self, exc_type, exc_value, exc_tb):
523 pass
523 pass
524
524
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return a file-like object for the output/input pattern ``pat``.

    ``'-'`` (or an empty pattern) maps to the ui's standard streams,
    wrapped so callers cannot close them.  An already-open file-like
    object is returned as-is when its direction matches ``mode``.
    Otherwise the pattern is expanded via makefilename() and opened.

    ``modemap``, when given, remembers the mode per expanded filename so
    that the first 'wb' open of a file is followed by 'ab' opens.
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)

    # pass through objects that already behave like files in the
    # requested direction
    if writable and util.safehasattr(pat, 'write'):
        return pat
    if 'r' in mode and util.safehasattr(pat, 'read'):
        return pat

    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        # first write truncates, subsequent writes to the same file append
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)
547
547
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']

    # validate the option combination before touching any storage
    if cl and mf:
        raise error.Abort(
            _('cannot specify --changelog and --manifest at the same time'))
    if cl and dir:
        raise error.Abort(
            _('cannot specify --changelog and --dir at the same time'))
    if cl or mf or dir:
        if file_:
            raise error.Abort(
                _('cannot specify filename with --changelog or --manifest'))
        if not repo:
            raise error.Abort(
                _('cannot specify --changelog or --manifest or --dir '
                  'without a repository'))

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            # per-directory manifests only exist with tree manifests
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog

    if r:
        return r

    # fall back to opening a revlog file straight from the filesystem
    if not file_:
        raise error.CommandError(cmd, _('invalid arguments'))
    if not os.path.isfile(file_):
        raise error.Abort(_("revlog '%s' not found") % file_)
    # file_ is expected to name the data file ("...d"); open the
    # matching index ("...i")
    return revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                         file_[:-2] + ".i")
592
592
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, when rename=True, move) working directory files.

    The last element of ``pats`` is popped off and used as the
    destination; the remaining patterns select the sources.  Returns
    True if at least one file could not be copied.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # return [(abs, rel, exact)] for tracked files matching pat,
        # warning about (and skipping) unmanaged/removed files
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                # only complain when the user named the file explicitly
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # copy one file; returns True (a failure) only on an unexpected
        # IOError, otherwise None
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            # source and target normalize to the same path: a case-only
            # rename on a case-insensitive filesystem
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            # --after: only record the copy/move, the file must already
            # be at the target
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name so
                    # the rename is effective on case-insensitive systems
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                repo.wvfs.unlinkpath(abssrc)
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist at the target
                    # computed with this strip length
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    # pick the strip length whose targets match more of
                    # the already-present files
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    # with --after the target paths are derived from what is already on
    # disk rather than from the source layout
    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
838
838
## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' functions are run before the commit is made and are provided
# the following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' functions are run after the commit is made and are provided
# the following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
859
859
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node or None, rejects flag)
    tuple.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    # command-line options take precedence over patch-header metadata
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    # --bypass applies the patch to a memory context instead of the
    # working directory
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    # patch.extract found no diff payload: nothing to import
    if not tmpname:
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                # recorded parents are unknown in this repo
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply to the working directory
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                # --partial: keep going and commit what applied
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                extra = {}
                # give registered extensions a chance to amend 'extra'
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                overrides = {}
                if partial:
                    overrides[('ui', 'allowemptycommit')] = True
                with repo.ui.configoverride(overrides, 'import'):
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                for idfunc in extrapostimport:
                    extrapostimportmap[idfunc](repo[n])
        else:
            # --bypass: build the changeset in memory, leaving the
            # working directory untouched
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            user,
                                            date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        # always remove the temporary patch file created by patch.extract
        os.unlink(tmpname)
1027
1027
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# each function has to return a string to be added to the header, or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1035
1035
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.

    repo -- repository the revisions are read from
    revs -- iterable of revisions to export, one patch per revision
    template -- filename template used when no ``fp`` is given; when it
        is empty, output goes to the ui instead of a file
    fp -- optional file object; when given, all patches are written to it
    switch_parent -- diff against the second parent instead of the first
    opts -- diff options passed through to patch.diffui
    match -- matcher restricting which files appear in each patch
    '''

    total = len(revs)
    # width used when expanding '%r' in the filename template; ``or [0]``
    # keeps max() from raising ValueError if no revisions were given
    revwidth = max([len(str(rev)) for rev in revs] or [0])
    # shared across calls so makefileobj can reuse open modes per filename
    filemode = {}

    def single(rev, seqno, fp):
        # write one changeset as a patch: header, description, then diff
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            shouldclose = True
        if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                # plain files do not understand ui keyword args (labels)
                fp.write(s)

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))

        # let extensions contribute extra header lines (see extraexport)
        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1100
1100
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    When ``stat`` is true a diffstat summary is printed instead of the
    full diff.  Output goes to ``fp`` when given, otherwise to ``ui``.
    ``root`` (a repo-relative directory) makes file paths in the diff
    relative to it; ``listsubrepos`` recurses into subrepositories.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            # plain file objects do not accept ui keyword args (labels)
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        # warn about (but still process) patterns outside the relative root
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat needs no context lines; width follows the terminal
        # unless plain output was requested
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1157
1157
1158 def _changesetlabels(ctx):
1158 def _changesetlabels(ctx):
1159 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1159 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1160 if ctx.obsolete():
1160 if ctx.obsolete():
1161 labels.append('changeset.obsolete')
1161 labels.append('changeset.obsolete')
1162 if ctx.troubled():
1162 if ctx.troubled():
1163 labels.append('changeset.troubled')
1163 labels.append('changeset.troubled')
1164 for trouble in ctx.troubles():
1164 for trouble in ctx.troubles():
1165 labels.append('trouble.%s' % trouble)
1165 labels.append('trouble.%s' % trouble)
1166 return ' '.join(labels)
1166 return ' '.join(labels)
1167
1167
class changeset_printer(object):
    '''show changeset information when templating not requested.

    Output is written to ``ui`` immediately, or buffered per revision
    when ``buffered`` is true (see show()/flush()).
    '''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        # matcher enabling patch/diffstat output (None disables it)
        self.matchfn = matchfn
        self.diffopts = diffopts
        # per-revision buffered output, keyed by rev
        self.header = {}
        self.hunk = {}
        # last header written, used to suppress consecutive duplicates
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        # emit buffered output for ctx; returns 1 if a hunk was written,
        # 0 otherwise
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        # render ctx; in buffered mode capture the output for a later
        # flush() instead of writing it immediately
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        # NOTE(review): column padding inside the i18n strings below may
        # have been collapsed by extraction -- verify alignment upstream
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label=_changesetlabels(ctx))

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifestlog._revlog.rev(mnode),
                           hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if ctx.troubled():
            # i18n: column positioning for "hg log"
            self.ui.write(_("trouble: %s\n") % ', '.join(ctx.troubles()),
                          label='log.trouble')

        if self.ui.debugflag:
            # in debug mode, list modified/added/removed files separately
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, util.escapestr(value)),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                # verbose mode shows the full description
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        # append diffstat and/or diff against the first parent, controlled
        # by the 'stat' and 'patch' diff options
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")
1343
1343
class jsonchangeset(changeset_printer):
    '''format changeset information as a JSON array of objects.

    The opening bracket is emitted lazily on the first changeset so that
    close() can emit "[]" when nothing was shown at all.
    '''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # tracks whether any changeset has been written yet
        self._first = True

    def close(self):
        # NOTE(review): whitespace inside the JSON literal strings below
        # may have been collapsed by extraction -- verify indentation
        # against upstream before relying on exact output
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        # the working directory has rev/node None; serialize as JSON null
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        # quiet mode: only rev and node
        if self.ui.quiet:
            self.ui.write(('\n "rev": %s') % jrev)
            self.ui.write((',\n "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n "rev": %s') % jrev)
        self.ui.write((',\n "node": %s') % jnode)
        self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            # debug mode adds manifest, extras and per-kind file lists
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write((',\n "manifest": %s') % jmanifestnode)

            self.ui.write((',\n "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            files = ctx.p1().status(ctx)
            self.ui.write((',\n "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            # verbose mode adds a flat file list and copy information
            self.ui.write((',\n "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            # capture diffstat/diff output in a buffer so it can be
            # embedded as an escaped JSON string
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1442
1442
class changeset_templater(changeset_printer):
    '''format changeset information using a template.

    Exactly one of ``tmpl`` (an inline template) or ``mapfile`` (a style
    map file) may be given.
    '''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        assert not (tmpl and mapfile)
        defaulttempl = templatekw.defaulttempl
        if mapfile:
            self.t = templater.templater.frommapfile(mapfile,
                                                     cache=defaulttempl)
        else:
            self.t = formatter.maketemplater(ui, 'changeset', tmpl,
                                             cache=defaulttempl)

        # monotonically increasing index exposed as {index} to templates
        self._counter = itertools.count()
        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        # later (more specific) modes override earlier ones, so e.g.
        # 'changeset_debug' wins over 'changeset' in debug mode
        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        # append the document footer before the base class flushes it
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        # build the property dict handed to template keywords
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['index'] = next(self._counter)
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                # avoid repeating the same header for consecutive csets
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts['changeset']
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1519
1519
def gettemplate(ui, tmpl, style):
    """Find the template matching the given template spec or style.

    Returns a (template, mapfile) pair; at most one element is set.
    An explicit template or style beats the configuration, and a
    configured ui.logtemplate beats a configured ui.style.
    """
    # fall back to ui settings when neither spec was given explicitly
    if not (tmpl or style):
        configured = ui.config('ui', 'logtemplate')
        if configured:
            return templater.unquotestring(configured), None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if style and not tmpl:
        mapfile = style
        # a bare style name refers to a shipped map file; resolve it
        if not os.path.split(mapfile)[0]:
            found = (templater.templatepath('map-cmdline.' + mapfile)
                     or templater.templatepath(mapfile))
            if found:
                mapfile = found
        return None, mapfile

    if not tmpl:
        return None, None

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1546
1546
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.

    Returns the printer object; callers invoke its show()/close().
    """
    # options
    matchfn = None
    # a matcher is only needed when patch/diffstat output was requested
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)

    # 'json' is a built-in pseudo-template handled by a dedicated printer
    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))

    if not tmpl and not mapfile:
        return changeset_printer(ui, repo, matchfn, opts, buffered)

    return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile, buffered)
1572
1572
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    # All output goes through the formatter 'fm' so that templated and
    # plain output both work; fields are emitted in a fixed order.
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('precnode', '%s ', hex(marker.precnode()))
    successors = marker.succnodes()
    fm.condwrite(successors, 'succnodes', '%s ',
                 fm.formatlist([hex(n) for n in successors], name='node'))
    fm.write('flag', '%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist([hex(n) for n in parentnodes], name='node',
                               sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # The marker date was already printed above; drop any 'date' key
    # from the metadata copy so it is not repeated.
    metadata = marker.metadata().copy()
    metadata.pop('date', None)
    fm.write('metadata', '{%s}',
             fm.formatdict(metadata, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1593
1593
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    matched = {}

    # Record the date of every walked changeset satisfying the spec;
    # walkchangerevs invokes this on each context before yielding.
    def collect(ctx, fns):
        ctxdate = ctx.date()
        if datematch(ctxdate[0]):
            matched[ctx.rev()] = ctxdate

    # walkchangerevs usually yields in backwards order, so the first
    # hit found here is the tipmost matching revision.
    for ctx in walkchangerevs(repo, matcher, {'rev': None}, collect):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return str(rev)

    raise error.Abort(_("revision matching date not found"))
1614
1614
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double up to sizelimit, then repeat.

    Starts at windowsize and doubles after each yield while the value
    is still below sizelimit; once the limit is reached the last value
    is yielded forever (the generator never terminates on its own).
    """
    current = windowsize
    while True:
        yield current
        if current < sizelimit:
            current = current * 2
1620
1620
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone.

    Callers catch this to fall back to the slow path that reads the
    changelog instead.
    """
1623
1623
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    # wanted: changelog revs touched by the matched files (the result).
    wanted = set()
    # copies: (filename, filenode) pairs discovered via renames when
    # following; consumed by iterfiles() below.
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            # 'copied' is the rename source only when following; else False.
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # Yields (filename, filenode-or-None) pairs: explicit matches
        # first, then any rename sources accumulated in 'copies'.
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                # Queue the rename source so iterfiles() walks it too.
                copies.append(copied)

    return wanted
1720
1720
class _followfilter(object):
    """Stateful predicate selecting revisions related to a start rev.

    The first rev passed to match() becomes the start point and is
    accepted; subsequent calls accept descendants when walking forward
    (rev > startrev) and ancestors when walking backwards.
    """

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def parentsof(r):
            # With onlyfirst, honor only the first parent of merges.
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(r)[0:1]
            return [p for p in self.repo.changelog.parentrevs(r)
                    if p != nullrev]

        # First call: remember the starting revision and accept it.
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in parentsof(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(parentsof(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(parentsof(rev))
                return True

        return False
1758
1758
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # Slow path: patterns or (exact/prefix matches combined with
    # --removed) require reading the changelog, not just filelogs.
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    # Local alias for building change contexts from revs below.
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                # Memoized membership: once a rev is checked it moves
                # out of self.revs and, if it matched, into self.set.
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): relies on 'wanted' supporting subtraction
                # with a list; a plain set() here would raise TypeError,
                # and lazywantedset defines no '-' either -- confirm the
                # types that can reach this branch.
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            # Gather up to windowsize wanted revs from the iterator.
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # Call prepare() on the window in forward (sorted) order...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ...then yield contexts in the original (requested) order.
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1896
1896
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "revtofiles". It is populated by reproducing
    # the graph traversal already done by the --follow revset and
    # relating revs to file names (which is not "correct" but good
    # enough).
    revtofiles = {}
    populated = [False]  # mutable cell so the closure can flip it
    pctx = repo['.']

    def fill():
        for fn in files:
            fctx = pctx[fn]
            revtofiles.setdefault(fctx.introrev(), set()).add(fctx.path())
            for anc in fctx.ancestors(followfirst=followfirst):
                revtofiles.setdefault(anc.rev(), set()).add(anc.path())

    def filematcher(rev):
        # Populate lazily, the first time a matcher is requested.
        if not populated[0]:
            populated[0] = True
            fill()
        return scmutil.matchfiles(repo, revtofiles.get(rev, []))

    return filematcher
1924
1924
1925 def _makenofollowlogfilematcher(repo, pats, opts):
1925 def _makenofollowlogfilematcher(repo, pats, opts):
1926 '''hook for extensions to override the filematcher for non-follow cases'''
1926 '''hook for extensions to override the filematcher for non-follow cases'''
1927 return None
1927 return None
1928
1928
1929 def _makelogrevset(repo, pats, opts, revs):
1929 def _makelogrevset(repo, pats, opts, revs):
1930 """Return (expr, filematcher) where expr is a revset string built
1930 """Return (expr, filematcher) where expr is a revset string built
1931 from log options and file patterns or None. If --stat or --patch
1931 from log options and file patterns or None. If --stat or --patch
1932 are not passed filematcher is None. Otherwise it is a callable
1932 are not passed filematcher is None. Otherwise it is a callable
1933 taking a revision number and returning a match objects filtering
1933 taking a revision number and returning a match objects filtering
1934 the files to be detailed when displaying the revision.
1934 the files to be detailed when displaying the revision.
1935 """
1935 """
1936 opt2revset = {
1936 opt2revset = {
1937 'no_merges': ('not merge()', None),
1937 'no_merges': ('not merge()', None),
1938 'only_merges': ('merge()', None),
1938 'only_merges': ('merge()', None),
1939 '_ancestors': ('ancestors(%(val)s)', None),
1939 '_ancestors': ('ancestors(%(val)s)', None),
1940 '_fancestors': ('_firstancestors(%(val)s)', None),
1940 '_fancestors': ('_firstancestors(%(val)s)', None),
1941 '_descendants': ('descendants(%(val)s)', None),
1941 '_descendants': ('descendants(%(val)s)', None),
1942 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1942 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1943 '_matchfiles': ('_matchfiles(%(val)s)', None),
1943 '_matchfiles': ('_matchfiles(%(val)s)', None),
1944 'date': ('date(%(val)r)', None),
1944 'date': ('date(%(val)r)', None),
1945 'branch': ('branch(%(val)r)', ' or '),
1945 'branch': ('branch(%(val)r)', ' or '),
1946 '_patslog': ('filelog(%(val)r)', ' or '),
1946 '_patslog': ('filelog(%(val)r)', ' or '),
1947 '_patsfollow': ('follow(%(val)r)', ' or '),
1947 '_patsfollow': ('follow(%(val)r)', ' or '),
1948 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1948 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1949 'keyword': ('keyword(%(val)r)', ' or '),
1949 'keyword': ('keyword(%(val)r)', ' or '),
1950 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1950 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1951 'user': ('user(%(val)r)', ' or '),
1951 'user': ('user(%(val)r)', ' or '),
1952 }
1952 }
1953
1953
1954 opts = dict(opts)
1954 opts = dict(opts)
1955 # follow or not follow?
1955 # follow or not follow?
1956 follow = opts.get('follow') or opts.get('follow_first')
1956 follow = opts.get('follow') or opts.get('follow_first')
1957 if opts.get('follow_first'):
1957 if opts.get('follow_first'):
1958 followfirst = 1
1958 followfirst = 1
1959 else:
1959 else:
1960 followfirst = 0
1960 followfirst = 0
1961 # --follow with FILE behavior depends on revs...
1961 # --follow with FILE behavior depends on revs...
1962 it = iter(revs)
1962 it = iter(revs)
1963 startrev = next(it)
1963 startrev = next(it)
1964 followdescendants = startrev < next(it, startrev)
1964 followdescendants = startrev < next(it, startrev)
1965
1965
1966 # branch and only_branch are really aliases and must be handled at
1966 # branch and only_branch are really aliases and must be handled at
1967 # the same time
1967 # the same time
1968 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1968 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1969 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1969 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1970 # pats/include/exclude are passed to match.match() directly in
1970 # pats/include/exclude are passed to match.match() directly in
1971 # _matchfiles() revset but walkchangerevs() builds its matcher with
1971 # _matchfiles() revset but walkchangerevs() builds its matcher with
1972 # scmutil.match(). The difference is input pats are globbed on
1972 # scmutil.match(). The difference is input pats are globbed on
1973 # platforms without shell expansion (windows).
1973 # platforms without shell expansion (windows).
1974 wctx = repo[None]
1974 wctx = repo[None]
1975 match, pats = scmutil.matchandpats(wctx, pats, opts)
1975 match, pats = scmutil.matchandpats(wctx, pats, opts)
1976 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1976 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1977 opts.get('removed'))
1977 opts.get('removed'))
1978 if not slowpath:
1978 if not slowpath:
1979 for f in match.files():
1979 for f in match.files():
1980 if follow and f not in wctx:
1980 if follow and f not in wctx:
1981 # If the file exists, it may be a directory, so let it
1981 # If the file exists, it may be a directory, so let it
1982 # take the slow path.
1982 # take the slow path.
1983 if os.path.exists(repo.wjoin(f)):
1983 if os.path.exists(repo.wjoin(f)):
1984 slowpath = True
1984 slowpath = True
1985 continue
1985 continue
1986 else:
1986 else:
1987 raise error.Abort(_('cannot follow file not in parent '
1987 raise error.Abort(_('cannot follow file not in parent '
1988 'revision: "%s"') % f)
1988 'revision: "%s"') % f)
1989 filelog = repo.file(f)
1989 filelog = repo.file(f)
1990 if not filelog:
1990 if not filelog:
1991 # A zero count may be a directory or deleted file, so
1991 # A zero count may be a directory or deleted file, so
1992 # try to find matching entries on the slow path.
1992 # try to find matching entries on the slow path.
1993 if follow:
1993 if follow:
1994 raise error.Abort(
1994 raise error.Abort(
1995 _('cannot follow nonexistent file: "%s"') % f)
1995 _('cannot follow nonexistent file: "%s"') % f)
1996 slowpath = True
1996 slowpath = True
1997
1997
1998 # We decided to fall back to the slowpath because at least one
1998 # We decided to fall back to the slowpath because at least one
1999 # of the paths was not a file. Check to see if at least one of them
1999 # of the paths was not a file. Check to see if at least one of them
2000 # existed in history - in that case, we'll continue down the
2000 # existed in history - in that case, we'll continue down the
2001 # slowpath; otherwise, we can turn off the slowpath
2001 # slowpath; otherwise, we can turn off the slowpath
2002 if slowpath:
2002 if slowpath:
2003 for path in match.files():
2003 for path in match.files():
2004 if path == '.' or path in repo.store:
2004 if path == '.' or path in repo.store:
2005 break
2005 break
2006 else:
2006 else:
2007 slowpath = False
2007 slowpath = False
2008
2008
2009 fpats = ('_patsfollow', '_patsfollowfirst')
2009 fpats = ('_patsfollow', '_patsfollowfirst')
2010 fnopats = (('_ancestors', '_fancestors'),
2010 fnopats = (('_ancestors', '_fancestors'),
2011 ('_descendants', '_fdescendants'))
2011 ('_descendants', '_fdescendants'))
2012 if slowpath:
2012 if slowpath:
2013 # See walkchangerevs() slow path.
2013 # See walkchangerevs() slow path.
2014 #
2014 #
2015 # pats/include/exclude cannot be represented as separate
2015 # pats/include/exclude cannot be represented as separate
2016 # revset expressions as their filtering logic applies at file
2016 # revset expressions as their filtering logic applies at file
2017 # level. For instance "-I a -X a" matches a revision touching
2017 # level. For instance "-I a -X a" matches a revision touching
2018 # "a" and "b" while "file(a) and not file(b)" does
2018 # "a" and "b" while "file(a) and not file(b)" does
2019 # not. Besides, filesets are evaluated against the working
2019 # not. Besides, filesets are evaluated against the working
2020 # directory.
2020 # directory.
2021 matchargs = ['r:', 'd:relpath']
2021 matchargs = ['r:', 'd:relpath']
2022 for p in pats:
2022 for p in pats:
2023 matchargs.append('p:' + p)
2023 matchargs.append('p:' + p)
2024 for p in opts.get('include', []):
2024 for p in opts.get('include', []):
2025 matchargs.append('i:' + p)
2025 matchargs.append('i:' + p)
2026 for p in opts.get('exclude', []):
2026 for p in opts.get('exclude', []):
2027 matchargs.append('x:' + p)
2027 matchargs.append('x:' + p)
2028 matchargs = ','.join(('%r' % p) for p in matchargs)
2028 matchargs = ','.join(('%r' % p) for p in matchargs)
2029 opts['_matchfiles'] = matchargs
2029 opts['_matchfiles'] = matchargs
2030 if follow:
2030 if follow:
2031 opts[fnopats[0][followfirst]] = '.'
2031 opts[fnopats[0][followfirst]] = '.'
2032 else:
2032 else:
2033 if follow:
2033 if follow:
2034 if pats:
2034 if pats:
2035 # follow() revset interprets its file argument as a
2035 # follow() revset interprets its file argument as a
2036 # manifest entry, so use match.files(), not pats.
2036 # manifest entry, so use match.files(), not pats.
2037 opts[fpats[followfirst]] = list(match.files())
2037 opts[fpats[followfirst]] = list(match.files())
2038 else:
2038 else:
2039 op = fnopats[followdescendants][followfirst]
2039 op = fnopats[followdescendants][followfirst]
2040 opts[op] = 'rev(%d)' % startrev
2040 opts[op] = 'rev(%d)' % startrev
2041 else:
2041 else:
2042 opts['_patslog'] = list(pats)
2042 opts['_patslog'] = list(pats)
2043
2043
2044 filematcher = None
2044 filematcher = None
2045 if opts.get('patch') or opts.get('stat'):
2045 if opts.get('patch') or opts.get('stat'):
2046 # When following files, track renames via a special matcher.
2046 # When following files, track renames via a special matcher.
2047 # If we're forced to take the slowpath it means we're following
2047 # If we're forced to take the slowpath it means we're following
2048 # at least one pattern/directory, so don't bother with rename tracking.
2048 # at least one pattern/directory, so don't bother with rename tracking.
2049 if follow and not match.always() and not slowpath:
2049 if follow and not match.always() and not slowpath:
2050 # _makefollowlogfilematcher expects its files argument to be
2050 # _makefollowlogfilematcher expects its files argument to be
2051 # relative to the repo root, so use match.files(), not pats.
2051 # relative to the repo root, so use match.files(), not pats.
2052 filematcher = _makefollowlogfilematcher(repo, match.files(),
2052 filematcher = _makefollowlogfilematcher(repo, match.files(),
2053 followfirst)
2053 followfirst)
2054 else:
2054 else:
2055 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2055 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2056 if filematcher is None:
2056 if filematcher is None:
2057 filematcher = lambda rev: match
2057 filematcher = lambda rev: match
2058
2058
2059 expr = []
2059 expr = []
2060 for op, val in sorted(opts.iteritems()):
2060 for op, val in sorted(opts.iteritems()):
2061 if not val:
2061 if not val:
2062 continue
2062 continue
2063 if op not in opt2revset:
2063 if op not in opt2revset:
2064 continue
2064 continue
2065 revop, andor = opt2revset[op]
2065 revop, andor = opt2revset[op]
2066 if '%(val)' not in revop:
2066 if '%(val)' not in revop:
2067 expr.append(revop)
2067 expr.append(revop)
2068 else:
2068 else:
2069 if not isinstance(val, list):
2069 if not isinstance(val, list):
2070 e = revop % {'val': val}
2070 e = revop % {'val': val}
2071 else:
2071 else:
2072 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2072 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2073 expr.append(e)
2073 expr.append(e)
2074
2074
2075 if expr:
2075 if expr:
2076 expr = '(' + ' and '.join(expr) + ')'
2076 expr = '(' + ' and '.join(expr) + ')'
2077 else:
2077 else:
2078 expr = None
2078 expr = None
2079 return expr, filematcher
2079 return expr, filematcher
2080
2080
def _logrevs(repo, opts):
    """Resolve the initial revision set a log command should visit.

    The default --rev value depends on --follow, while --follow behavior
    depends on the revisions resolved from --rev, hence this helper.
    """
    following = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        # explicit --rev wins over any --follow-derived default
        return scmutil.revrange(repo, opts['rev'])
    if following:
        if repo.dirstate.p1() == nullid:
            # nothing checked out: nothing to follow
            return smartset.baseset()
        return repo.revs('reverse(:.)')
    allrevs = smartset.spanset(repo)
    allrevs.reverse()
    return allrevs
2095
2095
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        # nothing to show: short-circuit before building a revset
        return smartset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        if not (revs.isdescending() or revs.istopo()):
            revs.sort(reverse=True)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # honor --limit lazily: consume at most 'limit' revisions from
        # the (possibly lazy) smartset instead of materializing it all
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = smartset.baseset(limitedrevs)

    return revs, expr, filematcher
2126
2126
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # honor --limit without materializing the full (lazy) smartset
        picked = []
        revgen = iter(revs)
        while len(picked) < limit:
            try:
                picked.append(next(revgen))
            except StopIteration:
                break
        revs = smartset.baseset(picked)

    return revs, expr, filematcher
2152
2152
def _graphnodeformatter(ui, displayer):
    """Return a function rendering the graph node symbol for a changeset.

    Uses the ui.graphnodetemplate configuration when set; otherwise falls
    back to the built-in {graphnode} keyword implementation.
    """
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        return templatekw.showgraphnode  # fast path for "{graphnode}"

    spec = templater.unquotestring(spec)
    templ = formatter.gettemplater(ui, 'graphnode', spec)
    cache = {}
    if isinstance(displayer, changeset_templater):
        cache = displayer.cache  # reuse cache of slow templates
    props = templatekw.keywords.copy()
    props['templ'] = templ
    props['cache'] = cache
    def formatnode(repo, ctx):
        # NOTE: 'props' is shared across calls; the per-revision entries
        # below are overwritten on every invocation
        props['ctx'] = ctx
        props['repo'] = repo
        props['ui'] = repo.ui
        props['revcache'] = {}
        return templater.stringify(templ('graphnode', **props))
    return formatnode
2173
2173
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    """Render an ASCII revision graph for the rows yielded by 'dag'.

    'edgefn' computes the graph edges for each row; 'getrenamed', when
    provided, is used to report copies; 'filematcher' narrows the files
    detailed per revision (for --patch/--stat output).
    """
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        # the buffered displayer stored the rendered changeset in 'hunk';
        # split it into lines so graph columns can be drawn alongside
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, state, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2221
2221
def graphlog(ui, repo, pats, opts):
    """Show revision history alongside an ASCII revision graph.

    Parameters are identical to the ones of the log command.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    # With --copies, build a rename lookup bounded by the highest
    # requested revision (if --rev was given).
    getrenamed = None
    if opts.get('copies'):
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        else:
            endrev = None
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    ui.pager('log')
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2238
2238
def checkunsupportedgraphflags(pats, opts):
    """Abort if an option incompatible with -G/--graph was supplied."""
    unsupported = ["newest_first"]
    for name in unsupported:
        if opts.get(name):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % name.replace("_", "-"))
2244
2244
def graphrevs(repo, nodes, opts):
    """Turn a node list into graph rows, newest first, honoring --limit.

    Note: reverses 'nodes' in place.
    """
    maxcount = loglimit(opts)
    nodes.reverse()
    if maxcount is None:
        return graphmod.nodes(repo, nodes)
    return graphmod.nodes(repo, nodes[:maxcount])
2251
2251
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matching 'match' for addition, recursing into subrepos.

    Returns the list of files that could not be added ('bad'). When
    'explicitonly' is true, only exactly-named files are considered.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # record files the matcher reports as bad while still delegating to
    # the original bad-file callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # check for case-collision against already-seen names
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        # only report rejections for files the user named explicitly
        bad.extend(f for f in rejected if f in match.files())
    return bad
2294
2294
def addwebdirpath(repo, serverpath, webconf):
    """Register 'repo' (and, recursively, its subrepos) in 'webconf'."""
    root = repo.root
    webconf[serverpath] = root
    repo.ui.debug('adding %s = %s\n' % (serverpath, root))

    # every revision touching .hgsub may reference a different subrepo set
    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2303
2303
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking matched files, recursing into subrepos.

    Returns a (bad, forgot) pair: files that could not be forgotten and
    files that actually were.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # modified + added + deleted + clean
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2351
2351
def files(ui, ctx, m, fm, fmt, subrepos):
    """List files of 'ctx' matching 'm' through formatter 'fm'.

    Returns 0 if at least one file was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # in the working directory (rev is None), skip files marked removed
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                # recurse deeper only when explicitly named or -S given
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2381
2381
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Schedule matched files for removal (hg remove/rm).

    'after' records already-deleted files instead of unlinking; 'force'
    removes even modified/added files. Returns 0 on success, 1 if any
    file was skipped with a warning. When 'warnings' is supplied by a
    recursive (subrepo) call, messages are appended to it and emitted
    only by the outermost invocation.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True  # outermost call: emit the collected warnings at the end
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            # does 'f' live inside one of the working copy's subrepos?
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only record files already gone; warn about the rest
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2499
2499
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write out the contents of matched files from 'ctx' (hg cat).

    Returns 0 if at least one file was written, 1 otherwise.
    """
    err = 1

    def write(path):
        # open the destination (file or pattern-expanded path) and dump
        # the revision's data, optionally run through decode filters
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                write(file)
                return 0
        except KeyError:
            # fall through to the full walk below
            pass

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2542
2542
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        # normalize the user-supplied date before handing it on
        opts['date'] = util.parsedate(rawdate)
    msg = logmessage(ui, opts)
    m = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove') and scmutil.addremove(repo, m, "", opts) != 0:
        raise error.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, msg, m, opts)
2559
2559
def samefile(f, ctx1, ctx2):
    """Report whether file f is identical (content and flags) in both contexts.

    A file absent from both manifests counts as "same"; a file present in
    only one of them counts as different.
    """
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if not in1:
        # same only when the file is missing on both sides
        return not in2
    if not in2:
        return False
    fctx1 = ctx1.filectx(f)
    fctx2 = ctx2.filectx(f)
    return not fctx1.cmp(fctx2) and fctx1.flags() == fctx2.flags()
2571
2571
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset `old` folding in current working-directory changes.

    First records dirty working-directory files as a temporary commit
    (hooks and the active bookmark suppressed), then recreates `old` on
    top of its first parent with the combined file set, message, user and
    date, and finally either obsoletes or strips the rewritten commits.
    Returns the node of the amended changeset (or old.node() when nothing
    changed).
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        with repo.transaction('amend') as tr:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._bookmarks.active
            try:
                repo._bookmarks.active = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarks.active = activebookmark
                repo._bookmarks.recordchange(tr)
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # Fix: the previous code tested "if old.p2:", but p2 is a
                # bound method and is therefore always truthy, so the guard
                # never guarded.  Only merge in copy data from the second
                # parent when one actually exists.
                if old.p2().node() != nullid:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())
                files = [f for f in files if not samefile(f, ctx, base)]

                def filectxfn(repo, ctx_, path):
                    # serve file data from the intermediate commit,
                    # carrying flags and recomputed copy source along
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

                user = opts.get('user') or old.user()
                date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            # temporarily steer the phase of the new commit to match the
            # amended one (or secret when requested), restoring afterwards
            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
            #commit the whole amend process
            if createmarkers:
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, wlock)
    return newid
2754
2754
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's existing description, or prompt for one in the editor."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2760
2760
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Open the user's editor to obtain a commit message for ctx.

    The initial buffer comes from a [committemplate] template matching
    editform (most specific prefix wins) or, failing that, from
    buildcommittext().  The editor runs in the repository root with
    pending in-memory changes flushed so external tools can see them.
    HG:-prefixed lines and anything below the _linebelow marker are
    stripped from the result.

    Raises error.Abort on an empty message, or — when
    unchangedmessagedetection is set — when the user saved the template
    unmodified.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # look up [committemplate] entries from most to least specific:
    # 'changeset.<editform parts>' down to plain 'changeset'
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            tmpl = templater.unquotestring(tmpl)
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        # no template configured at any specificity: use the default text
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path)
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    # drop the HG: helper lines that were added to the buffer
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2811
2811
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit editor text for ctx from template tmpl.

    All [committemplate] entries except 'changeset' are made available to
    the templater as named sub-templates.  The rendered output is
    captured via the ui buffer and returned as a string.
    """
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)

    # expose other [committemplate] entries as reusable sub-templates
    for k, v in repo.ui.configitems('committemplate'):
        if k != 'changeset':
            t.t.cache[k] = v

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2828
2828
def hgprefix(msg):
    """Prefix each non-empty line of msg with "HG: ", dropping empty lines."""
    prefixed = []
    for line in msg.split("\n"):
        if line:
            prefixed.append("HG: %s" % line)
    return "\n".join(prefixed)
2831
2831
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit editor text for ctx."""
    lines = []
    add = lines.append
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        add(ctx.description())
    add("")
    add("") # Empty line between message and comments.
    add(hgprefix(_("Enter commit message."
                   " Lines beginning with 'HG:' are removed.")))
    add(hgprefix(extramsg))
    add("HG: --")
    add(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        add(hgprefix(_("branch merge")))
    if ctx.branch():
        add(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        add(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    lines.extend(hgprefix(_("subrepo %s") % s) for s in subs)
    lines.extend(hgprefix(_("added %s") % f) for f in added)
    lines.extend(hgprefix(_("changed %s") % f) for f in modified)
    lines.extend(hgprefix(_("removed %s") % f) for f in removed)
    if not added and not modified and not removed:
        add(hgprefix(_("no files changed")))
    add("")

    return "\n".join(lines)
2859
2859
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print informational messages after committing node on branch.

    Emits 'created new head' when the commit added a branch head,
    'reopening closed branch head' when a parent had closed the branch,
    and (in verbose/debug mode) the committed changeset identifier.
    bheads is the list of branch-head nodes before the commit.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    # a new head was created iff this isn't an amend, heads existed before,
    # the new node isn't among them, and no parent was a same-branch head
    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                # NOTE(review): '%d' % r relies on the parent context
                # supporting integer conversion -- confirm against the
                # context class.
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2907
2907
def postcommitstatus(repo, pats, opts):
    """Return working-directory status restricted to pats/opts."""
    wctx = repo[None]
    matcher = scmutil.match(wctx, pats, opts)
    return repo.status(match=matcher)
2910
2910
2911 def revert(ui, repo, ctx, parents, *pats, **opts):
2911 def revert(ui, repo, ctx, parents, *pats, **opts):
2912 parent, p2 = parents
2912 parent, p2 = parents
2913 node = ctx.node()
2913 node = ctx.node()
2914
2914
2915 mf = ctx.manifest()
2915 mf = ctx.manifest()
2916 if node == p2:
2916 if node == p2:
2917 parent = p2
2917 parent = p2
2918
2918
2919 # need all matching names in dirstate and manifest of target rev,
2919 # need all matching names in dirstate and manifest of target rev,
2920 # so have to walk both. do not print errors if files exist in one
2920 # so have to walk both. do not print errors if files exist in one
2921 # but not other. in both cases, filesets should be evaluated against
2921 # but not other. in both cases, filesets should be evaluated against
2922 # workingctx to get consistent result (issue4497). this means 'set:**'
2922 # workingctx to get consistent result (issue4497). this means 'set:**'
2923 # cannot be used to select missing files from target rev.
2923 # cannot be used to select missing files from target rev.
2924
2924
2925 # `names` is a mapping for all elements in working copy and target revision
2925 # `names` is a mapping for all elements in working copy and target revision
2926 # The mapping is in the form:
2926 # The mapping is in the form:
2927 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2927 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2928 names = {}
2928 names = {}
2929
2929
2930 with repo.wlock():
2930 with repo.wlock():
2931 ## filling of the `names` mapping
2931 ## filling of the `names` mapping
2932 # walk dirstate to fill `names`
2932 # walk dirstate to fill `names`
2933
2933
2934 interactive = opts.get('interactive', False)
2934 interactive = opts.get('interactive', False)
2935 wctx = repo[None]
2935 wctx = repo[None]
2936 m = scmutil.match(wctx, pats, opts)
2936 m = scmutil.match(wctx, pats, opts)
2937
2937
2938 # we'll need this later
2938 # we'll need this later
2939 targetsubs = sorted(s for s in wctx.substate if m(s))
2939 targetsubs = sorted(s for s in wctx.substate if m(s))
2940
2940
2941 if not m.always():
2941 if not m.always():
2942 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2942 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2943 names[abs] = m.rel(abs), m.exact(abs)
2943 names[abs] = m.rel(abs), m.exact(abs)
2944
2944
2945 # walk target manifest to fill `names`
2945 # walk target manifest to fill `names`
2946
2946
2947 def badfn(path, msg):
2947 def badfn(path, msg):
2948 if path in names:
2948 if path in names:
2949 return
2949 return
2950 if path in ctx.substate:
2950 if path in ctx.substate:
2951 return
2951 return
2952 path_ = path + '/'
2952 path_ = path + '/'
2953 for f in names:
2953 for f in names:
2954 if f.startswith(path_):
2954 if f.startswith(path_):
2955 return
2955 return
2956 ui.warn("%s: %s\n" % (m.rel(path), msg))
2956 ui.warn("%s: %s\n" % (m.rel(path), msg))
2957
2957
2958 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2958 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2959 if abs not in names:
2959 if abs not in names:
2960 names[abs] = m.rel(abs), m.exact(abs)
2960 names[abs] = m.rel(abs), m.exact(abs)
2961
2961
2962 # Find status of all file in `names`.
2962 # Find status of all file in `names`.
2963 m = scmutil.matchfiles(repo, names)
2963 m = scmutil.matchfiles(repo, names)
2964
2964
2965 changes = repo.status(node1=node, match=m,
2965 changes = repo.status(node1=node, match=m,
2966 unknown=True, ignored=True, clean=True)
2966 unknown=True, ignored=True, clean=True)
2967 else:
2967 else:
2968 changes = repo.status(node1=node, match=m)
2968 changes = repo.status(node1=node, match=m)
2969 for kind in changes:
2969 for kind in changes:
2970 for abs in kind:
2970 for abs in kind:
2971 names[abs] = m.rel(abs), m.exact(abs)
2971 names[abs] = m.rel(abs), m.exact(abs)
2972
2972
2973 m = scmutil.matchfiles(repo, names)
2973 m = scmutil.matchfiles(repo, names)
2974
2974
2975 modified = set(changes.modified)
2975 modified = set(changes.modified)
2976 added = set(changes.added)
2976 added = set(changes.added)
2977 removed = set(changes.removed)
2977 removed = set(changes.removed)
2978 _deleted = set(changes.deleted)
2978 _deleted = set(changes.deleted)
2979 unknown = set(changes.unknown)
2979 unknown = set(changes.unknown)
2980 unknown.update(changes.ignored)
2980 unknown.update(changes.ignored)
2981 clean = set(changes.clean)
2981 clean = set(changes.clean)
2982 modadded = set()
2982 modadded = set()
2983
2983
2984 # We need to account for the state of the file in the dirstate,
2984 # We need to account for the state of the file in the dirstate,
2985 # even when we revert against something else than parent. This will
2985 # even when we revert against something else than parent. This will
2986 # slightly alter the behavior of revert (doing back up or not, delete
2986 # slightly alter the behavior of revert (doing back up or not, delete
2987 # or just forget etc).
2987 # or just forget etc).
2988 if parent == node:
2988 if parent == node:
2989 dsmodified = modified
2989 dsmodified = modified
2990 dsadded = added
2990 dsadded = added
2991 dsremoved = removed
2991 dsremoved = removed
2992 # store all local modifications, useful later for rename detection
2992 # store all local modifications, useful later for rename detection
2993 localchanges = dsmodified | dsadded
2993 localchanges = dsmodified | dsadded
2994 modified, added, removed = set(), set(), set()
2994 modified, added, removed = set(), set(), set()
2995 else:
2995 else:
2996 changes = repo.status(node1=parent, match=m)
2996 changes = repo.status(node1=parent, match=m)
2997 dsmodified = set(changes.modified)
2997 dsmodified = set(changes.modified)
2998 dsadded = set(changes.added)
2998 dsadded = set(changes.added)
2999 dsremoved = set(changes.removed)
2999 dsremoved = set(changes.removed)
3000 # store all local modifications, useful later for rename detection
3000 # store all local modifications, useful later for rename detection
3001 localchanges = dsmodified | dsadded
3001 localchanges = dsmodified | dsadded
3002
3002
3003 # only take into account for removes between wc and target
3003 # only take into account for removes between wc and target
3004 clean |= dsremoved - removed
3004 clean |= dsremoved - removed
3005 dsremoved &= removed
3005 dsremoved &= removed
3006 # distinct between dirstate remove and other
3006 # distinct between dirstate remove and other
3007 removed -= dsremoved
3007 removed -= dsremoved
3008
3008
3009 modadded = added & dsmodified
3009 modadded = added & dsmodified
3010 added -= modadded
3010 added -= modadded
3011
3011
3012 # tell newly modified apart.
3012 # tell newly modified apart.
3013 dsmodified &= modified
3013 dsmodified &= modified
3014 dsmodified |= modified & dsadded # dirstate added may need backup
3014 dsmodified |= modified & dsadded # dirstate added may need backup
3015 modified -= dsmodified
3015 modified -= dsmodified
3016
3016
3017 # We need to wait for some post-processing to update this set
3017 # We need to wait for some post-processing to update this set
3018 # before making the distinction. The dirstate will be used for
3018 # before making the distinction. The dirstate will be used for
3019 # that purpose.
3019 # that purpose.
3020 dsadded = added
3020 dsadded = added
3021
3021
3022 # in case of merge, files that are actually added can be reported as
3022 # in case of merge, files that are actually added can be reported as
3023 # modified, we need to post process the result
3023 # modified, we need to post process the result
3024 if p2 != nullid:
3024 if p2 != nullid:
3025 mergeadd = set(dsmodified)
3025 mergeadd = set(dsmodified)
3026 for path in dsmodified:
3026 for path in dsmodified:
3027 if path in mf:
3027 if path in mf:
3028 mergeadd.remove(path)
3028 mergeadd.remove(path)
3029 dsadded |= mergeadd
3029 dsadded |= mergeadd
3030 dsmodified -= mergeadd
3030 dsmodified -= mergeadd
3031
3031
3032 # if f is a rename, update `names` to also revert the source
3032 # if f is a rename, update `names` to also revert the source
3033 cwd = repo.getcwd()
3033 cwd = repo.getcwd()
3034 for f in localchanges:
3034 for f in localchanges:
3035 src = repo.dirstate.copied(f)
3035 src = repo.dirstate.copied(f)
3036 # XXX should we check for rename down to target node?
3036 # XXX should we check for rename down to target node?
3037 if src and src not in names and repo.dirstate[src] == 'r':
3037 if src and src not in names and repo.dirstate[src] == 'r':
3038 dsremoved.add(src)
3038 dsremoved.add(src)
3039 names[src] = (repo.pathto(src, cwd), True)
3039 names[src] = (repo.pathto(src, cwd), True)
3040
3040
3041 # determine the exact nature of the deleted changesets
3041 # determine the exact nature of the deleted changesets
3042 deladded = set(_deleted)
3042 deladded = set(_deleted)
3043 for path in _deleted:
3043 for path in _deleted:
3044 if path in mf:
3044 if path in mf:
3045 deladded.remove(path)
3045 deladded.remove(path)
3046 deleted = _deleted - deladded
3046 deleted = _deleted - deladded
3047
3047
3048 # distinguish between file to forget and the other
3048 # distinguish between file to forget and the other
3049 added = set()
3049 added = set()
3050 for abs in dsadded:
3050 for abs in dsadded:
3051 if repo.dirstate[abs] != 'a':
3051 if repo.dirstate[abs] != 'a':
3052 added.add(abs)
3052 added.add(abs)
3053 dsadded -= added
3053 dsadded -= added
3054
3054
3055 for abs in deladded:
3055 for abs in deladded:
3056 if repo.dirstate[abs] == 'a':
3056 if repo.dirstate[abs] == 'a':
3057 dsadded.add(abs)
3057 dsadded.add(abs)
3058 deladded -= dsadded
3058 deladded -= dsadded
3059
3059
3060 # For files marked as removed, we check if an unknown file is present at
3060 # For files marked as removed, we check if an unknown file is present at
3061 # the same path. If a such file exists it may need to be backed up.
3061 # the same path. If a such file exists it may need to be backed up.
3062 # Making the distinction at this stage helps have simpler backup
3062 # Making the distinction at this stage helps have simpler backup
3063 # logic.
3063 # logic.
3064 removunk = set()
3064 removunk = set()
3065 for abs in removed:
3065 for abs in removed:
3066 target = repo.wjoin(abs)
3066 target = repo.wjoin(abs)
3067 if os.path.lexists(target):
3067 if os.path.lexists(target):
3068 removunk.add(abs)
3068 removunk.add(abs)
3069 removed -= removunk
3069 removed -= removunk
3070
3070
3071 dsremovunk = set()
3071 dsremovunk = set()
3072 for abs in dsremoved:
3072 for abs in dsremoved:
3073 target = repo.wjoin(abs)
3073 target = repo.wjoin(abs)
3074 if os.path.lexists(target):
3074 if os.path.lexists(target):
3075 dsremovunk.add(abs)
3075 dsremovunk.add(abs)
3076 dsremoved -= dsremovunk
3076 dsremoved -= dsremovunk
3077
3077
3078 # action to be actually performed by revert
3078 # action to be actually performed by revert
3079 # (<list of file>, message>) tuple
3079 # (<list of file>, message>) tuple
3080 actions = {'revert': ([], _('reverting %s\n')),
3080 actions = {'revert': ([], _('reverting %s\n')),
3081 'add': ([], _('adding %s\n')),
3081 'add': ([], _('adding %s\n')),
3082 'remove': ([], _('removing %s\n')),
3082 'remove': ([], _('removing %s\n')),
3083 'drop': ([], _('removing %s\n')),
3083 'drop': ([], _('removing %s\n')),
3084 'forget': ([], _('forgetting %s\n')),
3084 'forget': ([], _('forgetting %s\n')),
3085 'undelete': ([], _('undeleting %s\n')),
3085 'undelete': ([], _('undeleting %s\n')),
3086 'noop': (None, _('no changes needed to %s\n')),
3086 'noop': (None, _('no changes needed to %s\n')),
3087 'unknown': (None, _('file not managed: %s\n')),
3087 'unknown': (None, _('file not managed: %s\n')),
3088 }
3088 }
3089
3089
3090 # "constant" that convey the backup strategy.
3090 # "constant" that convey the backup strategy.
3091 # All set to `discard` if `no-backup` is set do avoid checking
3091 # All set to `discard` if `no-backup` is set do avoid checking
3092 # no_backup lower in the code.
3092 # no_backup lower in the code.
3093 # These values are ordered for comparison purposes
3093 # These values are ordered for comparison purposes
3094 backupinteractive = 3 # do backup if interactively modified
3094 backupinteractive = 3 # do backup if interactively modified
3095 backup = 2 # unconditionally do backup
3095 backup = 2 # unconditionally do backup
3096 check = 1 # check if the existing file differs from target
3096 check = 1 # check if the existing file differs from target
3097 discard = 0 # never do backup
3097 discard = 0 # never do backup
3098 if opts.get('no_backup'):
3098 if opts.get('no_backup'):
3099 backupinteractive = backup = check = discard
3099 backupinteractive = backup = check = discard
3100 if interactive:
3100 if interactive:
3101 dsmodifiedbackup = backupinteractive
3101 dsmodifiedbackup = backupinteractive
3102 else:
3102 else:
3103 dsmodifiedbackup = backup
3103 dsmodifiedbackup = backup
3104 tobackup = set()
3104 tobackup = set()
3105
3105
3106 backupanddel = actions['remove']
3106 backupanddel = actions['remove']
3107 if not opts.get('no_backup'):
3107 if not opts.get('no_backup'):
3108 backupanddel = actions['drop']
3108 backupanddel = actions['drop']
3109
3109
3110 disptable = (
3110 disptable = (
3111 # dispatch table:
3111 # dispatch table:
3112 # file state
3112 # file state
3113 # action
3113 # action
3114 # make backup
3114 # make backup
3115
3115
3116 ## Sets that results that will change file on disk
3116 ## Sets that results that will change file on disk
3117 # Modified compared to target, no local change
3117 # Modified compared to target, no local change
3118 (modified, actions['revert'], discard),
3118 (modified, actions['revert'], discard),
3119 # Modified compared to target, but local file is deleted
3119 # Modified compared to target, but local file is deleted
3120 (deleted, actions['revert'], discard),
3120 (deleted, actions['revert'], discard),
3121 # Modified compared to target, local change
3121 # Modified compared to target, local change
3122 (dsmodified, actions['revert'], dsmodifiedbackup),
3122 (dsmodified, actions['revert'], dsmodifiedbackup),
3123 # Added since target
3123 # Added since target
3124 (added, actions['remove'], discard),
3124 (added, actions['remove'], discard),
3125 # Added in working directory
3125 # Added in working directory
3126 (dsadded, actions['forget'], discard),
3126 (dsadded, actions['forget'], discard),
3127 # Added since target, have local modification
3127 # Added since target, have local modification
3128 (modadded, backupanddel, backup),
3128 (modadded, backupanddel, backup),
3129 # Added since target but file is missing in working directory
3129 # Added since target but file is missing in working directory
3130 (deladded, actions['drop'], discard),
3130 (deladded, actions['drop'], discard),
3131 # Removed since target, before working copy parent
3131 # Removed since target, before working copy parent
3132 (removed, actions['add'], discard),
3132 (removed, actions['add'], discard),
3133 # Same as `removed` but an unknown file exists at the same path
3133 # Same as `removed` but an unknown file exists at the same path
3134 (removunk, actions['add'], check),
3134 (removunk, actions['add'], check),
3135 # Removed since targe, marked as such in working copy parent
3135 # Removed since targe, marked as such in working copy parent
3136 (dsremoved, actions['undelete'], discard),
3136 (dsremoved, actions['undelete'], discard),
3137 # Same as `dsremoved` but an unknown file exists at the same path
3137 # Same as `dsremoved` but an unknown file exists at the same path
3138 (dsremovunk, actions['undelete'], check),
3138 (dsremovunk, actions['undelete'], check),
3139 ## the following sets does not result in any file changes
3139 ## the following sets does not result in any file changes
3140 # File with no modification
3140 # File with no modification
3141 (clean, actions['noop'], discard),
3141 (clean, actions['noop'], discard),
3142 # Existing file, not tracked anywhere
3142 # Existing file, not tracked anywhere
3143 (unknown, actions['unknown'], discard),
3143 (unknown, actions['unknown'], discard),
3144 )
3144 )
3145
3145
3146 for abs, (rel, exact) in sorted(names.items()):
3146 for abs, (rel, exact) in sorted(names.items()):
3147 # target file to be touch on disk (relative to cwd)
3147 # target file to be touch on disk (relative to cwd)
3148 target = repo.wjoin(abs)
3148 target = repo.wjoin(abs)
3149 # search the entry in the dispatch table.
3149 # search the entry in the dispatch table.
3150 # if the file is in any of these sets, it was touched in the working
3150 # if the file is in any of these sets, it was touched in the working
3151 # directory parent and we are sure it needs to be reverted.
3151 # directory parent and we are sure it needs to be reverted.
3152 for table, (xlist, msg), dobackup in disptable:
3152 for table, (xlist, msg), dobackup in disptable:
3153 if abs not in table:
3153 if abs not in table:
3154 continue
3154 continue
3155 if xlist is not None:
3155 if xlist is not None:
3156 xlist.append(abs)
3156 xlist.append(abs)
3157 if dobackup:
3157 if dobackup:
3158 # If in interactive mode, don't automatically create
3158 # If in interactive mode, don't automatically create
3159 # .orig files (issue4793)
3159 # .orig files (issue4793)
3160 if dobackup == backupinteractive:
3160 if dobackup == backupinteractive:
3161 tobackup.add(abs)
3161 tobackup.add(abs)
3162 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3162 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3163 bakname = scmutil.origpath(ui, repo, rel)
3163 bakname = scmutil.origpath(ui, repo, rel)
3164 ui.note(_('saving current version of %s as %s\n') %
3164 ui.note(_('saving current version of %s as %s\n') %
3165 (rel, bakname))
3165 (rel, bakname))
3166 if not opts.get('dry_run'):
3166 if not opts.get('dry_run'):
3167 if interactive:
3167 if interactive:
3168 util.copyfile(target, bakname)
3168 util.copyfile(target, bakname)
3169 else:
3169 else:
3170 util.rename(target, bakname)
3170 util.rename(target, bakname)
3171 if ui.verbose or not exact:
3171 if ui.verbose or not exact:
3172 if not isinstance(msg, basestring):
3172 if not isinstance(msg, basestring):
3173 msg = msg(abs)
3173 msg = msg(abs)
3174 ui.status(msg % rel)
3174 ui.status(msg % rel)
3175 elif exact:
3175 elif exact:
3176 ui.warn(msg % rel)
3176 ui.warn(msg % rel)
3177 break
3177 break
3178
3178
3179 if not opts.get('dry_run'):
3179 if not opts.get('dry_run'):
3180 needdata = ('revert', 'add', 'undelete')
3180 needdata = ('revert', 'add', 'undelete')
3181 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3181 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3182 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3182 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3183
3183
3184 if targetsubs:
3184 if targetsubs:
3185 # Revert the subrepos on the revert list
3185 # Revert the subrepos on the revert list
3186 for sub in targetsubs:
3186 for sub in targetsubs:
3187 try:
3187 try:
3188 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3188 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3189 except KeyError:
3189 except KeyError:
3190 raise error.Abort("subrepository '%s' does not exist in %s!"
3190 raise error.Abort("subrepository '%s' does not exist in %s!"
3191 % (sub, short(ctx.node())))
3191 % (sub, short(ctx.node())))
3192
3192
3193 def _revertprefetch(repo, ctx, *files):
3193 def _revertprefetch(repo, ctx, *files):
3194 """Let extension changing the storage layer prefetch content"""
3194 """Let extension changing the storage layer prefetch content"""
3195 pass
3195 pass
3196
3196
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """Actually perform all the actions computed for revert.

    This is an independent function to let extensions plug in and react to
    the imminent revert.

    ``parents`` is the (parent, p2) pair of working-directory parents,
    ``ctx`` the context being reverted to, and ``actions`` a dict mapping
    action names to (file list, message) pairs. ``tobackup`` is the set of
    files that only need a backup if they are modified interactively.

    Make sure you have the working directory locked when calling this function.
    """
    parent, p2 = parents
    node = ctx.node()
    # files the user declines to touch in interactive mode; fed to the
    # matcher below through the "exclude" option
    excluded_files = []
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write the target revision's content (and flags) of f into the
        # working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # best-effort unlink (the file may already be gone), then mark the
        # file as removed in the dirstate
        try:
            repo.wvfs.unlinkpath(f)
        except OSError:
            pass
        repo.dirstate.remove(f)

    audit_path = pathutil.pathauditor(repo.root)
    # forget: stop tracking files that were only added in the dirstate
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                # user said no: keep the file out of the interactive diff
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    # remove: delete files from disk as well as from tracking
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                doremove(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            doremove(f)
    # drop: file already missing on disk, only the dirstate entry goes
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            # reverting to something other than the parent: present the
            # diff the other way around unless the experimental knob says no
            operation = 'revert'
            reversehunks = repo.ui.configbool('experimental',
                'revertalternateinteractivemode',
                True)
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            # let the user pick which hunks to apply
            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                # back up each file at most once
                tobackup.remove(abs)
            c.write(fp)
        # a non-zero offset means at least one hunk was written
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        # non-interactive: simply check out every file to revert
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    # undeleted files end up clean only when reverting to the sole parent
    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # restore copy/rename metadata recorded between parent and ctx
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3335
3335
def command(table):
    """Returns a function object to be used as a decorator for making commands.

    This function receives a command table as its argument. The table should
    be a dict.

    The returned function can be used as a decorator for adding commands
    to that command table. This function accepts multiple arguments to define
    a command.

    The first argument is the command name.

    The options argument is an iterable of tuples defining command arguments.
    See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.

    The synopsis argument defines a short, one line summary of how to use the
    command. This shows up in the help output.

    The norepo argument defines whether the command does not require a
    local repository. Most commands operate against a repository, thus the
    default is False.

    The optionalrepo argument defines whether the command optionally requires
    a local repository.

    The inferrepo argument defines whether to try to find a repository from the
    command line arguments. If True, arguments will be examined for potential
    repository locations. See ``findrepo()``. If a repository is found, it
    will be used.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            # stash the repository-requirement flags on the function itself
            # so the dispatcher can inspect them later
            for attr, value in [('norepo', norepo),
                                ('optionalrepo', optionalrepo),
                                ('inferrepo', inferrepo)]:
                setattr(func, attr, value)
            # table entries are (func, options[, synopsis]) tuples; the
            # synopsis element is present only when one was supplied
            entry = (func, list(options))
            if synopsis:
                entry += (synopsis,)
            table[name] = entry
            return func
        return decorator

    return cmd
3380
3380
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
3398
3398
# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# Each entry is a (state file, clearable, allowcommit, error, hint) tuple:
#  - state file:  name of the file under .hg/ marking the operation
#  - clearable:   whether 'hg update' may silently discard the state
#  - allowcommit: whether committing is allowed while the state exists
#  - error:       message used when the state blocks another command
#  - hint:        how to finish or abort the pending operation
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3409
3409
def checkunfinished(repo, commit=False):
    """Look for an unfinished multistep operation, like graft, and abort
    if one is found.

    With ``commit=True``, states whose 'allowcommit' flag is set are
    tolerated. It's probably good to check this right before
    bailifchanged().
    """
    for statefile, _clearable, allowcommit, msg, hint in unfinishedstates:
        if commit and allowcommit:
            # this operation explicitly permits committing over it
            continue
        if repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3420
3420
def clearunfinished(repo):
    """Check for unfinished operations (as checkunfinished does) and clear
    the ones that are clearable.
    """
    # first pass: any state we may not clear is a hard stop
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # second pass: delete the state files update is allowed to discard
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.vfs.join(statefile))
3431
3431
# (state file, command) pairs consulted by howtocontinue(): when the
# .hg/{state file} exists, the named command is the one that finishes
# the corresponding multistep operation.
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3436
3436
def howtocontinue(repo):
    """Check for an unfinished operation and return the command to finish
    it.

    Scans ``afterresolvedstates`` for a .hg/{file} left behind by a
    multistep command; failing that, a dirty working directory (including
    dirty subrepos) suggests 'hg commit'.

    Returns a (msg, warning) tuple. 'msg' is a "continue: ..." string (or
    None when nothing is pending) and 'warning' is a boolean.
    """
    contmsg = _("continue: %s")
    for statefile, cmdmsg in afterresolvedstates:
        if repo.vfs.exists(statefile):
            return contmsg % cmdmsg, True
    wctx = repo[None]
    if any(repo.status()) or any(wctx.sub(s).dirty() for s in wctx.substate):
        return contmsg % _("hg commit"), False
    return None, None
3457
3457
def checkafterresolved(repo):
    """Inform the user about the next action after completing hg resolve.

    Reports through repo.ui.warn when howtocontinue() matched an entry in
    afterresolvedstates, through repo.ui.note otherwise; stays silent when
    nothing is in progress.
    """
    msg, iswarning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if iswarning else repo.ui.note
    reporter("%s\n" % msg)
3472
3472
def wrongtooltocontinue(repo, task):
    """Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task; no hint is offered when
    howtocontinue() only reported a dirty working directory (the
    'hg commit' case) or found nothing pending.
    """
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,3745 +1,3745 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import codecs
20 import codecs
21 import collections
21 import collections
22 import datetime
22 import datetime
23 import errno
23 import errno
24 import gc
24 import gc
25 import hashlib
25 import hashlib
26 import imp
26 import imp
27 import os
27 import os
28 import platform as pyplatform
28 import platform as pyplatform
29 import re as remod
29 import re as remod
30 import shutil
30 import shutil
31 import signal
31 import signal
32 import socket
32 import socket
33 import stat
33 import stat
34 import string
34 import string
35 import subprocess
35 import subprocess
36 import sys
36 import sys
37 import tempfile
37 import tempfile
38 import textwrap
38 import textwrap
39 import time
39 import time
40 import traceback
40 import traceback
41 import warnings
41 import warnings
42 import zlib
42 import zlib
43
43
44 from . import (
44 from . import (
45 encoding,
45 encoding,
46 error,
46 error,
47 i18n,
47 i18n,
48 osutil,
48 osutil,
49 parsers,
49 parsers,
50 pycompat,
50 pycompat,
51 )
51 )
52
52
53 cookielib = pycompat.cookielib
53 cookielib = pycompat.cookielib
54 empty = pycompat.empty
54 empty = pycompat.empty
55 httplib = pycompat.httplib
55 httplib = pycompat.httplib
56 httpserver = pycompat.httpserver
56 httpserver = pycompat.httpserver
57 pickle = pycompat.pickle
57 pickle = pycompat.pickle
58 queue = pycompat.queue
58 queue = pycompat.queue
59 socketserver = pycompat.socketserver
59 socketserver = pycompat.socketserver
60 stderr = pycompat.stderr
60 stderr = pycompat.stderr
61 stdin = pycompat.stdin
61 stdin = pycompat.stdin
62 stdout = pycompat.stdout
62 stdout = pycompat.stdout
63 stringio = pycompat.stringio
63 stringio = pycompat.stringio
64 urlerr = pycompat.urlerr
64 urlerr = pycompat.urlerr
65 urlreq = pycompat.urlreq
65 urlreq = pycompat.urlreq
66 xmlrpclib = pycompat.xmlrpclib
66 xmlrpclib = pycompat.xmlrpclib
67
67
def isatty(fp):
    """Return True when fp reports being attached to a terminal.

    Objects without an isatty() method are treated as non-terminals.
    """
    try:
        attached = fp.isatty()
    except AttributeError:
        return False
    return attached
73
73
74 # glibc determines buffering on first write to stdout - if we replace a TTY
74 # glibc determines buffering on first write to stdout - if we replace a TTY
75 # destined stdout with a pipe destined stdout (e.g. pager), we want line
75 # destined stdout with a pipe destined stdout (e.g. pager), we want line
76 # buffering
76 # buffering
77 if isatty(stdout):
77 if isatty(stdout):
78 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
78 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
79
79
80 if pycompat.osname == 'nt':
80 if pycompat.osname == 'nt':
81 from . import windows as platform
81 from . import windows as platform
82 stdout = platform.winstdout(stdout)
82 stdout = platform.winstdout(stdout)
83 else:
83 else:
84 from . import posix as platform
84 from . import posix as platform
85
85
86 _ = i18n._
86 _ = i18n._
87
87
88 bindunixsocket = platform.bindunixsocket
88 bindunixsocket = platform.bindunixsocket
89 cachestat = platform.cachestat
89 cachestat = platform.cachestat
90 checkexec = platform.checkexec
90 checkexec = platform.checkexec
91 checklink = platform.checklink
91 checklink = platform.checklink
92 copymode = platform.copymode
92 copymode = platform.copymode
93 executablepath = platform.executablepath
93 executablepath = platform.executablepath
94 expandglobs = platform.expandglobs
94 expandglobs = platform.expandglobs
95 explainexit = platform.explainexit
95 explainexit = platform.explainexit
96 findexe = platform.findexe
96 findexe = platform.findexe
97 gethgcmd = platform.gethgcmd
97 gethgcmd = platform.gethgcmd
98 getuser = platform.getuser
98 getuser = platform.getuser
99 getpid = os.getpid
99 getpid = os.getpid
100 groupmembers = platform.groupmembers
100 groupmembers = platform.groupmembers
101 groupname = platform.groupname
101 groupname = platform.groupname
102 hidewindow = platform.hidewindow
102 hidewindow = platform.hidewindow
103 isexec = platform.isexec
103 isexec = platform.isexec
104 isowner = platform.isowner
104 isowner = platform.isowner
105 localpath = platform.localpath
105 localpath = platform.localpath
106 lookupreg = platform.lookupreg
106 lookupreg = platform.lookupreg
107 makedir = platform.makedir
107 makedir = platform.makedir
108 nlinks = platform.nlinks
108 nlinks = platform.nlinks
109 normpath = platform.normpath
109 normpath = platform.normpath
110 normcase = platform.normcase
110 normcase = platform.normcase
111 normcasespec = platform.normcasespec
111 normcasespec = platform.normcasespec
112 normcasefallback = platform.normcasefallback
112 normcasefallback = platform.normcasefallback
113 openhardlinks = platform.openhardlinks
113 openhardlinks = platform.openhardlinks
114 oslink = platform.oslink
114 oslink = platform.oslink
115 parsepatchoutput = platform.parsepatchoutput
115 parsepatchoutput = platform.parsepatchoutput
116 pconvert = platform.pconvert
116 pconvert = platform.pconvert
117 poll = platform.poll
117 poll = platform.poll
118 popen = platform.popen
118 popen = platform.popen
119 posixfile = platform.posixfile
119 posixfile = platform.posixfile
120 quotecommand = platform.quotecommand
120 quotecommand = platform.quotecommand
121 readpipe = platform.readpipe
121 readpipe = platform.readpipe
122 rename = platform.rename
122 rename = platform.rename
123 removedirs = platform.removedirs
123 removedirs = platform.removedirs
124 samedevice = platform.samedevice
124 samedevice = platform.samedevice
125 samefile = platform.samefile
125 samefile = platform.samefile
126 samestat = platform.samestat
126 samestat = platform.samestat
127 setbinary = platform.setbinary
127 setbinary = platform.setbinary
128 setflags = platform.setflags
128 setflags = platform.setflags
129 setsignalhandler = platform.setsignalhandler
129 setsignalhandler = platform.setsignalhandler
130 shellquote = platform.shellquote
130 shellquote = platform.shellquote
131 spawndetached = platform.spawndetached
131 spawndetached = platform.spawndetached
132 split = platform.split
132 split = platform.split
133 sshargs = platform.sshargs
133 sshargs = platform.sshargs
134 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
134 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
135 statisexec = platform.statisexec
135 statisexec = platform.statisexec
136 statislink = platform.statislink
136 statislink = platform.statislink
137 testpid = platform.testpid
137 testpid = platform.testpid
138 umask = platform.umask
138 umask = platform.umask
139 unlink = platform.unlink
139 unlink = platform.unlink
140 username = platform.username
140 username = platform.username
141
141
142 # Python compatibility
142 # Python compatibility
143
143
144 _notset = object()
144 _notset = object()
145
145
146 # disable Python's problematic floating point timestamps (issue4836)
146 # disable Python's problematic floating point timestamps (issue4836)
147 # (Python hypocritically says you shouldn't change this behavior in
147 # (Python hypocritically says you shouldn't change this behavior in
148 # libraries, and sure enough Mercurial is not a library.)
148 # libraries, and sure enough Mercurial is not a library.)
149 os.stat_float_times(False)
149 os.stat_float_times(False)
150
150
def safehasattr(thing, attr):
    """Return True if thing has the attribute attr.

    Unlike py2's hasattr(), only an actually-missing attribute counts as
    absent: getattr with a unique sentinel swallows nothing but
    AttributeError.
    """
    sentinel = object()
    return getattr(thing, attr, sentinel) is not sentinel
153
153
def bitsfrom(container):
    """OR together every value in container and return the combined bits."""
    combined = 0
    for flag in container:
        combined |= flag
    return combined
159
159
160 # python 2.6 still have deprecation warning enabled by default. We do not want
160 # python 2.6 still have deprecation warning enabled by default. We do not want
161 # to display anything to standard user so detect if we are running test and
161 # to display anything to standard user so detect if we are running test and
162 # only use python deprecation warning in this case.
162 # only use python deprecation warning in this case.
163 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
163 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
164 if _dowarn:
164 if _dowarn:
165 # explicitly unfilter our warning for python 2.7
165 # explicitly unfilter our warning for python 2.7
166 #
166 #
167 # The option of setting PYTHONWARNINGS in the test runner was investigated.
167 # The option of setting PYTHONWARNINGS in the test runner was investigated.
168 # However, module name set through PYTHONWARNINGS was exactly matched, so
168 # However, module name set through PYTHONWARNINGS was exactly matched, so
169 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
169 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
170 # makes the whole PYTHONWARNINGS thing useless for our usecase.
170 # makes the whole PYTHONWARNINGS thing useless for our usecase.
171 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
171 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
172 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
172 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
173 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
173 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
174
174
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue an python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    # silently skip unless HGEMITWARNINGS enabled warning emission
    if not _dowarn:
        return
    msg += ("\n(compatibility will be dropped after Mercurial-%s,"
            " update your code.)") % version
    # +1 so the warning points at our caller, not at this helper
    warnings.warn(msg, DeprecationWarning, stacklevel + 1)
184
184
# map of supported digest algorithm name -> hashlib constructor
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every entry in the strength ordering must be a known digest
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
195
195
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create one hash object per name in digests, optionally seeded
        with the initial data s. Raises Abort for unknown digest names."""
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed data to every underlying hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for the named algorithm."""
        if key not in DIGESTS:
            # fix: the message previously interpolated 'k', which is not a
            # local here -- it picked up the stale module-level loop variable
            # from the DIGESTS_BY_STRENGTH sanity check, so the error always
            # reported the wrong digest name instead of the requested key
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
242
242
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        # number of bytes actually read so far
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle while updating digests and count."""
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Raise Abort when the byte count or any digest does not match."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
274
274
# Python 3 removed the builtin 'buffer'; provide a replacement. The py2
# fallback returns a plain (copying) slice, while the py3 variant returns a
# zero-copy memoryview slice.
try:
    buffer = buffer
except NameError:
    if not pycompat.ispy3:
        def buffer(sliceable, offset=0, length=None):
            if length is not None:
                return sliceable[offset:offset + length]
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0, length=None):
            if length is not None:
                return memoryview(sliceable)[offset:offset + length]
            return memoryview(sliceable)[offset:]
288
288
# on POSIX it is safe (and desirable) to close inherited fds in children
closefds = pycompat.osname == 'posix'

# number of bytes requested from the OS per buffered read
_chunksize = 4096

class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        # chunks read from the pipe but not yet consumed by the caller
        self._buffer = []
        # set once os.read() returns nothing (end of stream)
        self._eof = False
        # total number of buffered bytes across all chunks in _buffer
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped pipe
        return self._input.closed

    def fileno(self):
        # expose the raw descriptor so this object can be select()ed/polled
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """Return up to 'size' bytes, reading from the pipe as needed."""
        # keep filling until enough bytes are buffered or the stream ends
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        """Return one line (newline included), or the rest at end of file."""
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first '\n' in the most recent chunk, -1 if none
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # pull more data until a newline shows up or we hit end of stream
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks into a single string before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # the unconsumed remainder becomes the sole buffered chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
386
386
def popen2(cmd, env=None, newlines=False):
    """Spawn cmd through a shell and return its (stdin, stdout) pipes.

    Setting bufsize to -1 lets the system decide the buffer size.
    The default for bufsize is 0, meaning unbuffered. This leads to
    poor performance on Mac OS X: http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
397
397
def popen3(cmd, env=None, newlines=False):
    """Spawn cmd and return (stdin, stdout, stderr), dropping the process."""
    pipes = popen4(cmd, env, newlines)
    return pipes[0], pipes[1], pipes[2]
401
401
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn cmd through a shell; return (stdin, stdout, stderr, process)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
410
410
def version():
    """Return version information if available, 'unknown' otherwise."""
    try:
        # __version__ is generated at build time and may be absent
        from . import __version__ as versionmod
    except ImportError:
        return 'unknown'
    return versionmod.version
418
418
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # everything after the first '+' or '-' is the "extra" part
    cut = -1
    for idx, ch in enumerate(v):
        if ch in '+-':
            cut = idx
            break
    if cut < 0:
        vparts, extra = v, None
    else:
        vparts, extra = v[:cut], v[cut + 1:]

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
487
487
# used by parsedate
# strptime() patterns tried in order when parsing a user-supplied date
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M', # without seconds
    '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
    '%Y-%m-%dT%H%M', # without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M', # without seconds
    '%Y-%m-%d %H%M%S', # without :
    '%Y-%m-%d %H%M', # without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# additional coarse-grained (year/month only) patterns, accepted only where
# an extended date specification makes sense (e.g. date ranges)
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
529
529
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # a one-slot list doubles as the "already computed" flag
        results = []
        def wrapper():
            if not results:
                results.append(func())
            return results[0]
        return wrapper
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def wrapper(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def wrapper(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]
    return wrapper
555
555
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration yields keys in first-insertion order; assigning to an
    existing key moves it to the end of that order.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)

    def copy(self):
        return sortdict(self)

    def __setitem__(self, key, val):
        # re-assignment moves the key to the end of the iteration order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)

    def __iter__(self):
        return iter(self._list)

    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for key, value in src:
            self[key] = value

    def clear(self):
        dict.clear(self)
        self._list = []

    def items(self):
        return [(key, self[key]) for key in self._list]

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)

    def pop(self, key, *args, **kwargs):
        try:
            self._list.remove(key)
        except ValueError:
            # absent key: let dict.pop handle default value or KeyError
            pass
        return dict.pop(self, key, *args, **kwargs)

    def keys(self):
        return list(self._list)

    def iterkeys(self):
        return iter(self._list)

    def iteritems(self):
        for key in self._list:
            yield key, self[key]

    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)

    def __repr__(self):
        name = self.__class__.__name__
        if not self:
            return '%s()' % name
        return '%s(%r)' % (name, self.items())
604
604
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps per-node memory small; caches may hold many nodes
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        # link pointers; wired into the circular list by lrucachedict
        self.next = None
        self.prev = None

        # _notset (module-level sentinel) marks a node holding no entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
623
623
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # key -> _lrucachenode; ordering lives only in the linked list
        self._cache = {}

        # start with a single node linked to itself; more nodes are added
        # lazily by _addcapacity() until _capacity nodes exist
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # raises KeyError for missing keys, like a regular dict
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        # NOTE: unlike __getitem__, a hit here still refreshes recency
        # via the __getitem__ path? No -- this reads the node directly
        # and does NOT update LRU order.
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # empty every node in place; the linked list itself is reused
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
782
782
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    maxsize = 20
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                # refresh recency: move key to the back of the queue
                order.remove(arg)
            else:
                if len(cache) > maxsize:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > maxsize:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
809
809
class propertycache(object):
    """Descriptor that computes an attribute once and caches it.

    On first access the wrapped function runs and its result is stored in
    the instance __dict__ under the same name; since this is a non-data
    descriptor, later lookups hit the cached value directly.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
822
822
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
829
829
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = outname = None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        infile = os.fdopen(infd, pycompat.sysstr('wb'))
        infile.write(s)
        infile.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        # we only need the output file's name, not the open descriptor
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname).replace('OUTFILE', outname)
        code = os.system(cmd)
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of whichever temp files were created
        for name in (inname, outname):
            if name:
                try:
                    os.unlink(name)
                except OSError:
                    pass
863
863
# maps a command prefix to the filter strategy handling it; consulted in
# order by filter(), which falls back to a plain pipe for bare commands
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
868
868
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            # strip the prefix (and surrounding whitespace) before running
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
875
875
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
879
879
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for x > 0, and 0 for x == 0 (same contract as
        # the classic shift-and-count loop)
        if not x:
            return 0
        return x.bit_length() - 1

    buf = []
    buflen = 0
    for chunk in source:
        buf.append(chunk)
        buflen += len(chunk)
        if buflen < min:
            continue
        if min < max:
            # grow the threshold: at least double it, possibly jumping to
            # the largest power of two not exceeding what we just buffered
            min = min << 1
            nmin = 1 << log2(buflen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(buf)
        buf = []
        buflen = 0
    if buf:
        yield ''.join(buf)
910
910
# re-exported for convenience so util users need not import error directly
Abort = error.Abort

def always(fn):
    """Predicate that ignores its argument and always returns True."""
    return True

def never(fn):
    """Predicate that ignores its argument and always returns False."""
    return False
918
918
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    if sys.version_info >= (2, 7):
        # nothing to work around on modern Pythons
        return func

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had GC on to begin with
            if wasenabled:
                gc.enable()
    return wrapper
942
942
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # different drives: no relative path is possible, fall back to
        # an absolute path under root
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    # count the shared leading path components
    common = 0
    for here, there in zip(fromparts, toparts):
        if here != there:
            break
        common += 1
    # climb out of the unshared part of n1, then descend into n2
    updirs = ['..'] * (len(fromparts) - common)
    return pycompat.ossep.join(updirs + toparts[common:]) or '.'
968
968
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"): # new py2exe
        return True
    if safehasattr(sys, "importers"): # old py2exe
        return True
    return imp.is_frozen(u"__main__") # tools/freeze
978
978
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)

# cached path of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None
989
989
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and cached in the module-level
    _hgexecutable; candidates are tried in priority order: $HG, frozen
    executables, a __main__ script named 'hg', then the search path.
    """
    if _hgexecutable is None:
        hg = encoding.environ.get('HG')
        mainmod = sys.modules[pycompat.sysstr('__main__')]
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            # the running entry script itself is 'hg'
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
1013
1013
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # stores into the module-level cache read back by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
1018
1018
1019 def _isstdout(f):
1019 def _isstdout(f):
1020 fileno = getattr(f, 'fileno', None)
1020 fileno = getattr(f, 'fileno', None)
1021 return fileno and fileno() == sys.__stdout__.fileno()
1021 return fileno and fileno() == sys.__stdout__.fileno()
1022
1022
def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is True:
            return '1'
        if val is None or val is False:
            return '0'
        return str(val)

    env = dict(encoding.environ)
    if environ:
        env.update((key, py2shell(value))
                   for key, value in environ.iteritems())
    # make sure child hg invocations can find us
    env['HG'] = hgexecutable()
    return env
1037
1037
def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        # flush our own buffered output first so it is not interleaved
        # with the child's output
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
                                            and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = shellenviron(environ)
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # capture requested: merge stderr into stdout and copy the
            # child's output line by line into ``out``
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        # on OpenVMS an odd exit status means success
        rc = 0
    return rc
1072
1072
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == 1:
                # a one-frame traceback means the TypeError came from the
                # call itself, i.e. the arguments did not match
                raise error.SignatureError
            raise

    return check
1084
1084
1085 # a whilelist of known filesystems where hardlink works reliably
1085 # a whilelist of known filesystems where hardlink works reliably
1086 _hardlinkfswhitelist = set([
1086 _hardlinkfswhitelist = set([
1087 'btrfs',
1087 'btrfs',
1088 'ext2',
1088 'ext2',
1089 'ext3',
1089 'ext3',
1090 'ext4',
1090 'ext4',
1091 'hfs',
1091 'hfs',
1092 'jfs',
1092 'jfs',
1093 'reiserfs',
1093 'reiserfs',
1094 'tmpfs',
1094 'tmpfs',
1095 'ufs',
1095 'ufs',
1096 'xfs',
1096 'xfs',
1097 'zfs',
1097 'zfs',
1098 ])
1098 ])
1099
1099
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember dest's stat before unlinking so we can detect
            # timestamp ambiguity after the copy
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink itself rather than copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1151
1151
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking was still in
    effect at the end, and how many files were copied. 'progress' is
    called with a topic and a position as files are processed; a final
    call with pos=None closes the progress topic.

    hardlink=None means "decide automatically" based on whether src and
    dst live on the same device.
    """
    num = 0

    # topic depends on whether we ended up linking or copying
    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            # hardlinks only work within one device
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset nested progress by files already copied at
                # this level
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed once: stop trying for all later files
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1191
1191
# Device names that Windows reserves in every directory; a path component
# whose base name (before the first '.') matches one of these,
# case-insensitively, cannot be created on Windows.
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# Characters that may never appear anywhere in a Windows filename.
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component separately; both separators are accepted
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        # iterate one character at a time; pycompat.bytestr makes slicing
        # yield 1-char strings on Python 3 instead of integer byte values
        for c in pycompat.bytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters are forbidden in Windows filenames
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names apply to the base name regardless of
        # extension (e.g. "con.xml" is still "con")
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # trailing '.' or ' ' is rejected, but the special components
        # '.' and '..' stay legal; "n not in '..'" is a substring test
        # that is True exactly for n == '.' or n == '..'
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1242
1242
if pycompat.osname == 'nt':
    # on Windows, filename restrictions are the Windows ones and
    # time.clock is the higher-resolution wall-clock timer
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

if safehasattr(time, "perf_counter"):
    # Python >= 3.3: perf_counter supersedes both clock and time for
    # measuring elapsed intervals
    timer = time.perf_counter
1252
1252
def makelock(info, pathname):
    """Create a lock file at pathname holding info.

    A symlink whose target is info is preferred; on platforms or
    filesystems without symlink support, a regular file containing
    info is created exclusively instead. Raises OSError(EEXIST) if
    the lock already exists as a symlink.
    """
    try:
        return os.symlink(info, pathname)
    except AttributeError:
        # this platform's os module has no symlink at all
        pass
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # any other symlink failure: fall back to a plain file

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1265
1265
def readlock(pathname):
    """Return the contents of a lock file created by makelock.

    The lock is normally a symlink, whose target is the content; on
    platforms without symlinks (or when the lock is a plain file) the
    file is read directly.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: pathname is not a symlink; ENOSYS: no readlink here.
        # Both mean "fall back to reading a regular file".
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
1278
1278
def fstat(fp):
    """stat a file object, falling back to its name when it has no
    fileno method (e.g. wrapped or in-memory file-likes)."""
    try:
        fd = fp.fileno()
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fd)
1285
1285
1286 # File system features
1286 # File system features
1287
1287
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirpart, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # the name has no case to fold: no evidence against sensitivity
        return True
    try:
        other = os.lstat(os.path.join(dirpart, folded))
    except OSError:
        # folded name does not exist: filesystem distinguished the case
        return True
    # same inode under the folded name means case-insensitive
    return other != st
1310
1310
try:
    import re2
    # tri-state flag: None means "re2 imports but has not been validated
    # yet"; _re._checkre2() resolves it to True/False on first use
    _re2 = None
except ImportError:
    _re2 = False
1316
1316
class _re(object):
    """Facade over the stdlib 're' module that transparently uses the
    faster re2 engine when it is installed and actually works."""
    def _checkre2(self):
        # resolve the module-level tri-state _re2 flag to True/False
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 only accepts flags as inline pattern modifiers
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1359
1359
# module-level singleton used as a drop-in replacement for the 're' module
re = _re()
1361
1361
# per-directory cache for fspath: dir -> {normcased name: on-disk name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased name -> actual on-disk spelling for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes: inside a regex character class an unescaped
    # backslash would escape the following separator character. The
    # previous code called seps.replace() without keeping the result
    # (strings are immutable), so the intended escaping never happened;
    # assign the result so it does.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the normcased spelling if the entry vanished
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1404
1404
def getfstype(dirpath):
    '''Get the filesystem type name from a directory (best-effort)

    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
    '''
    # not every platform build of osutil provides getfstype
    impl = getattr(osutil, 'getfstype', None)
    if impl is None:
        return None
    return impl(dirpath)
1411
1411
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        # a leftover probe file blocks us; assume the worst
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # could not even create the probe file; clean up best-effort
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always close the handle and remove both probe files
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1447
1447
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(pycompat.ossep):
        return True
    # preserved semantics: when there is no altsep this evaluates to the
    # falsy altsep value itself, exactly like the original expression
    return pycompat.osaltsep and path.endswith(pycompat.osaltsep)
1452
1452
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(pycompat.ossep)
1460
1460
def gui():
    '''Are we running in a GUI?

    The return value is used in a boolean context; on non-darwin POSIX
    it may be the DISPLAY string itself (truthy) rather than True.
    '''
    if pycompat.sysplatform == 'darwin':
        if 'SSH_CONNECTION' in encoding.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
1475
1475
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file in the same directory so a later rename is atomic
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source doesn't exist: the empty temp file is the copy
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1514
1514
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file: record None instead of failing
            self.stat = None

    # __eq__ is redefined below, but hashing stays identity-based
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            # at least one side has stat = None (file missing)
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'.
        """
        # mask into 31 bits: some platforms reject larger utime values
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return
            raise

    def __ne__(self, other):
        return not self == other
1598
1598
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Flush the temp file and atomically rename it into place."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # oldstat is falsy when checkambig is off or the target is new
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Throw away all writes: remove the temp file without renaming."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard when an exception escaped
        if exctype is not None:
            self.discard()
        else:
            self.close()
1661
1661
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    # opportunistically prune now-empty parent directories; failure
    # (non-empty dir, permissions) is not an error
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
1673
1673
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return  # already gone -- that is the point of "try"
        raise
1681
1681
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return  # already there -- nothing to do
        if exc.errno != errno.ENOENT or not name:
            raise
        # missing parent: create it first, then retry this directory
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise  # hit the filesystem root without success
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # EEXIST here means another process won a creation race
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1709
1709
def readfile(path):
    """Return the entire binary content of *path*."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1713
1713
def writefile(path, text):
    """Write *text* (bytes) to *path*, truncating any existing content."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1717
1717
def appendfile(path, text):
    """Append *text* (bytes) to *path*, creating the file if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1721
1721
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def _capped(chunks):
            # Re-chunk anything larger than 1MB into 256KB pieces so a
            # single oversized chunk never sits whole in the queue.
            for piece in chunks:
                if len(piece) <= 2**20:
                    yield piece
                    continue
                start = 0
                while start < len(piece):
                    yield piece[start:start + 2**18]
                    start += 2**18
        self.iter = _capped(in_iter)
        self._queue = collections.deque()
        # read offset into the chunk at the head of the queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        remaining = l
        out = []
        queue = self._queue
        while remaining > 0:
            # refill the queue with roughly 256KB of lookahead
            if not queue:
                budget = 2**18
                for piece in self.iter:
                    queue.append(piece)
                    budget -= len(piece)
                    if budget <= 0:
                        break
                if not queue:
                    break  # source exhausted

            # Peek at the head chunk rather than popping and re-pushing it:
            # a partial read then costs no extra deque mutation and no new
            # string for the unread remainder.
            head = queue[0]
            headlen = len(head)
            offset = self._chunkoffset

            if offset == 0 and remaining >= headlen:
                # consume the whole chunk as-is
                remaining -= headlen
                queue.popleft()
                out.append(head)
                # self._chunkoffset stays 0
                continue

            unread = headlen - offset

            if remaining >= unread:
                # consume the rest of a partially-read chunk; offset is
                # nonzero here (the branch above handled offset == 0), so
                # the slice below is never a full no-op copy
                remaining -= unread
                queue.popleft()
                out.append(head[offset:])
                self._chunkoffset = 0
            else:
                # take only part of the chunk and remember how far we got
                out.append(head[offset:offset + remaining])
                self._chunkoffset += remaining
                remaining -= unread  # goes negative, terminating the loop

        return ''.join(out)
1801
1801
1802 def filechunkiter(f, size=131072, limit=None):
1802 def filechunkiter(f, size=131072, limit=None):
1803 """Create a generator that produces the data in the file size
1803 """Create a generator that produces the data in the file size
1804 (default 131072) bytes at a time, up to optional limit (default is
1804 (default 131072) bytes at a time, up to optional limit (default is
1805 to read all data). Chunks may be less than size bytes if the
1805 to read all data). Chunks may be less than size bytes if the
1806 chunk is the last chunk in the file, or the file is a socket or
1806 chunk is the last chunk in the file, or the file is a socket or
1807 some other type of file that sometimes reads less data than is
1807 some other type of file that sometimes reads less data than is
1808 requested."""
1808 requested."""
1809 assert size >= 0
1809 assert size >= 0
1810 assert limit is None or limit >= 0
1810 assert limit is None or limit >= 0
1811 while True:
1811 while True:
1812 if limit is None:
1812 if limit is None:
1813 nbytes = size
1813 nbytes = size
1814 else:
1814 else:
1815 nbytes = min(limit, size)
1815 nbytes = min(limit, size)
1816 s = nbytes and f.read(nbytes)
1816 s = nbytes and f.read(nbytes)
1817 if not s:
1817 if not s:
1818 break
1818 break
1819 if limit:
1819 if limit:
1820 limit -= len(s)
1820 limit -= len(s)
1821 yield s
1821 yield s
1822
1822
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # The local-vs-UTC difference at this instant is the zone offset
    # (in seconds east-negative, matching the rest of this module).
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    delta = utc - local
    return timestamp, delta.days * 86400 + delta.seconds
1835
1835
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the %z/%1/%2 extensions into a signed HHMM offset
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # clamp to the signed 32-bit range
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
1871
1871
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1875
1875
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair

    offset is in seconds west of UTC (negative means east of UTC),
    or None when no timezone is recognized; remainder is the input
    with the timezone text removed.
    """
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    # Use 1-char slices (s[-5:-4]) instead of indexing (s[-5]): on
    # Python 3 indexing a bytes object yields an int, which would make
    # the "+-" membership test and "+" comparison silently misbehave.
    if len(s) >= 5 and s[-5:-4] in "+-" and s[-4:].isdigit():
        sign = (s[-5:-4] == "+") and 1 or -1
        hours = int(s[-4:-2])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6:-5] in "+-" and s[-3:-2] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = (s[-6:-5] == "+") and 1 or -1
        hours = int(s[-5:-3])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    return None, s
1903
1903
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps specificity groups ("S", "M", "HI", "d", "mb",
    "yY") to (rounded, now) fallback value pairs used for elements the
    format does not mention.
    """
    if defaults is None:
        defaults = {}

    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        # NOTE(review): iterating `part` yields ints on py3 bytes; this
        # module runs its literals as native str here -- confirm before
        # the py3 bytes transformation lands for this function.
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            # slice instead of index: part[0] on py3 bytes is an int,
            # which would break the concatenation (same fix as parsedate)
            format += "@%" + part[0:1]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1934
1934
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is
    returned unchanged.

    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date  # already parsed
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, in both canonical and localized spellings
    if date in ('now', _('now')):
        return makedate()
    if date in ('today', _('today')):
        date = datetime.date.today().strftime('%b %d')
    elif date in ('yesterday', _('yesterday')):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset"
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults for each specificity group, then try the
        # configured formats in order
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            rounded = bias.get(part)
            if rounded is None:
                # slice (not index) keeps this byte-safe on py3
                rounded = "00" if part[0:1] in "HMS" else "0"

            # this piece is for matching the generic end to today's date
            nowval = datestr(now, "%" + part[0:1])

            defaults[part] = (rounded, nowval)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
2011
2011
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unknown fields down to the start of the period
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unknown fields up to the end of the period; walk the day
        # down from 31 until the month accepts it
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    # Use 1-char slices (date[0:1]) instead of indexing: on Python 3
    # indexing a bytes object yields an int, so date[0] == "<" would
    # always be false (same class of fix as parsedate in this module).
    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0:1] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0:1] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0:1] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
2087
2087
def stringmatcher(pattern, casesensitive=True):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    With casesensitive=False, both the regex and the literal comparison
    ignore case.
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        flags = 0 if casesensitive else remod.I
        try:
            regex = remod.compile(pattern, flags)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]

    if casesensitive:
        match = pattern.__eq__
    else:
        # compare in a case-folded space
        ipat = encoding.lower(pattern)
        match = lambda s: ipat == encoding.lower(s)
    return 'literal', pattern, match
2146
2146
def shortuser(user):
    """Return a short representation of a user name or email address."""
    user = user.partition('@')[0]       # drop any mail domain
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]            # drop a "Real Name <" prefix
    user = user.partition(' ')[0]       # keep the first word
    user = user.partition('.')[0]       # keep the first dotted component
    return user
2162
2162
def emailuser(user):
    """Return the user portion of an email address."""
    # Strip the domain ('@' and after), then any "Name <" prefix.
    at = user.find('@')
    if at != -1:
        user = user[:at]
    lt = user.find('<')
    if lt != -1:
        user = user[lt + 1:]
    return user
2172
2172
def email(author):
    '''get email of author.'''
    # Slice out the text between '<' and '>'.  find() returning -1 makes
    # this degrade gracefully: with no brackets the slice is [0:None],
    # i.e. the whole string.
    end = author.find('>')
    if end < 0:
        end = None
    start = author.find('<') + 1
    return author[start:end]
2179
2179
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # Delegates to encoding.trim with a '...' suffix for truncated text;
    # presumably column-aware for multi-width characters — see encoding.trim.
    return encoding.trim(text, maxlength, ellipsis='...')
2183
2183
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # Pick the first row whose threshold (multiplier * divisor) the
        # magnitude of count reaches, and render with that row's format.
        magnitude = abs(count)
        for mult, div, fmt in unittable:
            if magnitude >= mult * div:
                return fmt % (count / float(div))
        # No row matched: fall back to the last (smallest-unit) row.
        return unittable[-1][2] % count

    return go
2194
2194
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    # Validation order matters for the doctests above: an inverted range
    # is reported before a non-positive start line.
    if toline < fromline:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    return fromline - 1, toline
2215
2215
# bytecount(n) renders a byte quantity with the widest unit that still
# shows at least one integer digit, e.g. bytecount(123456789) == '117.7 MB'.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2228
2228
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
# NOTE: this is a bytes pattern (br''), so it must be applied to bytes
# subjects with bytes replacements.
_eolre = remod.compile(br'\r*\n')
2233
2233
def tolf(s):
    """Normalize line endings in bytes ``s`` to LF.

    ``_eolre`` is a bytes pattern (``br'\\r*\\n'``), so the replacement
    must also be bytes: on Python 3, ``re.sub`` raises TypeError when a
    str replacement is mixed with a bytes pattern.  ``b'\\n'`` is
    identical to ``'\\n'`` on Python 2, so this is fully compatible.
    """
    return _eolre.sub(b'\n', s)
2236
2236
def tocrlf(s):
    """Normalize line endings in bytes ``s`` to CRLF.

    Uses a bytes replacement to match the bytes pattern ``_eolre``; on
    Python 3 mixing a str replacement with a bytes pattern raises
    TypeError.  ``b'\\r\\n'`` behaves identically on Python 2.
    """
    return _eolre.sub(b'\r\n', s)
2239
2239
# Select EOL conversion helpers matching the host OS convention: on
# platforms whose native line separator is CRLF (Windows) translate both
# ways, elsewhere both helpers are identity functions.
if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
2246
2246
def escapestr(s):
    """Render nonprintable bytes in ``s`` as backslash escape sequences.

    Equivalent to Python 2's ``s.encode('string_escape')``, but calls the
    underlying codec function directly so it also works on Python 3.
    """
    escaped, _length = codecs.escape_encode(s)
    return escaped
2251
2251
def unescapestr(s):
    """Inverse of escapestr(): decode backslash escape sequences in ``s``."""
    decoded, _length = codecs.escape_decode(s)
    return decoded
2254
2254
def uirepr(s):
    """Like repr(), but with doubled backslashes collapsed so Windows
    paths stay readable in user-facing output."""
    r = repr(s)
    return r.replace('\\\\', '\\')
2258
2258
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Build a display-column-aware TextWrapper and return an instance.

    Wrapped in a function so the textwrap import can be deferred; the
    first call rebinds this module-level name to the class itself (see
    the ``global`` statement below), so later "calls" construct the
    class directly without re-running this factory.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr so the head occupies at most space_left display
            # columns; returns (head, remainder).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    # l is the display width, not len(), of the next chunk.
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Memoize: replace this factory with the class it built.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2362
2362
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap ``line`` to ``width`` display columns.

    ``initindent`` prefixes the first output line, ``hangindent`` every
    subsequent one.  Input and output are byte strings in the local
    encoding; wrapping itself is done on unicode so display columns are
    measured correctly.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # Hoist the codec name/error-mode lookups; they are reused four times.
    codec = pycompat.sysstr(encoding.encoding)
    errmode = pycompat.sysstr(encoding.encodingmode)
    uline = line.decode(codec, errmode)
    uinit = initindent.decode(codec, errmode)
    uhang = hangindent.decode(codec, errmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(codec)
2378
2378
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            # Read raw chunks via os.read (retrying on EINTR) and re-split
            # them into lines, carrying any partial line across chunks.
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            # Trailing partial line: keep for the next chunk.
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        # Regular on-disk files are treated as "fast" and iterated
        # directly; only pipes/sockets/ttys go through the EINTR-safe
        # wrapper.
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2450
2450
def iterlines(iterator):
    """Yield the individual lines of every chunk produced by ``iterator``."""
    for block in iterator:
        for ln in block.splitlines():
            yield ln
2455
2455
def expandpath(path):
    """Expand environment variables, then '~', in ``path``."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2458
2458
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # Non-frozen installs: defer to the platform-specific lookup.
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [encoding.environ['EXECUTABLEPATH']]
    return [pycompat.sysexecutable]
2473
2473
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # NOTE(review): os.wait() returns a (pid, status) tuple, so the
        # 'pid in terminated' membership test below can never match the
        # bare pid; child death is effectively detected via testpid() —
        # confirm whether this is intentional.
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # Re-check condfn after observing child death to close the
            # race where the condition became true just before exit.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # Restore the previous SIGCHLD handler, if we replaced it.
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2508
2508
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda t: t)
    alternatives = '|'.join(mapping.keys())
    if escape_prefix:
        alternatives += '|' + prefix
        # A doubled prefix escapes itself; when the prefix carries a
        # regex backslash, strip it to get the literal character.
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    pat = remod.compile(r'%s(%s)' % (prefix, alternatives))

    def _substitute(m):
        # m.group() includes the one-character prefix; strip it to key
        # into the mapping.
        return fn(mapping[m.group()[1:]])

    return pat.sub(_substitute, s)
2533
2533
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    # Numeric values (ints or digit strings) are taken at face value.
    try:
        return int(port)
    except ValueError:
        # Not numeric: fall through to a service-name lookup.
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
2550
2550
# Truthy/falsy spellings accepted for boolean config values; parsebool()
# looks strings up here after lowercasing.
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2554
2554
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # dict.get defaults to None, which doubles as the invalid marker.
    return _booleans.get(s.lower())
2561
2561
# Map every two-hex-digit string (all upper/lower case combinations) to
# its character, for fast %XX unquoting without repeated int() calls.
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in string.hexdigits for b in string.hexdigits)
2564
2564
2565 class url(object):
2565 class url(object):
2566 r"""Reliable URL parser.
2566 r"""Reliable URL parser.
2567
2567
2568 This parses URLs and provides attributes for the following
2568 This parses URLs and provides attributes for the following
2569 components:
2569 components:
2570
2570
2571 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2571 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2572
2572
2573 Missing components are set to None. The only exception is
2573 Missing components are set to None. The only exception is
2574 fragment, which is set to '' if present but empty.
2574 fragment, which is set to '' if present but empty.
2575
2575
2576 If parsefragment is False, fragment is included in query. If
2576 If parsefragment is False, fragment is included in query. If
2577 parsequery is False, query is included in path. If both are
2577 parsequery is False, query is included in path. If both are
2578 False, both fragment and query are included in path.
2578 False, both fragment and query are included in path.
2579
2579
2580 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2580 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2581
2581
2582 Note that for backward compatibility reasons, bundle URLs do not
2582 Note that for backward compatibility reasons, bundle URLs do not
2583 take host names. That means 'bundle://../' has a path of '../'.
2583 take host names. That means 'bundle://../' has a path of '../'.
2584
2584
2585 Examples:
2585 Examples:
2586
2586
2587 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2587 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2588 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2588 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2589 >>> url('ssh://[::1]:2200//home/joe/repo')
2589 >>> url('ssh://[::1]:2200//home/joe/repo')
2590 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2590 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2591 >>> url('file:///home/joe/repo')
2591 >>> url('file:///home/joe/repo')
2592 <url scheme: 'file', path: '/home/joe/repo'>
2592 <url scheme: 'file', path: '/home/joe/repo'>
2593 >>> url('file:///c:/temp/foo/')
2593 >>> url('file:///c:/temp/foo/')
2594 <url scheme: 'file', path: 'c:/temp/foo/'>
2594 <url scheme: 'file', path: 'c:/temp/foo/'>
2595 >>> url('bundle:foo')
2595 >>> url('bundle:foo')
2596 <url scheme: 'bundle', path: 'foo'>
2596 <url scheme: 'bundle', path: 'foo'>
2597 >>> url('bundle://../foo')
2597 >>> url('bundle://../foo')
2598 <url scheme: 'bundle', path: '../foo'>
2598 <url scheme: 'bundle', path: '../foo'>
2599 >>> url(r'c:\foo\bar')
2599 >>> url(r'c:\foo\bar')
2600 <url path: 'c:\\foo\\bar'>
2600 <url path: 'c:\\foo\\bar'>
2601 >>> url(r'\\blah\blah\blah')
2601 >>> url(r'\\blah\blah\blah')
2602 <url path: '\\\\blah\\blah\\blah'>
2602 <url path: '\\\\blah\\blah\\blah'>
2603 >>> url(r'\\blah\blah\blah#baz')
2603 >>> url(r'\\blah\blah\blah#baz')
2604 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2604 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2605 >>> url(r'file:///C:\users\me')
2605 >>> url(r'file:///C:\users\me')
2606 <url scheme: 'file', path: 'C:\\users\\me'>
2606 <url scheme: 'file', path: 'C:\\users\\me'>
2607
2607
2608 Authentication credentials:
2608 Authentication credentials:
2609
2609
2610 >>> url('ssh://joe:xyz@x/repo')
2610 >>> url('ssh://joe:xyz@x/repo')
2611 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2611 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2612 >>> url('ssh://joe@x/repo')
2612 >>> url('ssh://joe@x/repo')
2613 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2613 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2614
2614
2615 Query strings and fragments:
2615 Query strings and fragments:
2616
2616
2617 >>> url('http://host/a?b#c')
2617 >>> url('http://host/a?b#c')
2618 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2618 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2619 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2619 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2620 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2620 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2621
2621
2622 Empty path:
2622 Empty path:
2623
2623
2624 >>> url('')
2624 >>> url('')
2625 <url path: ''>
2625 <url path: ''>
2626 >>> url('#a')
2626 >>> url('#a')
2627 <url path: '', fragment: 'a'>
2627 <url path: '', fragment: 'a'>
2628 >>> url('http://host/')
2628 >>> url('http://host/')
2629 <url scheme: 'http', host: 'host', path: ''>
2629 <url scheme: 'http', host: 'host', path: ''>
2630 >>> url('http://host/#a')
2630 >>> url('http://host/#a')
2631 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2631 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2632
2632
2633 Only scheme:
2633 Only scheme:
2634
2634
2635 >>> url('http:')
2635 >>> url('http:')
2636 <url scheme: 'http'>
2636 <url scheme: 'http'>
2637 """
2637 """
2638
2638
2639 _safechars = "!~*'()+"
2639 _safechars = "!~*'()+"
2640 _safepchars = "/!~*'()+:\\"
2640 _safepchars = "/!~*'()+:\\"
2641 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2641 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2642
2642
2643 def __init__(self, path, parsequery=True, parsefragment=True):
2643 def __init__(self, path, parsequery=True, parsefragment=True):
2644 # We slowly chomp away at path until we have only the path left
2644 # We slowly chomp away at path until we have only the path left
2645 self.scheme = self.user = self.passwd = self.host = None
2645 self.scheme = self.user = self.passwd = self.host = None
2646 self.port = self.path = self.query = self.fragment = None
2646 self.port = self.path = self.query = self.fragment = None
2647 self._localpath = True
2647 self._localpath = True
2648 self._hostport = ''
2648 self._hostport = ''
2649 self._origpath = path
2649 self._origpath = path
2650
2650
2651 if parsefragment and '#' in path:
2651 if parsefragment and '#' in path:
2652 path, self.fragment = path.split('#', 1)
2652 path, self.fragment = path.split('#', 1)
2653
2653
2654 # special case for Windows drive letters and UNC paths
2654 # special case for Windows drive letters and UNC paths
2655 if hasdriveletter(path) or path.startswith('\\\\'):
2655 if hasdriveletter(path) or path.startswith('\\\\'):
2656 self.path = path
2656 self.path = path
2657 return
2657 return
2658
2658
2659 # For compatibility reasons, we can't handle bundle paths as
2659 # For compatibility reasons, we can't handle bundle paths as
2660 # normal URLS
2660 # normal URLS
2661 if path.startswith('bundle:'):
2661 if path.startswith('bundle:'):
2662 self.scheme = 'bundle'
2662 self.scheme = 'bundle'
2663 path = path[7:]
2663 path = path[7:]
2664 if path.startswith('//'):
2664 if path.startswith('//'):
2665 path = path[2:]
2665 path = path[2:]
2666 self.path = path
2666 self.path = path
2667 return
2667 return
2668
2668
2669 if self._matchscheme(path):
2669 if self._matchscheme(path):
2670 parts = path.split(':', 1)
2670 parts = path.split(':', 1)
2671 if parts[0]:
2671 if parts[0]:
2672 self.scheme, path = parts
2672 self.scheme, path = parts
2673 self._localpath = False
2673 self._localpath = False
2674
2674
2675 if not path:
2675 if not path:
2676 path = None
2676 path = None
2677 if self._localpath:
2677 if self._localpath:
2678 self.path = ''
2678 self.path = ''
2679 return
2679 return
2680 else:
2680 else:
2681 if self._localpath:
2681 if self._localpath:
2682 self.path = path
2682 self.path = path
2683 return
2683 return
2684
2684
2685 if parsequery and '?' in path:
2685 if parsequery and '?' in path:
2686 path, self.query = path.split('?', 1)
2686 path, self.query = path.split('?', 1)
2687 if not path:
2687 if not path:
2688 path = None
2688 path = None
2689 if not self.query:
2689 if not self.query:
2690 self.query = None
2690 self.query = None
2691
2691
2692 # // is required to specify a host/authority
2692 # // is required to specify a host/authority
2693 if path and path.startswith('//'):
2693 if path and path.startswith('//'):
2694 parts = path[2:].split('/', 1)
2694 parts = path[2:].split('/', 1)
2695 if len(parts) > 1:
2695 if len(parts) > 1:
2696 self.host, path = parts
2696 self.host, path = parts
2697 else:
2697 else:
2698 self.host = parts[0]
2698 self.host = parts[0]
2699 path = None
2699 path = None
2700 if not self.host:
2700 if not self.host:
2701 self.host = None
2701 self.host = None
2702 # path of file:///d is /d
2702 # path of file:///d is /d
2703 # path of file:///d:/ is d:/, not /d:/
2703 # path of file:///d:/ is d:/, not /d:/
2704 if path and not hasdriveletter(path):
2704 if path and not hasdriveletter(path):
2705 path = '/' + path
2705 path = '/' + path
2706
2706
2707 if self.host and '@' in self.host:
2707 if self.host and '@' in self.host:
2708 self.user, self.host = self.host.rsplit('@', 1)
2708 self.user, self.host = self.host.rsplit('@', 1)
2709 if ':' in self.user:
2709 if ':' in self.user:
2710 self.user, self.passwd = self.user.split(':', 1)
2710 self.user, self.passwd = self.user.split(':', 1)
2711 if not self.host:
2711 if not self.host:
2712 self.host = None
2712 self.host = None
2713
2713
2714 # Don't split on colons in IPv6 addresses without ports
2714 # Don't split on colons in IPv6 addresses without ports
2715 if (self.host and ':' in self.host and
2715 if (self.host and ':' in self.host and
2716 not (self.host.startswith('[') and self.host.endswith(']'))):
2716 not (self.host.startswith('[') and self.host.endswith(']'))):
2717 self._hostport = self.host
2717 self._hostport = self.host
2718 self.host, self.port = self.host.rsplit(':', 1)
2718 self.host, self.port = self.host.rsplit(':', 1)
2719 if not self.host:
2719 if not self.host:
2720 self.host = None
2720 self.host = None
2721
2721
2722 if (self.host and self.scheme == 'file' and
2722 if (self.host and self.scheme == 'file' and
2723 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2723 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2724 raise Abort(_('file:// URLs can only refer to localhost'))
2724 raise Abort(_('file:// URLs can only refer to localhost'))
2725
2725
2726 self.path = path
2726 self.path = path
2727
2727
2728 # leave the query string escaped
2728 # leave the query string escaped
2729 for a in ('user', 'passwd', 'host', 'port',
2729 for a in ('user', 'passwd', 'host', 'port',
2730 'path', 'fragment'):
2730 'path', 'fragment'):
2731 v = getattr(self, a)
2731 v = getattr(self, a)
2732 if v is not None:
2732 if v is not None:
2733 setattr(self, a, urlreq.unquote(v))
2733 setattr(self, a, urlreq.unquote(v))
2734
2734
2735 def __repr__(self):
2735 def __repr__(self):
2736 attrs = []
2736 attrs = []
2737 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2737 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2738 'query', 'fragment'):
2738 'query', 'fragment'):
2739 v = getattr(self, a)
2739 v = getattr(self, a)
2740 if v is not None:
2740 if v is not None:
2741 attrs.append('%s: %r' % (a, v))
2741 attrs.append('%s: %r' % (a, v))
2742 return '<url %s>' % ', '.join(attrs)
2742 return '<url %s>' % ', '.join(attrs)
2743
2743
2744 def __str__(self):
2744 def __str__(self):
2745 r"""Join the URL's components back into a URL string.
2745 r"""Join the URL's components back into a URL string.
2746
2746
2747 Examples:
2747 Examples:
2748
2748
2749 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2749 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2750 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2750 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2751 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2751 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2752 'http://user:pw@host:80/?foo=bar&baz=42'
2752 'http://user:pw@host:80/?foo=bar&baz=42'
2753 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2753 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2754 'http://user:pw@host:80/?foo=bar%3dbaz'
2754 'http://user:pw@host:80/?foo=bar%3dbaz'
2755 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2755 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2756 'ssh://user:pw@[::1]:2200//home/joe#'
2756 'ssh://user:pw@[::1]:2200//home/joe#'
2757 >>> str(url('http://localhost:80//'))
2757 >>> str(url('http://localhost:80//'))
2758 'http://localhost:80//'
2758 'http://localhost:80//'
2759 >>> str(url('http://localhost:80/'))
2759 >>> str(url('http://localhost:80/'))
2760 'http://localhost:80/'
2760 'http://localhost:80/'
2761 >>> str(url('http://localhost:80'))
2761 >>> str(url('http://localhost:80'))
2762 'http://localhost:80/'
2762 'http://localhost:80/'
2763 >>> str(url('bundle:foo'))
2763 >>> str(url('bundle:foo'))
2764 'bundle:foo'
2764 'bundle:foo'
2765 >>> str(url('bundle://../foo'))
2765 >>> str(url('bundle://../foo'))
2766 'bundle:../foo'
2766 'bundle:../foo'
2767 >>> str(url('path'))
2767 >>> str(url('path'))
2768 'path'
2768 'path'
2769 >>> str(url('file:///tmp/foo/bar'))
2769 >>> str(url('file:///tmp/foo/bar'))
2770 'file:///tmp/foo/bar'
2770 'file:///tmp/foo/bar'
2771 >>> str(url('file:///c:/tmp/foo/bar'))
2771 >>> str(url('file:///c:/tmp/foo/bar'))
2772 'file:///c:/tmp/foo/bar'
2772 'file:///c:/tmp/foo/bar'
2773 >>> print url(r'bundle:foo\bar')
2773 >>> print url(r'bundle:foo\bar')
2774 bundle:foo\bar
2774 bundle:foo\bar
2775 >>> print url(r'file:///D:\data\hg')
2775 >>> print url(r'file:///D:\data\hg')
2776 file:///D:\data\hg
2776 file:///D:\data\hg
2777 """
2777 """
2778 return encoding.strfromlocal(self.__bytes__())
2778 return encoding.strfromlocal(self.__bytes__())
2779
2779
2780 def __bytes__(self):
2780 def __bytes__(self):
2781 if self._localpath:
2781 if self._localpath:
2782 s = self.path
2782 s = self.path
2783 if self.scheme == 'bundle':
2783 if self.scheme == 'bundle':
2784 s = 'bundle:' + s
2784 s = 'bundle:' + s
2785 if self.fragment:
2785 if self.fragment:
2786 s += '#' + self.fragment
2786 s += '#' + self.fragment
2787 return s
2787 return s
2788
2788
2789 s = self.scheme + ':'
2789 s = self.scheme + ':'
2790 if self.user or self.passwd or self.host:
2790 if self.user or self.passwd or self.host:
2791 s += '//'
2791 s += '//'
2792 elif self.scheme and (not self.path or self.path.startswith('/')
2792 elif self.scheme and (not self.path or self.path.startswith('/')
2793 or hasdriveletter(self.path)):
2793 or hasdriveletter(self.path)):
2794 s += '//'
2794 s += '//'
2795 if hasdriveletter(self.path):
2795 if hasdriveletter(self.path):
2796 s += '/'
2796 s += '/'
2797 if self.user:
2797 if self.user:
2798 s += urlreq.quote(self.user, safe=self._safechars)
2798 s += urlreq.quote(self.user, safe=self._safechars)
2799 if self.passwd:
2799 if self.passwd:
2800 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2800 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2801 if self.user or self.passwd:
2801 if self.user or self.passwd:
2802 s += '@'
2802 s += '@'
2803 if self.host:
2803 if self.host:
2804 if not (self.host.startswith('[') and self.host.endswith(']')):
2804 if not (self.host.startswith('[') and self.host.endswith(']')):
2805 s += urlreq.quote(self.host)
2805 s += urlreq.quote(self.host)
2806 else:
2806 else:
2807 s += self.host
2807 s += self.host
2808 if self.port:
2808 if self.port:
2809 s += ':' + urlreq.quote(self.port)
2809 s += ':' + urlreq.quote(self.port)
2810 if self.host:
2810 if self.host:
2811 s += '/'
2811 s += '/'
2812 if self.path:
2812 if self.path:
2813 # TODO: similar to the query string, we should not unescape the
2813 # TODO: similar to the query string, we should not unescape the
2814 # path when we store it, the path might contain '%2f' = '/',
2814 # path when we store it, the path might contain '%2f' = '/',
2815 # which we should *not* escape.
2815 # which we should *not* escape.
2816 s += urlreq.quote(self.path, safe=self._safepchars)
2816 s += urlreq.quote(self.path, safe=self._safepchars)
2817 if self.query:
2817 if self.query:
2818 # we store the query in escaped form.
2818 # we store the query in escaped form.
2819 s += '?' + self.query
2819 s += '?' + self.query
2820 if self.fragment is not None:
2820 if self.fragment is not None:
2821 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2821 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2822 return s
2822 return s
2823
2823
2824 def authinfo(self):
2824 def authinfo(self):
2825 user, passwd = self.user, self.passwd
2825 user, passwd = self.user, self.passwd
2826 try:
2826 try:
2827 self.user, self.passwd = None, None
2827 self.user, self.passwd = None, None
2828 s = bytes(self)
2828 s = bytes(self)
2829 finally:
2829 finally:
2830 self.user, self.passwd = user, passwd
2830 self.user, self.passwd = user, passwd
2831 if not self.user:
2831 if not self.user:
2832 return (s, None)
2832 return (s, None)
2833 # authinfo[1] is passed to urllib2 password manager, and its
2833 # authinfo[1] is passed to urllib2 password manager, and its
2834 # URIs must not contain credentials. The host is passed in the
2834 # URIs must not contain credentials. The host is passed in the
2835 # URIs list because Python < 2.4.3 uses only that to search for
2835 # URIs list because Python < 2.4.3 uses only that to search for
2836 # a password.
2836 # a password.
2837 return (s, (None, (s, self.host),
2837 return (s, (None, (s, self.host),
2838 self.user, self.passwd or ''))
2838 self.user, self.passwd or ''))
2839
2839
2840 def isabs(self):
2840 def isabs(self):
2841 if self.scheme and self.scheme != 'file':
2841 if self.scheme and self.scheme != 'file':
2842 return True # remote URL
2842 return True # remote URL
2843 if hasdriveletter(self.path):
2843 if hasdriveletter(self.path):
2844 return True # absolute for our purposes - can't be joined()
2844 return True # absolute for our purposes - can't be joined()
2845 if self.path.startswith(r'\\'):
2845 if self.path.startswith(r'\\'):
2846 return True # Windows UNC path
2846 return True # Windows UNC path
2847 if self.path.startswith('/'):
2847 if self.path.startswith('/'):
2848 return True # POSIX-style
2848 return True # POSIX-style
2849 return False
2849 return False
2850
2850
2851 def localpath(self):
2851 def localpath(self):
2852 if self.scheme == 'file' or self.scheme == 'bundle':
2852 if self.scheme == 'file' or self.scheme == 'bundle':
2853 path = self.path or '/'
2853 path = self.path or '/'
2854 # For Windows, we need to promote hosts containing drive
2854 # For Windows, we need to promote hosts containing drive
2855 # letters to paths with drive letters.
2855 # letters to paths with drive letters.
2856 if hasdriveletter(self._hostport):
2856 if hasdriveletter(self._hostport):
2857 path = self._hostport + '/' + self.path
2857 path = self._hostport + '/' + self.path
2858 elif (self.host is not None and self.path
2858 elif (self.host is not None and self.path
2859 and not hasdriveletter(path)):
2859 and not hasdriveletter(path)):
2860 path = '/' + path
2860 path = '/' + path
2861 return path
2861 return path
2862 return self._origpath
2862 return self._origpath
2863
2863
2864 def islocal(self):
2864 def islocal(self):
2865 '''whether localpath will return something that posixfile can open'''
2865 '''whether localpath will return something that posixfile can open'''
2866 return (not self.scheme or self.scheme == 'file'
2866 return (not self.scheme or self.scheme == 'file'
2867 or self.scheme == 'bundle')
2867 or self.scheme == 'bundle')
2868
2868
2869 def hasscheme(path):
2869 def hasscheme(path):
2870 return bool(url(path).scheme)
2870 return bool(url(path).scheme)
2871
2871
2872 def hasdriveletter(path):
2872 def hasdriveletter(path):
2873 return path and path[1:2] == ':' and path[0:1].isalpha()
2873 return path and path[1:2] == ':' and path[0:1].isalpha()
2874
2874
2875 def urllocalpath(path):
2875 def urllocalpath(path):
2876 return url(path, parsequery=False, parsefragment=False).localpath()
2876 return url(path, parsequery=False, parsefragment=False).localpath()
2877
2877
2878 def hidepassword(u):
2878 def hidepassword(u):
2879 '''hide user credential in a url string'''
2879 '''hide user credential in a url string'''
2880 u = url(u)
2880 u = url(u)
2881 if u.passwd:
2881 if u.passwd:
2882 u.passwd = '***'
2882 u.passwd = '***'
2883 return bytes(u)
2883 return bytes(u)
2884
2884
2885 def removeauth(u):
2885 def removeauth(u):
2886 '''remove all authentication information from a url string'''
2886 '''remove all authentication information from a url string'''
2887 u = url(u)
2887 u = url(u)
2888 u.user = u.passwd = None
2888 u.user = u.passwd = None
2889 return str(u)
2889 return str(u)
2890
2890
2891 timecount = unitcountfn(
2891 timecount = unitcountfn(
2892 (1, 1e3, _('%.0f s')),
2892 (1, 1e3, _('%.0f s')),
2893 (100, 1, _('%.1f s')),
2893 (100, 1, _('%.1f s')),
2894 (10, 1, _('%.2f s')),
2894 (10, 1, _('%.2f s')),
2895 (1, 1, _('%.3f s')),
2895 (1, 1, _('%.3f s')),
2896 (100, 0.001, _('%.1f ms')),
2896 (100, 0.001, _('%.1f ms')),
2897 (10, 0.001, _('%.2f ms')),
2897 (10, 0.001, _('%.2f ms')),
2898 (1, 0.001, _('%.3f ms')),
2898 (1, 0.001, _('%.3f ms')),
2899 (100, 0.000001, _('%.1f us')),
2899 (100, 0.000001, _('%.1f us')),
2900 (10, 0.000001, _('%.2f us')),
2900 (10, 0.000001, _('%.2f us')),
2901 (1, 0.000001, _('%.3f us')),
2901 (1, 0.000001, _('%.3f us')),
2902 (100, 0.000000001, _('%.1f ns')),
2902 (100, 0.000000001, _('%.1f ns')),
2903 (10, 0.000000001, _('%.2f ns')),
2903 (10, 0.000000001, _('%.2f ns')),
2904 (1, 0.000000001, _('%.3f ns')),
2904 (1, 0.000000001, _('%.3f ns')),
2905 )
2905 )
2906
2906
2907 _timenesting = [0]
2907 _timenesting = [0]
2908
2908
2909 def timed(func):
2909 def timed(func):
2910 '''Report the execution time of a function call to stderr.
2910 '''Report the execution time of a function call to stderr.
2911
2911
2912 During development, use as a decorator when you need to measure
2912 During development, use as a decorator when you need to measure
2913 the cost of a function, e.g. as follows:
2913 the cost of a function, e.g. as follows:
2914
2914
2915 @util.timed
2915 @util.timed
2916 def foo(a, b, c):
2916 def foo(a, b, c):
2917 pass
2917 pass
2918 '''
2918 '''
2919
2919
2920 def wrapper(*args, **kwargs):
2920 def wrapper(*args, **kwargs):
2921 start = timer()
2921 start = timer()
2922 indent = 2
2922 indent = 2
2923 _timenesting[0] += indent
2923 _timenesting[0] += indent
2924 try:
2924 try:
2925 return func(*args, **kwargs)
2925 return func(*args, **kwargs)
2926 finally:
2926 finally:
2927 elapsed = timer() - start
2927 elapsed = timer() - start
2928 _timenesting[0] -= indent
2928 _timenesting[0] -= indent
2929 stderr.write('%s%s: %s\n' %
2929 stderr.write('%s%s: %s\n' %
2930 (' ' * _timenesting[0], func.__name__,
2930 (' ' * _timenesting[0], func.__name__,
2931 timecount(elapsed)))
2931 timecount(elapsed)))
2932 return wrapper
2932 return wrapper
2933
2933
2934 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2934 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2935 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2935 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2936
2936
2937 def sizetoint(s):
2937 def sizetoint(s):
2938 '''Convert a space specifier to a byte count.
2938 '''Convert a space specifier to a byte count.
2939
2939
2940 >>> sizetoint('30')
2940 >>> sizetoint('30')
2941 30
2941 30
2942 >>> sizetoint('2.2kb')
2942 >>> sizetoint('2.2kb')
2943 2252
2943 2252
2944 >>> sizetoint('6M')
2944 >>> sizetoint('6M')
2945 6291456
2945 6291456
2946 '''
2946 '''
2947 t = s.strip().lower()
2947 t = s.strip().lower()
2948 try:
2948 try:
2949 for k, u in _sizeunits:
2949 for k, u in _sizeunits:
2950 if t.endswith(k):
2950 if t.endswith(k):
2951 return int(float(t[:-len(k)]) * u)
2951 return int(float(t[:-len(k)]) * u)
2952 return int(t)
2952 return int(t)
2953 except ValueError:
2953 except ValueError:
2954 raise error.ParseError(_("couldn't parse size: %s") % s)
2954 raise error.ParseError(_("couldn't parse size: %s") % s)
2955
2955
2956 class hooks(object):
2956 class hooks(object):
2957 '''A collection of hook functions that can be used to extend a
2957 '''A collection of hook functions that can be used to extend a
2958 function's behavior. Hooks are called in lexicographic order,
2958 function's behavior. Hooks are called in lexicographic order,
2959 based on the names of their sources.'''
2959 based on the names of their sources.'''
2960
2960
2961 def __init__(self):
2961 def __init__(self):
2962 self._hooks = []
2962 self._hooks = []
2963
2963
2964 def add(self, source, hook):
2964 def add(self, source, hook):
2965 self._hooks.append((source, hook))
2965 self._hooks.append((source, hook))
2966
2966
2967 def __call__(self, *args):
2967 def __call__(self, *args):
2968 self._hooks.sort(key=lambda x: x[0])
2968 self._hooks.sort(key=lambda x: x[0])
2969 results = []
2969 results = []
2970 for source, hook in self._hooks:
2970 for source, hook in self._hooks:
2971 results.append(hook(*args))
2971 results.append(hook(*args))
2972 return results
2972 return results
2973
2973
2974 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
2974 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
2975 '''Yields lines for a nicely formatted stacktrace.
2975 '''Yields lines for a nicely formatted stacktrace.
2976 Skips the 'skip' last entries, then return the last 'depth' entries.
2976 Skips the 'skip' last entries, then return the last 'depth' entries.
2977 Each file+linenumber is formatted according to fileline.
2977 Each file+linenumber is formatted according to fileline.
2978 Each line is formatted according to line.
2978 Each line is formatted according to line.
2979 If line is None, it yields:
2979 If line is None, it yields:
2980 length of longest filepath+line number,
2980 length of longest filepath+line number,
2981 filepath+linenumber,
2981 filepath+linenumber,
2982 function
2982 function
2983
2983
2984 Not be used in production code but very convenient while developing.
2984 Not be used in production code but very convenient while developing.
2985 '''
2985 '''
2986 entries = [(fileline % (fn, ln), func)
2986 entries = [(fileline % (fn, ln), func)
2987 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
2987 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
2988 ][-depth:]
2988 ][-depth:]
2989 if entries:
2989 if entries:
2990 fnmax = max(len(entry[0]) for entry in entries)
2990 fnmax = max(len(entry[0]) for entry in entries)
2991 for fnln, func in entries:
2991 for fnln, func in entries:
2992 if line is None:
2992 if line is None:
2993 yield (fnmax, fnln, func)
2993 yield (fnmax, fnln, func)
2994 else:
2994 else:
2995 yield line % (fnmax, fnln, func)
2995 yield line % (fnmax, fnln, func)
2996
2996
2997 def debugstacktrace(msg='stacktrace', skip=0,
2997 def debugstacktrace(msg='stacktrace', skip=0,
2998 f=stderr, otherf=stdout, depth=0):
2998 f=stderr, otherf=stdout, depth=0):
2999 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2999 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3000 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3000 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3001 By default it will flush stdout first.
3001 By default it will flush stdout first.
3002 It can be used everywhere and intentionally does not require an ui object.
3002 It can be used everywhere and intentionally does not require an ui object.
3003 Not be used in production code but very convenient while developing.
3003 Not be used in production code but very convenient while developing.
3004 '''
3004 '''
3005 if otherf:
3005 if otherf:
3006 otherf.flush()
3006 otherf.flush()
3007 f.write('%s at:\n' % msg.rstrip())
3007 f.write('%s at:\n' % msg.rstrip())
3008 for line in getstackframes(skip + 1, depth=depth):
3008 for line in getstackframes(skip + 1, depth=depth):
3009 f.write(line)
3009 f.write(line)
3010 f.flush()
3010 f.flush()
3011
3011
3012 class dirs(object):
3012 class dirs(object):
3013 '''a multiset of directory names from a dirstate or manifest'''
3013 '''a multiset of directory names from a dirstate or manifest'''
3014
3014
3015 def __init__(self, map, skip=None):
3015 def __init__(self, map, skip=None):
3016 self._dirs = {}
3016 self._dirs = {}
3017 addpath = self.addpath
3017 addpath = self.addpath
3018 if safehasattr(map, 'iteritems') and skip is not None:
3018 if safehasattr(map, 'iteritems') and skip is not None:
3019 for f, s in map.iteritems():
3019 for f, s in map.iteritems():
3020 if s[0] != skip:
3020 if s[0] != skip:
3021 addpath(f)
3021 addpath(f)
3022 else:
3022 else:
3023 for f in map:
3023 for f in map:
3024 addpath(f)
3024 addpath(f)
3025
3025
3026 def addpath(self, path):
3026 def addpath(self, path):
3027 dirs = self._dirs
3027 dirs = self._dirs
3028 for base in finddirs(path):
3028 for base in finddirs(path):
3029 if base in dirs:
3029 if base in dirs:
3030 dirs[base] += 1
3030 dirs[base] += 1
3031 return
3031 return
3032 dirs[base] = 1
3032 dirs[base] = 1
3033
3033
3034 def delpath(self, path):
3034 def delpath(self, path):
3035 dirs = self._dirs
3035 dirs = self._dirs
3036 for base in finddirs(path):
3036 for base in finddirs(path):
3037 if dirs[base] > 1:
3037 if dirs[base] > 1:
3038 dirs[base] -= 1
3038 dirs[base] -= 1
3039 return
3039 return
3040 del dirs[base]
3040 del dirs[base]
3041
3041
3042 def __iter__(self):
3042 def __iter__(self):
3043 return iter(self._dirs)
3043 return iter(self._dirs)
3044
3044
3045 def __contains__(self, d):
3045 def __contains__(self, d):
3046 return d in self._dirs
3046 return d in self._dirs
3047
3047
3048 if safehasattr(parsers, 'dirs'):
3048 if safehasattr(parsers, 'dirs'):
3049 dirs = parsers.dirs
3049 dirs = parsers.dirs
3050
3050
3051 def finddirs(path):
3051 def finddirs(path):
3052 pos = path.rfind('/')
3052 pos = path.rfind('/')
3053 while pos != -1:
3053 while pos != -1:
3054 yield path[:pos]
3054 yield path[:pos]
3055 pos = path.rfind('/', 0, pos)
3055 pos = path.rfind('/', 0, pos)
3056
3056
3057 class ctxmanager(object):
3057 class ctxmanager(object):
3058 '''A context manager for use in 'with' blocks to allow multiple
3058 '''A context manager for use in 'with' blocks to allow multiple
3059 contexts to be entered at once. This is both safer and more
3059 contexts to be entered at once. This is both safer and more
3060 flexible than contextlib.nested.
3060 flexible than contextlib.nested.
3061
3061
3062 Once Mercurial supports Python 2.7+, this will become mostly
3062 Once Mercurial supports Python 2.7+, this will become mostly
3063 unnecessary.
3063 unnecessary.
3064 '''
3064 '''
3065
3065
3066 def __init__(self, *args):
3066 def __init__(self, *args):
3067 '''Accepts a list of no-argument functions that return context
3067 '''Accepts a list of no-argument functions that return context
3068 managers. These will be invoked at __call__ time.'''
3068 managers. These will be invoked at __call__ time.'''
3069 self._pending = args
3069 self._pending = args
3070 self._atexit = []
3070 self._atexit = []
3071
3071
3072 def __enter__(self):
3072 def __enter__(self):
3073 return self
3073 return self
3074
3074
3075 def enter(self):
3075 def enter(self):
3076 '''Create and enter context managers in the order in which they were
3076 '''Create and enter context managers in the order in which they were
3077 passed to the constructor.'''
3077 passed to the constructor.'''
3078 values = []
3078 values = []
3079 for func in self._pending:
3079 for func in self._pending:
3080 obj = func()
3080 obj = func()
3081 values.append(obj.__enter__())
3081 values.append(obj.__enter__())
3082 self._atexit.append(obj.__exit__)
3082 self._atexit.append(obj.__exit__)
3083 del self._pending
3083 del self._pending
3084 return values
3084 return values
3085
3085
3086 def atexit(self, func, *args, **kwargs):
3086 def atexit(self, func, *args, **kwargs):
3087 '''Add a function to call when this context manager exits. The
3087 '''Add a function to call when this context manager exits. The
3088 ordering of multiple atexit calls is unspecified, save that
3088 ordering of multiple atexit calls is unspecified, save that
3089 they will happen before any __exit__ functions.'''
3089 they will happen before any __exit__ functions.'''
3090 def wrapper(exc_type, exc_val, exc_tb):
3090 def wrapper(exc_type, exc_val, exc_tb):
3091 func(*args, **kwargs)
3091 func(*args, **kwargs)
3092 self._atexit.append(wrapper)
3092 self._atexit.append(wrapper)
3093 return func
3093 return func
3094
3094
3095 def __exit__(self, exc_type, exc_val, exc_tb):
3095 def __exit__(self, exc_type, exc_val, exc_tb):
3096 '''Context managers are exited in the reverse order from which
3096 '''Context managers are exited in the reverse order from which
3097 they were created.'''
3097 they were created.'''
3098 received = exc_type is not None
3098 received = exc_type is not None
3099 suppressed = False
3099 suppressed = False
3100 pending = None
3100 pending = None
3101 self._atexit.reverse()
3101 self._atexit.reverse()
3102 for exitfunc in self._atexit:
3102 for exitfunc in self._atexit:
3103 try:
3103 try:
3104 if exitfunc(exc_type, exc_val, exc_tb):
3104 if exitfunc(exc_type, exc_val, exc_tb):
3105 suppressed = True
3105 suppressed = True
3106 exc_type = None
3106 exc_type = None
3107 exc_val = None
3107 exc_val = None
3108 exc_tb = None
3108 exc_tb = None
3109 except BaseException:
3109 except BaseException:
3110 pending = sys.exc_info()
3110 pending = sys.exc_info()
3111 exc_type, exc_val, exc_tb = pending = sys.exc_info()
3111 exc_type, exc_val, exc_tb = pending = sys.exc_info()
3112 del self._atexit
3112 del self._atexit
3113 if pending:
3113 if pending:
3114 raise exc_val
3114 raise exc_val
3115 return received and suppressed
3115 return received and suppressed
3116
3116
3117 # compression code
3117 # compression code
3118
3118
3119 SERVERROLE = 'server'
3119 SERVERROLE = 'server'
3120 CLIENTROLE = 'client'
3120 CLIENTROLE = 'client'
3121
3121
3122 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3122 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3123 (u'name', u'serverpriority',
3123 (u'name', u'serverpriority',
3124 u'clientpriority'))
3124 u'clientpriority'))
3125
3125
3126 class compressormanager(object):
3126 class compressormanager(object):
3127 """Holds registrations of various compression engines.
3127 """Holds registrations of various compression engines.
3128
3128
3129 This class essentially abstracts the differences between compression
3129 This class essentially abstracts the differences between compression
3130 engines to allow new compression formats to be added easily, possibly from
3130 engines to allow new compression formats to be added easily, possibly from
3131 extensions.
3131 extensions.
3132
3132
3133 Compressors are registered against the global instance by calling its
3133 Compressors are registered against the global instance by calling its
3134 ``register()`` method.
3134 ``register()`` method.
3135 """
3135 """
3136 def __init__(self):
3136 def __init__(self):
3137 self._engines = {}
3137 self._engines = {}
3138 # Bundle spec human name to engine name.
3138 # Bundle spec human name to engine name.
3139 self._bundlenames = {}
3139 self._bundlenames = {}
3140 # Internal bundle identifier to engine name.
3140 # Internal bundle identifier to engine name.
3141 self._bundletypes = {}
3141 self._bundletypes = {}
3142 # Revlog header to engine name.
3142 # Revlog header to engine name.
3143 self._revlogheaders = {}
3143 self._revlogheaders = {}
3144 # Wire proto identifier to engine name.
3144 # Wire proto identifier to engine name.
3145 self._wiretypes = {}
3145 self._wiretypes = {}
3146
3146
3147 def __getitem__(self, key):
3147 def __getitem__(self, key):
3148 return self._engines[key]
3148 return self._engines[key]
3149
3149
3150 def __contains__(self, key):
3150 def __contains__(self, key):
3151 return key in self._engines
3151 return key in self._engines
3152
3152
3153 def __iter__(self):
3153 def __iter__(self):
3154 return iter(self._engines.keys())
3154 return iter(self._engines.keys())
3155
3155
3156 def register(self, engine):
3156 def register(self, engine):
3157 """Register a compression engine with the manager.
3157 """Register a compression engine with the manager.
3158
3158
3159 The argument must be a ``compressionengine`` instance.
3159 The argument must be a ``compressionengine`` instance.
3160 """
3160 """
3161 if not isinstance(engine, compressionengine):
3161 if not isinstance(engine, compressionengine):
3162 raise ValueError(_('argument must be a compressionengine'))
3162 raise ValueError(_('argument must be a compressionengine'))
3163
3163
3164 name = engine.name()
3164 name = engine.name()
3165
3165
3166 if name in self._engines:
3166 if name in self._engines:
3167 raise error.Abort(_('compression engine %s already registered') %
3167 raise error.Abort(_('compression engine %s already registered') %
3168 name)
3168 name)
3169
3169
3170 bundleinfo = engine.bundletype()
3170 bundleinfo = engine.bundletype()
3171 if bundleinfo:
3171 if bundleinfo:
3172 bundlename, bundletype = bundleinfo
3172 bundlename, bundletype = bundleinfo
3173
3173
3174 if bundlename in self._bundlenames:
3174 if bundlename in self._bundlenames:
3175 raise error.Abort(_('bundle name %s already registered') %
3175 raise error.Abort(_('bundle name %s already registered') %
3176 bundlename)
3176 bundlename)
3177 if bundletype in self._bundletypes:
3177 if bundletype in self._bundletypes:
3178 raise error.Abort(_('bundle type %s already registered by %s') %
3178 raise error.Abort(_('bundle type %s already registered by %s') %
3179 (bundletype, self._bundletypes[bundletype]))
3179 (bundletype, self._bundletypes[bundletype]))
3180
3180
3181 # No external facing name declared.
3181 # No external facing name declared.
3182 if bundlename:
3182 if bundlename:
3183 self._bundlenames[bundlename] = name
3183 self._bundlenames[bundlename] = name
3184
3184
3185 self._bundletypes[bundletype] = name
3185 self._bundletypes[bundletype] = name
3186
3186
3187 wiresupport = engine.wireprotosupport()
3187 wiresupport = engine.wireprotosupport()
3188 if wiresupport:
3188 if wiresupport:
3189 wiretype = wiresupport.name
3189 wiretype = wiresupport.name
3190 if wiretype in self._wiretypes:
3190 if wiretype in self._wiretypes:
3191 raise error.Abort(_('wire protocol compression %s already '
3191 raise error.Abort(_('wire protocol compression %s already '
3192 'registered by %s') %
3192 'registered by %s') %
3193 (wiretype, self._wiretypes[wiretype]))
3193 (wiretype, self._wiretypes[wiretype]))
3194
3194
3195 self._wiretypes[wiretype] = name
3195 self._wiretypes[wiretype] = name
3196
3196
3197 revlogheader = engine.revlogheader()
3197 revlogheader = engine.revlogheader()
3198 if revlogheader and revlogheader in self._revlogheaders:
3198 if revlogheader and revlogheader in self._revlogheaders:
3199 raise error.Abort(_('revlog header %s already registered by %s') %
3199 raise error.Abort(_('revlog header %s already registered by %s') %
3200 (revlogheader, self._revlogheaders[revlogheader]))
3200 (revlogheader, self._revlogheaders[revlogheader]))
3201
3201
3202 if revlogheader:
3202 if revlogheader:
3203 self._revlogheaders[revlogheader] = name
3203 self._revlogheaders[revlogheader] = name
3204
3204
3205 self._engines[name] = engine
3205 self._engines[name] = engine
3206
3206
3207 @property
3207 @property
3208 def supportedbundlenames(self):
3208 def supportedbundlenames(self):
3209 return set(self._bundlenames.keys())
3209 return set(self._bundlenames.keys())
3210
3210
3211 @property
3211 @property
3212 def supportedbundletypes(self):
3212 def supportedbundletypes(self):
3213 return set(self._bundletypes.keys())
3213 return set(self._bundletypes.keys())
3214
3214
3215 def forbundlename(self, bundlename):
3215 def forbundlename(self, bundlename):
3216 """Obtain a compression engine registered to a bundle name.
3216 """Obtain a compression engine registered to a bundle name.
3217
3217
3218 Will raise KeyError if the bundle type isn't registered.
3218 Will raise KeyError if the bundle type isn't registered.
3219
3219
3220 Will abort if the engine is known but not available.
3220 Will abort if the engine is known but not available.
3221 """
3221 """
3222 engine = self._engines[self._bundlenames[bundlename]]
3222 engine = self._engines[self._bundlenames[bundlename]]
3223 if not engine.available():
3223 if not engine.available():
3224 raise error.Abort(_('compression engine %s could not be loaded') %
3224 raise error.Abort(_('compression engine %s could not be loaded') %
3225 engine.name())
3225 engine.name())
3226 return engine
3226 return engine
3227
3227
3228 def forbundletype(self, bundletype):
3228 def forbundletype(self, bundletype):
3229 """Obtain a compression engine registered to a bundle type.
3229 """Obtain a compression engine registered to a bundle type.
3230
3230
3231 Will raise KeyError if the bundle type isn't registered.
3231 Will raise KeyError if the bundle type isn't registered.
3232
3232
3233 Will abort if the engine is known but not available.
3233 Will abort if the engine is known but not available.
3234 """
3234 """
3235 engine = self._engines[self._bundletypes[bundletype]]
3235 engine = self._engines[self._bundletypes[bundletype]]
3236 if not engine.available():
3236 if not engine.available():
3237 raise error.Abort(_('compression engine %s could not be loaded') %
3237 raise error.Abort(_('compression engine %s could not be loaded') %
3238 engine.name())
3238 engine.name())
3239 return engine
3239 return engine
3240
3240
3241 def supportedwireengines(self, role, onlyavailable=True):
3241 def supportedwireengines(self, role, onlyavailable=True):
3242 """Obtain compression engines that support the wire protocol.
3242 """Obtain compression engines that support the wire protocol.
3243
3243
3244 Returns a list of engines in prioritized order, most desired first.
3244 Returns a list of engines in prioritized order, most desired first.
3245
3245
3246 If ``onlyavailable`` is set, filter out engines that can't be
3246 If ``onlyavailable`` is set, filter out engines that can't be
3247 loaded.
3247 loaded.
3248 """
3248 """
3249 assert role in (SERVERROLE, CLIENTROLE)
3249 assert role in (SERVERROLE, CLIENTROLE)
3250
3250
3251 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3251 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3252
3252
3253 engines = [self._engines[e] for e in self._wiretypes.values()]
3253 engines = [self._engines[e] for e in self._wiretypes.values()]
3254 if onlyavailable:
3254 if onlyavailable:
3255 engines = [e for e in engines if e.available()]
3255 engines = [e for e in engines if e.available()]
3256
3256
3257 def getkey(e):
3257 def getkey(e):
3258 # Sort first by priority, highest first. In case of tie, sort
3258 # Sort first by priority, highest first. In case of tie, sort
3259 # alphabetically. This is arbitrary, but ensures output is
3259 # alphabetically. This is arbitrary, but ensures output is
3260 # stable.
3260 # stable.
3261 w = e.wireprotosupport()
3261 w = e.wireprotosupport()
3262 return -1 * getattr(w, attr), w.name
3262 return -1 * getattr(w, attr), w.name
3263
3263
3264 return list(sorted(engines, key=getkey))
3264 return list(sorted(engines, key=getkey))
3265
3265
3266 def forwiretype(self, wiretype):
3266 def forwiretype(self, wiretype):
3267 engine = self._engines[self._wiretypes[wiretype]]
3267 engine = self._engines[self._wiretypes[wiretype]]
3268 if not engine.available():
3268 if not engine.available():
3269 raise error.Abort(_('compression engine %s could not be loaded') %
3269 raise error.Abort(_('compression engine %s could not be loaded') %
3270 engine.name())
3270 engine.name())
3271 return engine
3271 return engine
3272
3272
3273 def forrevlogheader(self, header):
3273 def forrevlogheader(self, header):
3274 """Obtain a compression engine registered to a revlog header.
3274 """Obtain a compression engine registered to a revlog header.
3275
3275
3276 Will raise KeyError if the revlog header value isn't registered.
3276 Will raise KeyError if the revlog header value isn't registered.
3277 """
3277 """
3278 return self._engines[self._revlogheaders[header]]
3278 return self._engines[self._revlogheaders[header]]
3279
3279
# Global singleton registry through which all compression engines are
# registered and looked up.
compengines = compressormanager()
3281
3281
class compressionengine(object):
    """Abstract interface that every compression engine must implement.

    Concrete engines subclass this and are registered against the global
    compression engine manager.
    """
    def name(self):
        """Return the engine's name, used as its registration key.

        Subclasses must implement this.
        """
        raise NotImplementedError()

    def available(self):
        """Report whether this engine can be used in this installation.

        Exists so optional engines (such as ones relying on C extensions
        that may be absent) can be registered unconditionally but skipped
        at use time.
        """
        return True

    def bundletype(self):
        """Describe bundle identifiers for this engine.

        Returns None if this engine cannot be used for bundles. Otherwise
        returns a 2-tuple of strings: the user-facing "bundle spec"
        compression name and the internal identifier denoting the format
        within bundles. Set the first element to ``None`` to exclude the
        name from external usage.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.

        The docstring of this method is used in the help system to tell
        users about this engine.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        Returns None when this engine cannot compress wire protocol
        payloads. Otherwise returns a ``compenginewireprotosupport``
        carrying the string format identifier plus integer priorities for
        the server and the client.

        The priorities order the advertisement of format support: the
        highest integer is advertised first, and non-positive values are
        not advertised at all. The values are somewhat arbitrary defaults;
        the relative order can be changed via config options.

        If wire protocol compression is supported, the class must also
        implement ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Return the header identifying this engine in revlog chunks.

        Engines usable for revlog compression return the bytes used to
        identify chunks compressed with this engine; ``None`` means the
        engine does not participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        Receives an iterator (ideally a generator) of chunks of bytes and
        returns an iterator (ideally a generator) of compressed chunks.

        ``opts`` optionally defines how to perform compression; each
        engine treats it differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        ``fh`` must expose a ``read(size)`` method returning compressed
        data. The return value exposes a ``read(size)`` method returning
        uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can compress/decompress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data, returning either compressed bytes or ``None`` if the data
        could not be compressed (too small, not compressible, etc). The
        returned data should start with a header uniquely identifying this
        format so decompression can be routed back here; that header is the
        ``revlogheader()`` return value.

        The object also has a ``decompress(data)`` method, only called
        when ``data`` begins with ``revlogheader()``, that returns the raw
        uncompressed data or raises a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()
3398
3398
class _zlibengine(compressionengine):
    """DEFLATE (zlib) compression engine — the historical default."""

    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        compobj = zlib.compressobj(opts.get('level', -1))
        for piece in it:
            emitted = compobj.compress(piece)
            # Not every compress() call emits output. It is cheaper to
            # filter empty results here than to push them through the
            # generator pipeline.
            if emitted:
                yield emitted

        yield compobj.flush()

    def decompressorreader(self, fh):
        def inner():
            decomp = zlib.decompressobj()
            for piece in filechunkiter(fh):
                while piece:
                    # Cap each output chunk at 256 KiB to bound memory.
                    yield decomp.decompress(piece, 2 ** 18)
                    piece = decomp.unconsumed_tail

        return chunkbuffer(inner())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            # Tiny payloads never win once the zlib header is added.
            if insize < 44:
                return None

            if insize <= 1000000:
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            compobj = zlib.compressobj()
            pieces = []
            offset = 0
            while offset < insize:
                end = offset + 2 ** 20
                pieces.append(compobj.compress(data[offset:end]))
                offset = end
            pieces.append(compobj.flush())

            if sum(len(p) for p in pieces) < insize:
                return ''.join(pieces)
            return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()
3483
3483
# Make the zlib engine available through the global registry.
compengines.register(_zlibengine())
3485
3485
class _bz2engine(compressionengine):
    """bzip2 compression engine: smaller output than zlib, much slower."""

    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        compobj = bz2.BZ2Compressor(opts.get('level', 9))
        for piece in it:
            emitted = compobj.compress(piece)
            if emitted:
                yield emitted

        yield compobj.flush()

    def decompressorreader(self, fh):
        def inner():
            decomp = bz2.BZ2Decompressor()
            for piece in filechunkiter(fh):
                yield decomp.decompress(piece)

        return chunkbuffer(inner())
3526
3526
# Make the bzip2 engine available through the global registry.
compengines.register(_bz2engine())
3528
3528
class _truncatedbz2engine(compressionengine):
    """Decompression-only engine for bz2 streams missing the 'BZ' magic."""

    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        # No user-facing spec name; only the internal identifier.
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled
    # elsewhere.

    def decompressorreader(self, fh):
        def inner():
            # The input stream doesn't have the 'BZ' header. So add it back.
            decomp = bz2.BZ2Decompressor()
            decomp.decompress('BZ')
            for piece in filechunkiter(fh):
                yield decomp.decompress(piece)

        return chunkbuffer(inner())
3547
3547
# Make the header-less bzip2 engine available through the global registry.
compengines.register(_truncatedbz2engine())
3549
3549
class _noopengine(compressionengine):
    """Identity engine: performs no compression at all."""

    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        # Identity transform: the input chunks are already final.
        return it

    def decompressorreader(self, fh):
        # Identity transform: the raw file object already yields plain data.
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            # Returning None tells the revlog to store data uncompressed.
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()
3582
3582
# Make the no-op engine available through the global registry.
compengines.register(_noopengine())
3584
3584
3585 class _zstdengine(compressionengine):
3585 class _zstdengine(compressionengine):
3586 def name(self):
3586 def name(self):
3587 return 'zstd'
3587 return 'zstd'
3588
3588
3589 @propertycache
3589 @propertycache
3590 def _module(self):
3590 def _module(self):
3591 # Not all installs have the zstd module available. So defer importing
3591 # Not all installs have the zstd module available. So defer importing
3592 # until first access.
3592 # until first access.
3593 try:
3593 try:
3594 from . import zstd
3594 from . import zstd
3595 # Force delayed import.
3595 # Force delayed import.
3596 zstd.__version__
3596 zstd.__version__
3597 return zstd
3597 return zstd
3598 except ImportError:
3598 except ImportError:
3599 return None
3599 return None
3600
3600
3601 def available(self):
3601 def available(self):
3602 return bool(self._module)
3602 return bool(self._module)
3603
3603
3604 def bundletype(self):
3604 def bundletype(self):
3605 """A modern compression algorithm that is fast and highly flexible.
3605 """A modern compression algorithm that is fast and highly flexible.
3606
3606
3607 Only supported by Mercurial 4.1 and newer clients.
3607 Only supported by Mercurial 4.1 and newer clients.
3608
3608
3609 With the default settings, zstd compression is both faster and yields
3609 With the default settings, zstd compression is both faster and yields
3610 better compression than ``gzip``. It also frequently yields better
3610 better compression than ``gzip``. It also frequently yields better
3611 compression than ``bzip2`` while operating at much higher speeds.
3611 compression than ``bzip2`` while operating at much higher speeds.
3612
3612
3613 If this engine is available and backwards compatibility is not a
3613 If this engine is available and backwards compatibility is not a
3614 concern, it is likely the best available engine.
3614 concern, it is likely the best available engine.
3615 """
3615 """
3616 return 'zstd', 'ZS'
3616 return 'zstd', 'ZS'
3617
3617
3618 def wireprotosupport(self):
3618 def wireprotosupport(self):
3619 return compewireprotosupport('zstd', 50, 50)
3619 return compewireprotosupport('zstd', 50, 50)
3620
3620
3621 def revlogheader(self):
3621 def revlogheader(self):
3622 return '\x28'
3622 return '\x28'
3623
3623
3624 def compressstream(self, it, opts=None):
3624 def compressstream(self, it, opts=None):
3625 opts = opts or {}
3625 opts = opts or {}
3626 # zstd level 3 is almost always significantly faster than zlib
3626 # zstd level 3 is almost always significantly faster than zlib
3627 # while providing no worse compression. It strikes a good balance
3627 # while providing no worse compression. It strikes a good balance
3628 # between speed and compression.
3628 # between speed and compression.
3629 level = opts.get('level', 3)
3629 level = opts.get('level', 3)
3630
3630
3631 zstd = self._module
3631 zstd = self._module
3632 z = zstd.ZstdCompressor(level=level).compressobj()
3632 z = zstd.ZstdCompressor(level=level).compressobj()
3633 for chunk in it:
3633 for chunk in it:
3634 data = z.compress(chunk)
3634 data = z.compress(chunk)
3635 if data:
3635 if data:
3636 yield data
3636 yield data
3637
3637
3638 yield z.flush()
3638 yield z.flush()
3639
3639
3640 def decompressorreader(self, fh):
3640 def decompressorreader(self, fh):
3641 zstd = self._module
3641 zstd = self._module
3642 dctx = zstd.ZstdDecompressor()
3642 dctx = zstd.ZstdDecompressor()
3643 return chunkbuffer(dctx.read_from(fh))
3643 return chunkbuffer(dctx.read_from(fh))
3644
3644
class zstdrevlogcompressor(object):
    """Compressor/decompressor for revlog chunks backed by python-zstandard.

    ``compress`` returns None when compression is not worthwhile so the
    caller can store the chunk uncompressed.
    """

    def __init__(self, zstd, level=3):
        # Writing the content size adds a few bytes to the output. However,
        # it allows decompression to be more optimal since we can
        # pre-allocate a buffer to hold the result.
        self._cctx = zstd.ZstdCompressor(level=level,
                                         write_content_size=True)
        self._dctx = zstd.ZstdDecompressor()
        self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
        self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

    def compress(self, data):
        """Compress ``data``; return None if storing raw is better."""
        insize = len(data)
        # Caller handles empty input case.
        assert insize > 0

        if insize < 50:
            # Tiny inputs essentially never compress usefully.
            return None
        elif insize <= 1000000:
            # Small enough to compress with a single call.
            compressed = self._cctx.compress(data)
            if len(compressed) < insize:
                return compressed
            return None
        else:
            # Stream large inputs through the compressor in
            # recommended-size windows.
            z = self._cctx.compressobj()
            chunks = []
            offset = 0
            while offset < insize:
                end = offset + self._compinsize
                piece = z.compress(data[offset:end])
                if piece:
                    chunks.append(piece)
                offset = end
            chunks.append(z.flush())

            if sum(len(c) for c in chunks) < insize:
                return ''.join(chunks)
            return None

    def decompress(self, data):
        """Decompress ``data``; raise RevlogError on any failure."""
        insize = len(data)

        try:
            # This was measured to be faster than other streaming
            # decompressors.
            dobj = self._dctx.decompressobj()
            pieces = []
            offset = 0
            while offset < insize:
                end = offset + self._decompinsize
                piece = dobj.decompress(data[offset:end])
                if piece:
                    pieces.append(piece)
                offset = end
            # Frame should be exhausted, so no finish() API.

            return ''.join(pieces)
        except Exception as e:
            raise error.RevlogError(_('revlog decompress error: %s') %
                                    str(e))
3706
3706
def revlogcompressor(self, opts=None):
    """Build a revlog compressor, honoring an optional ``level`` option."""
    opts = opts or {}
    level = opts.get('level', 3)
    return self.zstdrevlogcompressor(self._module, level=level)
3711
3711
# Make the zstd engine discoverable through the shared compression-engine
# registry alongside the other engines registered in this module.
compengines.register(_zstdengine())
3713
3713
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    items = {}
    for name in compengines:
        engine = compengines[name]

        if not engine.available():
            continue

        bt = engine.bundletype()
        if not bt or not bt[0]:
            continue

        doc = pycompat.sysstr('``%s``\n    %s') % (
            bt[0], engine.bundletype.__doc__)

        holder = docobject()
        holder.__doc__ = doc
        items[bt[0]] = holder

    return items
3743
3743
# Convenient shortcut: ``dst`` is a short alias for debugstacktrace, handy
# when sprinkling temporary stack-trace dumps through the code.
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now