rebase: use one dirstateguard for when using rebase.singletransaction...
Durham Goode - r33621:609606d2 (default)
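Editor's note: with rebase.singletransaction=True, rebase previously opened a fresh
dirstateguard inside concludenode() for every rebased changeset. This patch (against
hgext/rebase.py) hoists that guard: the single-transaction path now holds one
dirstateguard for the whole run, created alongside the transaction in rebase() and
around the collapse commit in _finishrebase(), while concludenode() falls back to
util.nullcontextmanager() in that mode. Both the transaction and the guard are entered
through util.acceptintervention(), which still closes its argument when
InterventionRequired is raised (e.g. when the rebase stops for conflict resolution),
so the interrupted state is written out rather than rolled back. A minimal sketch of
the resulting nesting, condensed by the editor from the hunks below and not literal
patch text::

    singletr = ui.configbool('rebase', 'singletransaction')
    tr = repo.transaction('rebase') if singletr else None
    with util.acceptintervention(tr):
        # One dirstate write for the whole rebase instead of one per commit.
        dsguard = None
        if singletr:
            dsguard = dirstateguard.dirstateguard(repo, 'rebase')
        with util.acceptintervention(dsguard):
            rbsrt._performrebase(tr)
    rbsrt._finishrebase()

Without the config option, behaviour is unchanged: concludenode() still takes a
per-commit guard, now expressed as a with block instead of try/finally with an
explicit dsguard.close().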
@@ -1,1540 +1,1550 @@
# rebase.py - rebasing feature for mercurial
#
# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''command to move sets of revisions to a different ancestor

This extension lets you rebase changesets in an existing Mercurial
repository.

For more information:
https://mercurial-scm.org/wiki/RebaseExtension
'''

from __future__ import absolute_import

import errno
import os

from mercurial.i18n import _
from mercurial.node import (
    hex,
    nullid,
    nullrev,
    short,
)
from mercurial import (
    bookmarks,
    cmdutil,
    commands,
    copies,
    destutil,
    dirstateguard,
    error,
    extensions,
    hg,
    lock,
    merge as mergemod,
    mergeutil,
    obsolete,
    obsutil,
    patch,
    phases,
    registrar,
    repair,
    repoview,
    revset,
    scmutil,
    smartset,
    util,
)

release = lock.release
templateopts = cmdutil.templateopts

# The following constants are used throughout the rebase module. The ordering of
# their values must be maintained.

# Indicates that a revision needs to be rebased
revtodo = -1
nullmerge = -2
revignored = -3
# successor in rebase destination
revprecursor = -4
# plain prune (no successor)
revpruned = -5
revskipped = (revignored, revprecursor, revpruned)

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

def _nothingtorebase():
    return 1

def _savegraft(ctx, extra):
    s = ctx.extra().get('source', None)
    if s is not None:
        extra['source'] = s
    s = ctx.extra().get('intermediate-source', None)
    if s is not None:
        extra['intermediate-source'] = s

def _savebranch(ctx, extra):
    extra['branch'] = ctx.branch()

def _makeextrafn(copiers):
    """make an extrafn out of the given copy-functions.

    A copy function takes a context and an extra dict, and mutates the
    extra dict as needed based on the given context.
    """
    def extrafn(ctx, extra):
        for c in copiers:
            c(ctx, extra)
    return extrafn

def _destrebase(repo, sourceset, destspace=None):
    """small wrapper around destmerge to pass the right extra args

    Please wrap destutil.destmerge instead."""
    return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
                              onheadcheck=False, destspace=destspace)

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``

    # default destination for rebase.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # i18n: "_rebasedefaultdest" is a keyword
    sourceset = None
    if x is not None:
        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
    return subset & smartset.baseset([_destrebase(repo, sourceset)])

class rebaseruntime(object):
    """This class is a container for rebase runtime state"""
    def __init__(self, repo, ui, opts=None):
        if opts is None:
            opts = {}

        self.repo = repo
        self.ui = ui
        self.opts = opts
        self.originalwd = None
        self.external = nullrev
        # Mapping between the old revision id and either what is the new rebased
        # revision or what needs to be done with the old revision. The state
        # dict will be what contains most of the rebase progress state.
        self.state = {}
        self.activebookmark = None
        self.dest = None
        self.skipped = set()
        self.destancestors = set()

        self.collapsef = opts.get('collapse', False)
        self.collapsemsg = cmdutil.logmessage(ui, opts)
        self.date = opts.get('date', None)

        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        self.extrafns = [_savegraft]
        if e:
            self.extrafns = [e]

        self.keepf = opts.get('keep', False)
        self.keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        self.keepopen = opts.get('keepopen', False)
        self.obsoletenotrebased = {}

    def storestatus(self, tr=None):
        """Store the current status to allow recovery"""
        if tr:
            tr.addfilegenerator('rebasestate', ('rebasestate',),
                                self._writestatus, location='plain')
        else:
            with self.repo.vfs("rebasestate", "w") as f:
                self._writestatus(f)

    def _writestatus(self, f):
        repo = self.repo.unfiltered()
        f.write(repo[self.originalwd].hex() + '\n')
        f.write(repo[self.dest].hex() + '\n')
        f.write(repo[self.external].hex() + '\n')
        f.write('%d\n' % int(self.collapsef))
        f.write('%d\n' % int(self.keepf))
        f.write('%d\n' % int(self.keepbranchesf))
        f.write('%s\n' % (self.activebookmark or ''))
        for d, v in self.state.iteritems():
            oldrev = repo[d].hex()
            if v >= 0:
                newrev = repo[v].hex()
            elif v == revtodo:
                # To maintain format compatibility, we have to use nullid.
                # Please do remove this special case when upgrading the format.
                newrev = hex(nullid)
            else:
                newrev = v
            f.write("%s:%s\n" % (oldrev, newrev))
        repo.ui.debug('rebase status stored\n')

    def restorestatus(self):
        """Restore a previously stored status"""
        repo = self.repo
        keepbranches = None
        dest = None
        collapse = False
        external = nullrev
        activebookmark = None
        state = {}

        try:
            f = repo.vfs("rebasestate")
            for i, l in enumerate(f.read().splitlines()):
                if i == 0:
                    originalwd = repo[l].rev()
                elif i == 1:
                    dest = repo[l].rev()
                elif i == 2:
                    external = repo[l].rev()
                elif i == 3:
                    collapse = bool(int(l))
                elif i == 4:
                    keep = bool(int(l))
                elif i == 5:
                    keepbranches = bool(int(l))
                elif i == 6 and not (len(l) == 81 and ':' in l):
                    # line 6 is a recent addition, so for backwards
                    # compatibility check that the line doesn't look like the
                    # oldrev:newrev lines
                    activebookmark = l
                else:
                    oldrev, newrev = l.split(':')
                    if newrev in (str(nullmerge), str(revignored),
                                  str(revprecursor), str(revpruned)):
                        state[repo[oldrev].rev()] = int(newrev)
                    elif newrev == nullid:
                        state[repo[oldrev].rev()] = revtodo
                        # Legacy compat special case
                    else:
                        state[repo[oldrev].rev()] = repo[newrev].rev()

        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            cmdutil.wrongtooltocontinue(repo, _('rebase'))

        if keepbranches is None:
            raise error.Abort(_('.hg/rebasestate is incomplete'))

        skipped = set()
        # recompute the set of skipped revs
        if not collapse:
            seen = {dest}
            for old, new in sorted(state.items()):
                if new != revtodo and new in seen:
                    skipped.add(old)
                seen.add(new)
        repo.ui.debug('computed skipped revs: %s\n' %
                      (' '.join(str(r) for r in sorted(skipped)) or None))
        repo.ui.debug('rebase status resumed\n')
        _setrebasesetvisibility(repo, set(state.keys()) | {originalwd})

        self.originalwd = originalwd
        self.dest = dest
        self.state = state
        self.skipped = skipped
        self.collapsef = collapse
        self.keepf = keep
        self.keepbranchesf = keepbranches
        self.external = external
        self.activebookmark = activebookmark

    def _handleskippingobsolete(self, rebaserevs, obsoleterevs, dest):
        """Compute structures necessary for skipping obsolete revisions

        rebaserevs: iterable of all revisions that are to be rebased
        obsoleterevs: iterable of all obsolete revisions in rebaseset
        dest: a destination revision for the rebase operation
        """
        self.obsoletenotrebased = {}
        if not self.ui.configbool('experimental', 'rebaseskipobsolete',
                                  default=True):
            return
        rebaseset = set(rebaserevs)
        obsoleteset = set(obsoleterevs)
        self.obsoletenotrebased = _computeobsoletenotrebased(self.repo,
                                                             obsoleteset, dest)
        skippedset = set(self.obsoletenotrebased)
        _checkobsrebase(self.repo, self.ui, obsoleteset, rebaseset, skippedset)

    def _prepareabortorcontinue(self, isabort):
        try:
            self.restorestatus()
            self.collapsemsg = restorecollapsemsg(self.repo, isabort)
        except error.RepoLookupError:
            if isabort:
                clearstatus(self.repo)
                clearcollapsemsg(self.repo)
                self.repo.ui.warn(_('rebase aborted (no revision is removed,'
                                    ' only broken state is cleared)\n'))
                return 0
            else:
                msg = _('cannot continue inconsistent rebase')
                hint = _('use "hg rebase --abort" to clear broken state')
                raise error.Abort(msg, hint=hint)
        if isabort:
            return abort(self.repo, self.originalwd, self.dest,
                         self.state, activebookmark=self.activebookmark)

        obsrevs = (r for r, st in self.state.items() if st == revprecursor)
        self._handleskippingobsolete(self.state.keys(), obsrevs, self.dest)

    def _preparenewrebase(self, dest, rebaseset):
        if dest is None:
            return _nothingtorebase()

        allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
        if (not (self.keepf or allowunstable)
            and self.repo.revs('first(children(%ld) - %ld)',
                               rebaseset, rebaseset)):
            raise error.Abort(
                _("can't remove original changesets with"
                  " unrebased descendants"),
                hint=_('use --keep to keep original changesets'))

        obsrevs = _filterobsoleterevs(self.repo, set(rebaseset))
        self._handleskippingobsolete(rebaseset, obsrevs, dest.rev())

        result = buildstate(self.repo, dest, rebaseset, self.collapsef,
                            self.obsoletenotrebased)

        if not result:
            # Empty state built, nothing to rebase
            self.ui.status(_('nothing to rebase\n'))
            return _nothingtorebase()

        for root in self.repo.set('roots(%ld)', rebaseset):
            if not self.keepf and not root.mutable():
                raise error.Abort(_("can't rebase public changeset %s")
                                  % root,
                                  hint=_("see 'hg help phases' for details"))

        (self.originalwd, self.dest, self.state) = result
        if self.collapsef:
            self.destancestors = self.repo.changelog.ancestors(
                                        [self.dest],
                                        inclusive=True)
            self.external = externalparent(self.repo, self.state,
                                           self.destancestors)

        if dest.closesbranch() and not self.keepbranchesf:
            self.ui.status(_('reopening closed branch head %s\n') % dest)

    def _performrebase(self, tr):
        repo, ui, opts = self.repo, self.ui, self.opts
        if self.keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            self.extrafns.insert(0, _savebranch)
            if self.collapsef:
                branches = set()
                for rev in self.state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.Abort(_('cannot collapse multiple named '
                                        'branches'))

        # Rebase
        if not self.destancestors:
            self.destancestors = repo.changelog.ancestors([self.dest],
                                                          inclusive=True)

        # Keep track of the active bookmarks in order to reset them later
        self.activebookmark = self.activebookmark or repo._activebookmark
        if self.activebookmark:
            bookmarks.deactivate(repo)

        # Store the state before we begin so users can run 'hg rebase --abort'
        # if we fail before the transaction closes.
        self.storestatus()

        sortedrevs = repo.revs('sort(%ld, -topo)', self.state)
        cands = [k for k, v in self.state.iteritems() if v == revtodo]
        total = len(cands)
        pos = 0
        for rev in sortedrevs:
            ctx = repo[rev]
            desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                                   ctx.description().split('\n', 1)[0])
            names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
            if names:
                desc += ' (%s)' % ' '.join(names)
            if self.state[rev] == rev:
                ui.status(_('already rebased %s\n') % desc)
            elif self.state[rev] == revtodo:
                pos += 1
                ui.status(_('rebasing %s\n') % desc)
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
                            _('changesets'), total)
                p1, p2, base = defineparents(repo, rev, self.dest,
                                             self.state,
                                             self.destancestors,
                                             self.obsoletenotrebased)
                self.storestatus(tr=tr)
                storecollapsemsg(repo, self.collapsemsg)
                if len(repo[None].parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                     'rebase')
                        stats = rebasenode(repo, rev, p1, base, self.state,
                                           self.collapsef, self.dest)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                if not self.collapsef:
                    merging = p2 != nullrev
                    editform = cmdutil.mergeeditform(merging, 'rebase')
                    editor = cmdutil.getcommiteditor(editform=editform, **opts)
                    newnode = concludenode(repo, rev, p1, p2,
                                           extrafn=_makeextrafn(self.extrafns),
                                           editor=editor,
                                           keepbranches=self.keepbranchesf,
                                           date=self.date)
                    if newnode is None:
                        # If it ended up being a no-op commit, then the normal
                        # merge state clean-up path doesn't happen, so do it
                        # here. Fix issue5494
                        mergemod.mergestate.clean(repo)
                else:
                    # Skip commit if we are collapsing
                    repo.setparents(repo[p1].node())
                    newnode = None
                # Update the state
                if newnode is not None:
                    self.state[rev] = repo[newnode].rev()
                    ui.debug('rebased as %s\n' % short(newnode))
                else:
                    if not self.collapsef:
                        ui.warn(_('note: rebase of %d:%s created no changes '
                                  'to commit\n') % (rev, ctx))
                        self.skipped.add(rev)
                    self.state[rev] = p1
                    ui.debug('next revision set to %s\n' % p1)
            elif self.state[rev] == nullmerge:
                ui.debug('ignoring null merge rebase of %s\n' % rev)
            elif self.state[rev] == revignored:
                ui.status(_('not rebasing ignored %s\n') % desc)
            elif self.state[rev] == revprecursor:
                destctx = repo[self.obsoletenotrebased[rev]]
                descdest = '%d:%s "%s"' % (destctx.rev(), destctx,
                           destctx.description().split('\n', 1)[0])
                msg = _('note: not rebasing %s, already in destination as %s\n')
                ui.status(msg % (desc, descdest))
            elif self.state[rev] == revpruned:
                msg = _('note: not rebasing %s, it has no successor\n')
                ui.status(msg % desc)
            else:
                ui.status(_('already rebased %s as %s\n') %
                          (desc, repo[self.state[rev]]))

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

    def _finishrebase(self):
        repo, ui, opts = self.repo, self.ui, self.opts
        if self.collapsef and not self.keepopen:
            p1, p2, _base = defineparents(repo, min(self.state),
                                          self.dest, self.state,
                                          self.destancestors,
                                          self.obsoletenotrebased)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if self.collapsemsg:
                commitmsg = self.collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in self.state:
                    if rebased not in self.skipped and\
                       self.state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            revtoreuse = max(self.state)
-            newnode = concludenode(repo, revtoreuse, p1, self.external,
-                                   commitmsg=commitmsg,
-                                   extrafn=_makeextrafn(self.extrafns),
-                                   editor=editor,
-                                   keepbranches=self.keepbranchesf,
-                                   date=self.date)
+
+            dsguard = None
+            if ui.configbool('rebase', 'singletransaction'):
+                dsguard = dirstateguard.dirstateguard(repo, 'rebase')
+            with util.acceptintervention(dsguard):
+                newnode = concludenode(repo, revtoreuse, p1, self.external,
+                                       commitmsg=commitmsg,
+                                       extrafn=_makeextrafn(self.extrafns),
+                                       editor=editor,
+                                       keepbranches=self.keepbranchesf,
+                                       date=self.date)
            if newnode is None:
                newrev = self.dest
            else:
                newrev = repo[newnode].rev()
            for oldrev in self.state.iterkeys():
                if self.state[oldrev] > nullmerge:
                    self.state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, self.state, self.skipped, **opts)

        # restore original working directory
        # (we do this before stripping)
        newwd = self.state.get(self.originalwd, self.originalwd)
        if newwd == revprecursor:
            newwd = self.obsoletenotrebased[self.originalwd]
        elif newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = self.originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not self.keepf:
            collapsedas = None
            if self.collapsef:
                collapsedas = newnode
            clearrebased(ui, repo, self.dest, self.state, self.skipped,
                         collapsedas)

        clearstatus(repo)
        clearcollapsemsg(repo)

        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if self.skipped:
            skippedlen = len(self.skipped)
            ui.note(_("%d revisions have been skipped\n") % skippedlen)

        if (self.activebookmark and self.activebookmark in repo._bookmarks and
            repo['.'].node() == repo._bookmarks[self.activebookmark]):
            bookmarks.activate(repo, self.activebookmark)

@command('rebase',
    [('s', 'source', '',
     _('rebase the specified changeset and descendants'), _('REV')),
    ('b', 'base', '',
     _('rebase everything from branching point of specified changeset'),
     _('REV')),
    ('r', 'rev', [],
     _('rebase these revisions'),
     _('REV')),
    ('d', 'dest', '',
     _('rebase onto the specified changeset'), _('REV')),
    ('', 'collapse', False, _('collapse the rebased changesets')),
    ('m', 'message', '',
     _('use text as collapse commit message'), _('TEXT')),
    ('e', 'edit', False, _('invoke editor on commit messages')),
    ('l', 'logfile', '',
     _('read collapse commit message from file'), _('FILE')),
    ('k', 'keep', False, _('keep original changesets')),
    ('', 'keepbranches', False, _('keep original branch names')),
    ('D', 'detach', False, _('(DEPRECATED)')),
    ('i', 'interactive', False, _('(DEPRECATED)')),
    ('t', 'tool', '', _('specify merge tool')),
    ('c', 'continue', False, _('continue an interrupted rebase')),
    ('a', 'abort', False, _('abort an interrupted rebase'))] +
     templateopts,
    _('[-s REV | -b REV] [-d REV] [OPTION]'))
def rebase(ui, repo, **opts):
558 """move changeset (and descendants) to a different branch
563 """move changeset (and descendants) to a different branch
559
564
560 Rebase uses repeated merging to graft changesets from one part of
565 Rebase uses repeated merging to graft changesets from one part of
561 history (the source) onto another (the destination). This can be
566 history (the source) onto another (the destination). This can be
562 useful for linearizing *local* changes relative to a master
567 useful for linearizing *local* changes relative to a master
563 development tree.
568 development tree.
564
569
565 Published commits cannot be rebased (see :hg:`help phases`).
570 Published commits cannot be rebased (see :hg:`help phases`).
566 To copy commits, see :hg:`help graft`.
571 To copy commits, see :hg:`help graft`.
567
572
568 If you don't specify a destination changeset (``-d/--dest``), rebase
573 If you don't specify a destination changeset (``-d/--dest``), rebase
569 will use the same logic as :hg:`merge` to pick a destination. if
574 will use the same logic as :hg:`merge` to pick a destination. if
570 the current branch contains exactly one other head, the other head
575 the current branch contains exactly one other head, the other head
571 is merged with by default. Otherwise, an explicit revision with
576 is merged with by default. Otherwise, an explicit revision with
572 which to merge with must be provided. (destination changeset is not
577 which to merge with must be provided. (destination changeset is not
573 modified by rebasing, but new changesets are added as its
578 modified by rebasing, but new changesets are added as its
574 descendants.)
579 descendants.)
575
580
576 Here are the ways to select changesets:
581 Here are the ways to select changesets:
577
582
578 1. Explicitly select them using ``--rev``.
583 1. Explicitly select them using ``--rev``.
579
584
580 2. Use ``--source`` to select a root changeset and include all of its
585 2. Use ``--source`` to select a root changeset and include all of its
581 descendants.
586 descendants.
582
587
583 3. Use ``--base`` to select a changeset; rebase will find ancestors
588 3. Use ``--base`` to select a changeset; rebase will find ancestors
584 and their descendants which are not also ancestors of the destination.
589 and their descendants which are not also ancestors of the destination.
585
590
586 4. If you do not specify any of ``--rev``, ``source``, or ``--base``,
591 4. If you do not specify any of ``--rev``, ``source``, or ``--base``,
587 rebase will use ``--base .`` as above.
592 rebase will use ``--base .`` as above.
588
593
589 Rebase will destroy original changesets unless you use ``--keep``.
594 Rebase will destroy original changesets unless you use ``--keep``.
590 It will also move your bookmarks (even if you do).
595 It will also move your bookmarks (even if you do).
591
596
592 Some changesets may be dropped if they do not contribute changes
597 Some changesets may be dropped if they do not contribute changes
593 (e.g. merges from the destination branch).
598 (e.g. merges from the destination branch).
594
599
595 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
600 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
596 a named branch with two heads. You will need to explicitly specify source
601 a named branch with two heads. You will need to explicitly specify source
597 and/or destination.
602 and/or destination.
598
603
599 If you need to use a tool to automate merge/conflict decisions, you
604 If you need to use a tool to automate merge/conflict decisions, you
600 can specify one with ``--tool``, see :hg:`help merge-tools`.
605 can specify one with ``--tool``, see :hg:`help merge-tools`.
601 As a caveat: the tool will not be used to mediate when a file was
606 As a caveat: the tool will not be used to mediate when a file was
602 deleted, there is no hook presently available for this.
607 deleted, there is no hook presently available for this.
603
608
604 If a rebase is interrupted to manually resolve a conflict, it can be
609 If a rebase is interrupted to manually resolve a conflict, it can be
605 continued with --continue/-c or aborted with --abort/-a.
610 continued with --continue/-c or aborted with --abort/-a.
606
611
607 .. container:: verbose
612 .. container:: verbose
608
613
609 Examples:
614 Examples:
610
615
611 - move "local changes" (current commit back to branching point)
616 - move "local changes" (current commit back to branching point)
612 to the current branch tip after a pull::
617 to the current branch tip after a pull::
613
618
614 hg rebase
619 hg rebase
615
620
616 - move a single changeset to the stable branch::
621 - move a single changeset to the stable branch::
617
622
618 hg rebase -r 5f493448 -d stable
623 hg rebase -r 5f493448 -d stable
619
624
620 - splice a commit and all its descendants onto another part of history::
625 - splice a commit and all its descendants onto another part of history::
621
626
622 hg rebase --source c0c3 --dest 4cf9
627 hg rebase --source c0c3 --dest 4cf9
623
628
624 - rebase everything on a branch marked by a bookmark onto the
629 - rebase everything on a branch marked by a bookmark onto the
625 default branch::
630 default branch::
626
631
627 hg rebase --base myfeature --dest default
632 hg rebase --base myfeature --dest default
628
633
629 - collapse a sequence of changes into a single commit::
634 - collapse a sequence of changes into a single commit::
630
635
631 hg rebase --collapse -r 1520:1525 -d .
636 hg rebase --collapse -r 1520:1525 -d .
632
637
633 - move a named branch while preserving its name::
638 - move a named branch while preserving its name::
634
639
635 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
640 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
636
641
637 Configuration Options:
642 Configuration Options:
638
643
639 You can make rebase require a destination if you set the following config
644 You can make rebase require a destination if you set the following config
640 option::
645 option::
641
646
642 [commands]
647 [commands]
643 rebase.requiredest = True
648 rebase.requiredest = True
644
649
645 By default, rebase will close the transaction after each commit. For
650 By default, rebase will close the transaction after each commit. For
646 performance purposes, you can configure rebase to use a single transaction
651 performance purposes, you can configure rebase to use a single transaction
647 across the entire rebase. WARNING: This setting introduces a significant
652 across the entire rebase. WARNING: This setting introduces a significant
648 risk of losing the work you've done in a rebase if the rebase aborts
653 risk of losing the work you've done in a rebase if the rebase aborts
649 unexpectedly::
654 unexpectedly::
650
655
651 [rebase]
656 [rebase]
652 singletransaction = True
657 singletransaction = True
653
658
654 Return Values:
659 Return Values:
655
660
656 Returns 0 on success, 1 if nothing to rebase or there are
661 Returns 0 on success, 1 if nothing to rebase or there are
657 unresolved conflicts.
662 unresolved conflicts.
658
663
659 """
664 """
    rbsrt = rebaseruntime(repo, ui, opts)

    with repo.wlock(), repo.lock():
        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        # search default destination in this space
        # used in the 'hg pull --rebase' case, see issue 5214.
        destspace = opts.get('_destspace')
        contf = opts.get('continue')
        abortf = opts.get('abort')
        if opts.get('interactive'):
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            if contf and abortf:
                raise error.Abort(_('cannot use both abort and continue'))
            if rbsrt.collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if abortf and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))
            if contf:
                ms = mergemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(abortf)
            if retcode is not None:
                return retcode
        else:
            dest, rebaseset = _definesets(ui, repo, destf, srcf, basef, revf,
                                          destspace=destspace)
            retcode = rbsrt._preparenewrebase(dest, rebaseset)
            if retcode is not None:
                return retcode

        tr = None
720
721 singletr = ui.configbool('rebase', 'singletransaction')
722 if singletr:
715 tr = repo.transaction('rebase')
723 tr = repo.transaction('rebase')
716 with util.acceptintervention(tr):
724 with util.acceptintervention(tr):
725 if singletr:
726 dsguard = dirstateguard.dirstateguard(repo, 'rebase')
727 with util.acceptintervention(dsguard):
717 rbsrt._performrebase(tr)
728 rbsrt._performrebase(tr)
718
729
719 rbsrt._finishrebase()
730 rbsrt._finishrebase()
720
731
721 def _definesets(ui, repo, destf=None, srcf=None, basef=None, revf=None,
732 def _definesets(ui, repo, destf=None, srcf=None, basef=None, revf=None,
722 destspace=None):
733 destspace=None):
723 """use revisions argument to define destination and rebase set
734 """use revisions argument to define destination and rebase set
724 """
735 """
725 if revf is None:
736 if revf is None:
726 revf = []
737 revf = []
727
738
728 # destspace is here to work around issues with `hg pull --rebase` see
739 # destspace is here to work around issues with `hg pull --rebase` see
729 # issue5214 for details
740 # issue5214 for details
730 if srcf and basef:
741 if srcf and basef:
731 raise error.Abort(_('cannot specify both a source and a base'))
742 raise error.Abort(_('cannot specify both a source and a base'))
732 if revf and basef:
743 if revf and basef:
733 raise error.Abort(_('cannot specify both a revision and a base'))
744 raise error.Abort(_('cannot specify both a revision and a base'))
734 if revf and srcf:
745 if revf and srcf:
735 raise error.Abort(_('cannot specify both a revision and a source'))
746 raise error.Abort(_('cannot specify both a revision and a source'))
736
747
737 cmdutil.checkunfinished(repo)
748 cmdutil.checkunfinished(repo)
738 cmdutil.bailifchanged(repo)
749 cmdutil.bailifchanged(repo)
739
750
740 if ui.configbool('commands', 'rebase.requiredest') and not destf:
751 if ui.configbool('commands', 'rebase.requiredest') and not destf:
741 raise error.Abort(_('you must specify a destination'),
752 raise error.Abort(_('you must specify a destination'),
742 hint=_('use: hg rebase -d REV'))
753 hint=_('use: hg rebase -d REV'))
743
754
744 if destf:
755 if destf:
745 dest = scmutil.revsingle(repo, destf)
756 dest = scmutil.revsingle(repo, destf)
746
757
747 if revf:
758 if revf:
748 rebaseset = scmutil.revrange(repo, revf)
759 rebaseset = scmutil.revrange(repo, revf)
749 if not rebaseset:
760 if not rebaseset:
750 ui.status(_('empty "rev" revision set - nothing to rebase\n'))
761 ui.status(_('empty "rev" revision set - nothing to rebase\n'))
751 return None, None
762 return None, None
752 elif srcf:
763 elif srcf:
753 src = scmutil.revrange(repo, [srcf])
764 src = scmutil.revrange(repo, [srcf])
754 if not src:
765 if not src:
755 ui.status(_('empty "source" revision set - nothing to rebase\n'))
766 ui.status(_('empty "source" revision set - nothing to rebase\n'))
756 return None, None
767 return None, None
757 rebaseset = repo.revs('(%ld)::', src)
768 rebaseset = repo.revs('(%ld)::', src)
758 assert rebaseset
769 assert rebaseset
759 else:
770 else:
760 base = scmutil.revrange(repo, [basef or '.'])
771 base = scmutil.revrange(repo, [basef or '.'])
761 if not base:
772 if not base:
762 ui.status(_('empty "base" revision set - '
773 ui.status(_('empty "base" revision set - '
763 "can't compute rebase set\n"))
774 "can't compute rebase set\n"))
764 return None, None
775 return None, None
765 if not destf:
776 if not destf:
766 dest = repo[_destrebase(repo, base, destspace=destspace)]
777 dest = repo[_destrebase(repo, base, destspace=destspace)]
767 destf = str(dest)
778 destf = str(dest)
768
779
769 roots = [] # selected children of branching points
780 roots = [] # selected children of branching points
770 bpbase = {} # {branchingpoint: [origbase]}
781 bpbase = {} # {branchingpoint: [origbase]}
771 for b in base: # group bases by branching points
782 for b in base: # group bases by branching points
772 bp = repo.revs('ancestor(%d, %d)', b, dest).first()
783 bp = repo.revs('ancestor(%d, %d)', b, dest).first()
773 bpbase[bp] = bpbase.get(bp, []) + [b]
784 bpbase[bp] = bpbase.get(bp, []) + [b]
774 if None in bpbase:
785 if None in bpbase:
775 # emulate the old behavior, showing "nothing to rebase" (a better
786 # emulate the old behavior, showing "nothing to rebase" (a better
776 # behavior may be abort with "cannot find branching point" error)
787 # behavior may be abort with "cannot find branching point" error)
777 bpbase.clear()
788 bpbase.clear()
778 for bp, bs in bpbase.iteritems(): # calculate roots
789 for bp, bs in bpbase.iteritems(): # calculate roots
779 roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))
790 roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))
780
791
781 rebaseset = repo.revs('%ld::', roots)
792 rebaseset = repo.revs('%ld::', roots)
782
793
783 if not rebaseset:
794 if not rebaseset:
784 # transform to list because smartsets are not comparable to
795 # transform to list because smartsets are not comparable to
785 # lists. This should be improved to honor laziness of
796 # lists. This should be improved to honor laziness of
786 # smartset.
797 # smartset.
787 if list(base) == [dest.rev()]:
798 if list(base) == [dest.rev()]:
788 if basef:
799 if basef:
789 ui.status(_('nothing to rebase - %s is both "base"'
800 ui.status(_('nothing to rebase - %s is both "base"'
790 ' and destination\n') % dest)
801 ' and destination\n') % dest)
791 else:
802 else:
792 ui.status(_('nothing to rebase - working directory '
803 ui.status(_('nothing to rebase - working directory '
793 'parent is also destination\n'))
804 'parent is also destination\n'))
794 elif not repo.revs('%ld - ::%d', base, dest):
805 elif not repo.revs('%ld - ::%d', base, dest):
795 if basef:
806 if basef:
796 ui.status(_('nothing to rebase - "base" %s is '
807 ui.status(_('nothing to rebase - "base" %s is '
797 'already an ancestor of destination '
808 'already an ancestor of destination '
798 '%s\n') %
809 '%s\n') %
799 ('+'.join(str(repo[r]) for r in base),
810 ('+'.join(str(repo[r]) for r in base),
800 dest))
811 dest))
801 else:
812 else:
802 ui.status(_('nothing to rebase - working '
813 ui.status(_('nothing to rebase - working '
803 'directory parent is already an '
814 'directory parent is already an '
804 'ancestor of destination %s\n') % dest)
815 'ancestor of destination %s\n') % dest)
805 else: # can it happen?
816 else: # can it happen?
806 ui.status(_('nothing to rebase from %s to %s\n') %
817 ui.status(_('nothing to rebase from %s to %s\n') %
807 ('+'.join(str(repo[r]) for r in base), dest))
818 ('+'.join(str(repo[r]) for r in base), dest))
808 return None, None
819 return None, None
809
820
810 if not destf:
821 if not destf:
811 dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
822 dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
812 destf = str(dest)
823 destf = str(dest)
813
824
814 return dest, rebaseset
825 return dest, rebaseset
815
826
def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state are collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        for p in repo[rev].parents():
            if (p.rev() not in state
                and p.rev() not in destancestors):
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise error.Abort(_('unable to collapse on top of %s, there is more '
                        'than one external parent: %s') %
                      (max(destancestors),
                       ', '.join(str(p) for p in sorted(parents))))

def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None,
                 keepbranches=False, date=None):
    '''Commit the wd changes with parents p1 and p2. Reuse commit info from rev
    but also store useful information in extra.
    Return node of committed revision.'''
855 dsguard = util.nullcontextmanager()
856 if not repo.ui.configbool('rebase', 'singletransaction'):
844 dsguard = dirstateguard.dirstateguard(repo, 'rebase')
857 dsguard = dirstateguard.dirstateguard(repo, 'rebase')
845 try:
858 with dsguard:
846 repo.setparents(repo[p1].node(), repo[p2].node())
859 repo.setparents(repo[p1].node(), repo[p2].node())
847 ctx = repo[rev]
860 ctx = repo[rev]
848 if commitmsg is None:
861 if commitmsg is None:
849 commitmsg = ctx.description()
862 commitmsg = ctx.description()
850 keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
863 keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
851 extra = {'rebase_source': ctx.hex()}
864 extra = {'rebase_source': ctx.hex()}
852 if extrafn:
865 if extrafn:
853 extrafn(ctx, extra)
866 extrafn(ctx, extra)
854
867
855 destphase = max(ctx.phase(), phases.draft)
868 destphase = max(ctx.phase(), phases.draft)
856 overrides = {('phases', 'new-commit'): destphase}
869 overrides = {('phases', 'new-commit'): destphase}
857 with repo.ui.configoverride(overrides, 'rebase'):
870 with repo.ui.configoverride(overrides, 'rebase'):
858 if keepbranch:
871 if keepbranch:
859 repo.ui.setconfig('ui', 'allowemptycommit', True)
872 repo.ui.setconfig('ui', 'allowemptycommit', True)
860 # Commit might fail if unresolved files exist
873 # Commit might fail if unresolved files exist
861 if date is None:
874 if date is None:
862 date = ctx.date()
875 date = ctx.date()
863 newnode = repo.commit(text=commitmsg, user=ctx.user(),
876 newnode = repo.commit(text=commitmsg, user=ctx.user(),
864 date=date, extra=extra, editor=editor)
877 date=date, extra=extra, editor=editor)
865
878
866 repo.dirstate.setbranch(repo[newnode].branch())
879 repo.dirstate.setbranch(repo[newnode].branch())
867 dsguard.close()
868 return newnode
880 return newnode
869 finally:
870 release(dsguard)
871
881
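# Editor's illustration (not part of rebase.py): the concludenode() change
# above defaults dsguard to a no-op context manager and only constructs a
# real dirstateguard when rebase.singletransaction is disabled, so a single
# 'with' block covers both cases. A minimal standalone sketch of that
# pattern, using only the stdlib (all names below are hypothetical):
#
#     import contextlib
#
#     @contextlib.contextmanager
#     def _nullguard():
#         # stand-in for util.nullcontextmanager(): no-op on enter/exit
#         yield
#
#     def _commit_with_optional_guard(singletransaction, do_commit, makeguard):
#         guard = _nullguard()
#         if not singletransaction:
#             guard = makeguard()  # e.g. a backup/restore guard object
#         with guard:
#             return do_commit()
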
def rebasenode(repo, rev, p1, base, state, collapse, dest):
    'Rebase a single revision rev on top of p1 using base as merge ancestor'
    # Merge phase
    # Update to destination and merge it with local
    if repo['.'].rev() != p1:
        repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
        mergemod.update(repo, p1, False, True)
    else:
        repo.ui.debug(" already in destination\n")
    repo.dirstate.write(repo.currenttransaction())
    repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
    if base is not None:
        repo.ui.debug(" detach base %d:%s\n" % (base, repo[base]))
    # When collapsing in-place, the parent is the common ancestor, we
    # have to allow merging with it.
    stats = mergemod.update(repo, rev, True, True, base, collapse,
                            labels=['dest', 'source'])
    if collapse:
        copies.duplicatecopies(repo, rev, dest)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent, but *not*
        # duplicate any copies that have already been
        # performed in the destination.
        p1rev = repo[rev].p1().rev()
        copies.duplicatecopies(repo, rev, p1rev, skiprev=dest)
    return stats

def adjustdest(repo, rev, dest, state):
    """adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when doing rebase -r B+E -d F, rebase will first move B to B1,
    and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C
        | |
        | B <- rebased as B1
        |/
        A

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

         H       C1 G1
        /|       | /
       F G       |/
    K  | |  ->   K
    |  C D       |
    |  |/        |
    |  B         | ...
    |/           |/
    A            A
    """
    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            # pick already rebased revs from state
            source = [s for s, d in state.items() if d > 0]
            candidate = repo.revs('max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        result.append(adjusted)
    return result

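# Editor's worked example (hypothetical revision numbers) for the first
# diagram in adjustdest()'s docstring: suppose F = 5, B = 3, B1 = 6, and E's
# parent chain is E -> D -> x -> C -> B. After B is rebased, state = {3: 6}.
# For E's parent, the revset max(%ld and (::prev)) finds candidate 3, so the
# adjusted destination becomes state[3] = 6 (B1) instead of dest = 5 (F).
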
def nearestrebased(repo, rev, state):
    """return the nearest ancestors of rev in the rebase result"""
    rebased = [r for r in state if state[r] > nullmerge]
    candidates = repo.revs('max(%ld and (::%d))', rebased, rev)
    if candidates:
        return state[candidates.first()]
    else:
        return None

def _checkobsrebase(repo, ui, rebaseobsrevs, rebasesetrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is noop because of markers

    `rebaseobsrevs`: set of obsolete revisions in source
    `rebasesetrevs`: set of revisions to be rebased from source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination
    """
    # Obsolete node with successors not in dest leads to divergence
    divergenceok = ui.configbool('experimental',
                                 'allowdivergence')
    divergencebasecandidates = rebaseobsrevs - rebaseobsskipped

    if divergencebasecandidates and not divergenceok:
        divhashes = (str(repo[r])
                     for r in divergencebasecandidates)
        msg = _("this rebase will cause "
                "divergences from: %s")
        h = _("to force the rebase please set "
              "experimental.allowdivergence=True")
        raise error.Abort(msg % (",".join(divhashes),), hint=h)

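# Editor's note: the escape hatch referenced in the abort hint above is a
# user config; in an hgrc it would look like
#
#     [experimental]
#     allowdivergence = True
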
def defineparents(repo, rev, dest, state, destancestors,
                  obsoletenotrebased):
    'Return the new parent relationship of the revision that will be rebased'
    parents = repo[rev].parents()
    p1 = p2 = nullrev
    rp1 = None

    p1n = parents[0].rev()
    if p1n in destancestors:
        p1 = dest
    elif p1n in state:
        if state[p1n] == nullmerge:
            p1 = dest
        elif state[p1n] in revskipped:
            p1 = nearestrebased(repo, p1n, state)
            if p1 is None:
                p1 = dest
        else:
            p1 = state[p1n]
    else: # p1n external
        p1 = dest
        p2 = p1n

    if len(parents) == 2 and parents[1].rev() not in destancestors:
        p2n = parents[1].rev()
        # interesting second parent
        if p2n in state:
            if p1 == dest: # p1n in destancestors or external
                p1 = state[p2n]
                if p1 == revprecursor:
                    rp1 = obsoletenotrebased[p2n]
            elif state[p2n] in revskipped:
                p2 = nearestrebased(repo, p2n, state)
                if p2 is None:
                    # no ancestors rebased yet, detach
                    p2 = dest
            else:
                p2 = state[p2n]
        else: # p2n external
            if p2 != nullrev: # p1n external too => rev is a merged revision
                raise error.Abort(_('cannot use revision %d as base, result '
                                    'would have 3 parents') % rev)
            p2 = p2n
    repo.ui.debug(" future parents are %d and %d\n" %
                  (repo[rp1 or p1].rev(), repo[p2].rev()))

    if not any(p.rev() in state for p in parents):
        # Case (1) root changeset of a non-detaching rebase set.
        # Let the merge mechanism find the base itself.
        base = None
    elif not repo[rev].p2():
        # Case (2) detaching the node with a single parent, use this parent
        base = repo[rev].p1().rev()
    else:
        # Assuming there is a p1, this is the case where there also is a p2.
        # We are thus rebasing a merge and need to pick the right merge base.
        #
        # Imagine we have:
        # - M: current rebase revision in this step
        # - A: one parent of M
        # - B: other parent of M
        # - D: destination of this merge step (p1 var)
        #
        # Consider the case where D is a descendant of A or B and the other is
        # 'outside'. In this case, the right merge base is the D ancestor.
        #
        # An informal proof, assuming A is 'outside' and B is the D ancestor:
        #
        # If we pick B as the base, the merge involves:
        # - changes from B to M (actual changeset payload)
        # - changes from B to D (induced by rebase, as D is a rebased
        #   version of B)
        # Which exactly represents the rebase operation.
        #
        # If we pick A as the base, the merge involves:
        # - changes from A to M (actual changeset payload)
        # - changes from A to D (which include changes between unrelated A
        #   and B, plus changes induced by rebase)
        # Which does not represent anything sensible and creates a lot of
        # conflicts. A is thus not the right choice - B is.
        #
        # Note: The base found in this 'proof' is only correct in the specified
        # case. This base does not make sense if D is not a descendant of A or
        # B, or if the other parent is not 'outside' (especially not if the
        # other parent has been rebased). The current implementation does not
        # make it feasible to consider different cases separately. In these
        # other cases we currently just leave it to the user to correctly
        # resolve an impossible merge using a wrong ancestor.
        #
        # xx, p1 could be -4, and both parents could probably be -4...
        for p in repo[rev].parents():
            if state.get(p.rev()) == p1:
                base = p.rev()
                break
        else: # fallback when base not found
            base = None

            # Raise because this function is called wrong (see issue 4106)
            raise AssertionError('no base found to rebase on '
                                 '(defineparents called wrong)')
    return rp1 or p1, p2, base

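# Editor's worked example (hypothetical revs) for the merge-base proof above:
# when rebasing merge M with parents A (outside the rebase set) and B
# (already rebased, so state.get(B) == p1), the loop picks base = B; the
# merge then replays only M's own payload plus the rebase-induced B->D
# delta, exactly as the informal proof argues.
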
def isagitpatch(repo, patchname):
    'Return true if the given patch is in git format'
    mqpatch = os.path.join(repo.mq.path, patchname)
    for line in patch.linereader(file(mqpatch, 'rb')):
        if line.startswith('diff --git'):
            return True
    return False

def updatemq(repo, state, skipped, **opts):
    'Update rebased mq patches - finalize and then import them'
    mqrebase = {}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
                          (rev, p.name))
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(_('updating mq patch %s to %s:%s\n') %
                             (name, state[rev], repo[state[rev]]))
                mq.qimport(repo, (), patchname=name, git=isgit,
                           rev=[str(state[rev])])
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [s for s in original_series
                     if mq.guard_re.split(s, 1)[0] not in skippedpatches]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()

def storecollapsemsg(repo, collapsemsg):
    'Store the collapse message to allow recovery'
    collapsemsg = collapsemsg or ''
    f = repo.vfs("last-message.txt", "w")
    f.write("%s\n" % collapsemsg)
    f.close()

def clearcollapsemsg(repo):
    'Remove collapse message file'
    repo.vfs.unlinkpath("last-message.txt", ignoremissing=True)

def restorecollapsemsg(repo, isabort):
    'Restore previously stored collapse message'
    try:
        f = repo.vfs("last-message.txt")
        collapsemsg = f.readline().strip()
        f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if isabort:
            # Oh well, just abort like normal
            collapsemsg = ''
        else:
            raise error.Abort(_('missing .hg/last-message.txt for rebase'))
    return collapsemsg

def clearstatus(repo):
    'Remove the status files'
    _clearrebasesetvisibiliy(repo)
    # Make sure the active transaction won't write the state file
    tr = repo.currenttransaction()
    if tr:
        tr.removefilegenerator('rebasestate')
    repo.vfs.unlinkpath("rebasestate", ignoremissing=True)

def needupdate(repo, state):
    '''check whether we should `update --clean` away from a merge, or if
    somehow the working dir got forcibly updated, e.g. by older hg'''
    parents = [p.rev() for p in repo[None].parents()]

    # Are we in a merge state at all?
    if len(parents) < 2:
        return False

    # We should be standing on the first as-of-yet unrebased commit.
    firstunrebased = min([old for old, new in state.iteritems()
                          if new == nullrev])
    if firstunrebased in parents:
        return True

    return False

def abort(repo, originalwd, dest, state, activebookmark=None):
    '''Restore the repository to its original state. Additional args:

    activebookmark: the name of the bookmark that should be active after the
    restore'''

    try:
        # If the first commits in the rebased set get skipped during the
        # rebase, their values within the state mapping will be the dest rev
        # id. The dstates list must not contain the dest rev (issue4896)
        dstates = [s for s in state.values() if s >= 0 and s != dest]
        immutable = [d for d in dstates if not repo[d].mutable()]
        cleanup = True
        if immutable:
            repo.ui.warn(_("warning: can't clean up public changesets %s\n")
                         % ', '.join(str(repo[r]) for r in immutable),
                         hint=_("see 'hg help phases' for details"))
            cleanup = False

        descendants = set()
        if dstates:
            descendants = set(repo.changelog.descendants(dstates))
        if descendants - set(dstates):
            repo.ui.warn(_("warning: new changesets detected on destination "
                           "branch, can't strip\n"))
            cleanup = False

        if cleanup:
            shouldupdate = False
            rebased = filter(lambda x: x >= 0 and x != dest, state.values())
            if rebased:
                strippoints = [
                        c.node() for c in repo.set('roots(%ld)', rebased)]

                updateifonnodes = set(rebased)
                updateifonnodes.add(dest)
                updateifonnodes.add(originalwd)
                shouldupdate = repo['.'].rev() in updateifonnodes

            # Update away from the rebase if necessary
            if shouldupdate or needupdate(repo, state):
                mergemod.update(repo, originalwd, False, True)

            # Strip from the first rebased revision
            if rebased:
                # no backup of rebased cset versions needed
                repair.strip(repo.ui, repo, strippoints)

        if activebookmark and activebookmark in repo._bookmarks:
            bookmarks.activate(repo, activebookmark)

    finally:
        clearstatus(repo)
        clearcollapsemsg(repo)
        repo.ui.warn(_('rebase aborted\n'))
    return 0

def buildstate(repo, dest, rebaseset, collapse, obsoletenotrebased):
    '''Define which revisions are going to be rebased and where

    repo: repo
    dest: context
    rebaseset: set of rev
    '''
    originalwd = repo['.'].rev()
    _setrebasesetvisibility(repo, set(rebaseset) | {originalwd})

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if 'qtip' in repo.tags() and (dest.node() in
                                  [s.node for s in repo.mq.applied]):
        raise error.Abort(_('cannot rebase onto an applied mq patch'))

    roots = list(repo.set('roots(%ld)', rebaseset))
    if not roots:
        raise error.Abort(_('no matching revisions'))
    roots.sort()
    state = dict.fromkeys(rebaseset, revtodo)
    detachset = set()
    emptyrebase = True
    for root in roots:
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.Abort(_('source is ancestor of destination'))
        if commonbase == dest:
            wctx = repo[None]
            if dest == wctx.p1():
                # when rebasing to '.', it will use the current wd branch name
                samebranch = root.branch() == wctx.branch()
            else:
                samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and dest in root.parents():
                # mark the revision as done by setting its new revision
                # equal to its old (current) revisions
                state[root.rev()] = root.rev()
                repo.ui.debug('source is a child of destination\n')
                continue

        emptyrebase = False
        repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
        # Rebase tries to turn <dest> into a parent of <root> while
        # preserving the number of parents of rebased changesets:
        #
        # - A changeset with a single parent will always be rebased as a
        #   changeset with a single parent.
        #
        # - A merge will be rebased as merge unless its parents are both
        #   ancestors of <dest> or are themselves in the rebased set and
        #   pruned while rebased.
        #
        # If one parent of <root> is an ancestor of <dest>, the rebased
        # version of this parent will be <dest>. This is always true with
        # --base option.
        #
        # Otherwise, we need to *replace* the original parents with
        # <dest>. This "detaches" the rebased set from its former location
        # and rebases it onto <dest>. Changes introduced by ancestors of
        # <root> not common with <dest> (the detachset, marked as
        # nullmerge) are "removed" from the rebased changesets.
        #
        # - If <root> has a single parent, set it to <dest>.
        #
        # - If <root> is a merge, we cannot decide which parent to
        #   replace, the rebase operation is not clearly defined.
        #
        # The table below sums up this behavior:
        #
        # +------------------+----------------------+-------------------------+
        # |                  |     one parent       |  merge                  |
        # +------------------+----------------------+-------------------------+
        # | parent in        | new parent is <dest> | parents in ::<dest> are |
        # | ::<dest>         |                      | remapped to <dest>      |
        # +------------------+----------------------+-------------------------+
        # | unrelated source | new parent is <dest> | ambiguous, abort        |
        # +------------------+----------------------+-------------------------+
        #
        # The actual abort is handled by `defineparents`
        if len(root.parents()) <= 1:
            # ancestors of <root> not ancestors of <dest>
            detachset.update(repo.changelog.findmissingrevs([commonbase.rev()],
                                                            [root.rev()]))
    if emptyrebase:
        return None
    for rev in sorted(state):
        parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
        # if all parents of this revision are done, then so is this revision
        if parents and all((state.get(p) == p for p in parents)):
            state[rev] = rev
    for r in detachset:
        if r not in state:
            state[r] = nullmerge
    if len(roots) > 1:
        # If we have multiple roots, we may have "holes" in the rebase set.
        # Rebase roots that descend from those "holes" should not be detached
        # as other roots are. We use the special `revignored` to inform rebase
        # that the revision should be ignored but that `defineparents` should
        # search a rebase destination that makes sense regarding the rebased
        # topology.
        rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset))
        for ignored in set(rebasedomain) - set(rebaseset):
            state[ignored] = revignored
    for r in obsoletenotrebased:
        if obsoletenotrebased[r] is None:
            state[r] = revpruned
        else:
            state[r] = revprecursor
    return originalwd, dest.rev(), state

def clearrebased(ui, repo, dest, state, skipped, collapsedas=None):
    """dispose of rebased revisions at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is
    the `collapsedas` node."""
    tonode = repo.changelog.node
    # Move bookmarks of skipped nodes to destination. This cannot be handled
    # by scmutil.cleanupnodes since it will treat rev as removed (no successor)
    # and move bookmark backwards.
    bmchanges = [(name, tonode(max(adjustdest(repo, rev, dest, state))))
                 for rev in skipped
                 for name in repo.nodebookmarks(tonode(rev))]
    if bmchanges:
        with repo.transaction('rebase') as tr:
            repo._bookmarks.applychanges(repo, tr, bmchanges)
    mapping = {}
    for rev, newrev in sorted(state.items()):
        if newrev >= 0 and newrev != rev:
            if rev in skipped:
                succs = ()
            elif collapsedas is not None:
                succs = (collapsedas,)
            else:
                succs = (tonode(newrev),)
            mapping[tonode(rev)] = succs
    scmutil.cleanupnodes(repo, mapping, 'rebase')

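# Editor's worked example (hypothetical revs): with state = {2: 5, 3: 5} and
# rev 3 in skipped, the mapping handed to scmutil.cleanupnodes() becomes
# {node(2): (node(5),), node(3): ()} -- a skipped rev records no successor,
# and with --collapse every rebased rev maps to the single collapsedas node.
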
def pullrebase(orig, ui, repo, *args, **opts):
    'Call rebase after pull if the latter has been invoked with --rebase'
    ret = None
    if opts.get('rebase'):
        if ui.configbool('commands', 'rebase.requiredest'):
            msg = _('rebase destination required by configuration')
            hint = _('use hg pull followed by hg rebase -d DEST')
            raise error.Abort(msg, hint=hint)

        with repo.wlock(), repo.lock():
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
                'please commit or shelve your changes first'))

            revsprepull = len(repo)
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            try:
                ret = orig(ui, repo, *args, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # the --rev option from pull conflicts with rebase's own
                # --rev, so drop it
                if 'rev' in opts:
                    del opts['rev']
                # positional argument from pull conflicts with rebase's own
                # --source.
                if 'source' in opts:
                    del opts['source']
                # revsprepull is the len of the repo, not revnum of tip.
                destspace = list(repo.changelog.revs(start=revsprepull))
                opts['_destspace'] = destspace
                try:
                    rebase(ui, repo, **opts)
                except error.NoMergeDestAbort:
                    # we can maybe update instead
                    rev, _a, _b = destutil.destupdate(repo)
                    if rev == repo['.'].rev():
                        ui.status(_('nothing to rebase\n'))
                    else:
                        ui.status(_('nothing to rebase - updating instead\n'))
                        # not passing argument to get the bare update behavior
                        # with warning and trumpets
                        commands.update(ui, repo)
    else:
        if opts.get('tool'):
            raise error.Abort(_('--tool can only be used with --rebase'))
        ret = orig(ui, repo, *args, **opts)

    return ret

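# Editor's note: the requiredest guard above corresponds to user
# configuration like
#
#     [commands]
#     rebase.requiredest = True
#
# in which case 'hg pull --rebase' aborts and the user must run 'hg pull'
# followed by an explicit 'hg rebase -d DEST'.
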
def _setrebasesetvisibility(repo, revs):
    """store the currently rebased set on the repo object

    This is used by another function to prevent rebased revisions from
    becoming hidden (see issue4504)"""
    repo = repo.unfiltered()
    repo._rebaseset = revs
    # invalidate cache if visibility changes
    hiddens = repo.filteredrevcache.get('visible', set())
    if revs & hiddens:
        repo.invalidatevolatilesets()

def _clearrebasesetvisibiliy(repo):
    """remove rebaseset data from the repo"""
    repo = repo.unfiltered()
    if '_rebaseset' in vars(repo):
        del repo._rebaseset

def _rebasedvisible(orig, repo):
    """ensure rebased revs stay visible (see issue4504)"""
    blockers = orig(repo)
    blockers.update(getattr(repo, '_rebaseset', ()))
    return blockers

def _filterobsoleterevs(repo, revs):
    """returns a set of the obsolete revisions in revs"""
    return set(r for r in revs if repo[r].obsolete())

def _computeobsoletenotrebased(repo, rebaseobsrevs, dest):
    """return a mapping obsolete => successor for all obsolete nodes to be
    rebased that have a successor in the destination

    obsolete => None entries in the mapping indicate nodes with no successor"""
    obsoletenotrebased = {}

    # Build a mapping successor => obsolete nodes for the obsolete
    # nodes to be rebased
    allsuccessors = {}
    cl = repo.changelog
    for r in rebaseobsrevs:
        node = cl.node(r)
        for s in obsutil.allsuccessors(repo.obsstore, [node]):
            try:
                allsuccessors[cl.rev(s)] = cl.rev(node)
            except LookupError:
                pass

    if allsuccessors:
        # Look for successors of obsolete nodes to be rebased among
        # the ancestors of dest
        ancs = cl.ancestors([dest],
                            stoprev=min(allsuccessors),
                            inclusive=True)
        for s in allsuccessors:
            if s in ancs:
                obsoletenotrebased[allsuccessors[s]] = s
            elif (s == allsuccessors[s] and
                  allsuccessors.values().count(s) == 1):
                # plain prune
                obsoletenotrebased[s] = None

    return obsoletenotrebased

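# Editor's worked example (hypothetical revs): if obsolete rev 4 in the
# rebase set has a successor rev 7 among the ancestors of dest, the result
# includes {4: 7} and rev 4 is later marked revprecursor; a rev pruned with
# no successor comes back as {rev: None} and is marked revpruned in
# buildstate().
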
def summaryhook(ui, repo):
    if not repo.vfs.exists('rebasestate'):
        return
    try:
        rbsrt = rebaseruntime(repo, ui, {})
        rbsrt.restorestatus()
        state = rbsrt.state
    except error.RepoLookupError:
        # i18n: column positioning for "hg summary"
        msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
        ui.write(msg)
        return
    numrebased = len([i for i in state.itervalues() if i >= 0])
    # i18n: column positioning for "hg summary"
    ui.write(_('rebase: %s, %s (rebase --continue)\n') %
             (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
              ui.label(_('%d remaining'), 'rebase.remaining') %
              (len(state) - numrebased)))

def uisetup(ui):
    # Replace pull with a decorator to provide the --rebase option
    entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
    entry[1].append(('', 'rebase', None,
                     _("rebase working directory to branch head")))
    entry[1].append(('t', 'tool', '',
                     _("specify merge tool for rebase")))
    cmdutil.summaryhooks.add('rebase', summaryhook)
    cmdutil.unfinishedstates.append(
        ['rebasestate', False, False, _('rebase in progress'),
         _("use 'hg rebase --continue' or 'hg rebase --abort'")])
    cmdutil.afterresolvedstates.append(
        ['rebasestate', _('hg rebase --continue')])
    # ensure rebased revs are not hidden
    extensions.wrapfunction(repoview, 'pinnedrevs', _rebasedvisible)
@@ -1,68 +1,78 b''
# dirstateguard.py - class to allow restoring dirstate after failure
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _

from . import (
    error,
)

class dirstateguard(object):
    '''Restore dirstate at unexpected failure.

    At the construction, this class does:

    - write current ``repo.dirstate`` out, and
    - save ``.hg/dirstate`` into the backup file

    This restores ``.hg/dirstate`` from the backup file, if ``release()``
    is invoked before ``close()``.

    This just removes the backup file at ``close()`` before ``release()``.
    '''

    def __init__(self, repo, name):
        self._repo = repo
        self._active = False
        self._closed = False
        self._backupname = 'dirstate.backup.%s.%d' % (name, id(self))
        repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
        self._active = True

    def __del__(self):
        if self._active: # still active
            # this may occur, even if this class is used correctly:
            # for example, releasing other resources like transaction
            # may raise exception before ``dirstateguard.release`` in
            # ``release(tr, ....)``.
            self._abort()

+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            if exc_type is None:
+                self.close()
+        finally:
+            self.release()
+
    def close(self):
        if not self._active: # already inactivated
            msg = (_("can't close already inactivated backup: %s")
                   % self._backupname)
            raise error.Abort(msg)

        self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
                                        self._backupname)
        self._active = False
        self._closed = True

    def _abort(self):
        self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
                                          self._backupname)
        self._active = False

    def release(self):
        if not self._closed:
            if not self._active: # already inactivated
                msg = (_("can't release already inactivated backup: %s")
                       % self._backupname)
                raise error.Abort(msg)
            self._abort()
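
# Editor's illustration (not part of dirstateguard.py): the __enter__ and
# __exit__ methods added above let callers replace the manual pairing
#
#     guard = dirstateguard(repo, 'rebase')
#     try:
#         ...              # mutate the dirstate
#         guard.close()
#     finally:
#         guard.release()
#
# with the equivalent context-manager form (repo is assumed to exist):
#
#     with dirstateguard(repo, 'rebase'):
#         ...              # backup is discarded on success and the
#                          # saved dirstate is restored on exception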
@@ -1,3696 +1,3700 b''
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

from __future__ import absolute_import

import bz2
import calendar
import codecs
import collections
import contextlib
import datetime
import errno
import gc
import hashlib
import imp
import os
import platform as pyplatform
import re as remod
import shutil
import signal
import socket
import stat
import string
import subprocess
import sys
import tempfile
import textwrap
import time
import traceback
import warnings
import zlib

from . import (
    encoding,
    error,
    i18n,
    policy,
    pycompat,
)

base85 = policy.importmod(r'base85')
osutil = policy.importmod(r'osutil')
parsers = policy.importmod(r'parsers')

b85decode = base85.b85decode
b85encode = base85.b85encode

cookielib = pycompat.cookielib
empty = pycompat.empty
httplib = pycompat.httplib
httpserver = pycompat.httpserver
pickle = pycompat.pickle
queue = pycompat.queue
socketserver = pycompat.socketserver
stderr = pycompat.stderr
stdin = pycompat.stdin
stdout = pycompat.stdout
stringio = pycompat.stringio
urlerr = pycompat.urlerr
urlreq = pycompat.urlreq
xmlrpclib = pycompat.xmlrpclib

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr

def isatty(fp):
    try:
        return fp.isatty()
    except AttributeError:
        return False

# glibc determines buffering on first write to stdout - if we replace a TTY
# destined stdout with a pipe destined stdout (e.g. pager), we want line
# buffering
if isatty(stdout):
    stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)

if pycompat.osname == 'nt':
    from . import windows as platform
    stdout = platform.winstdout(stdout)
else:
    from . import posix as platform

_ = i18n._

bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
username = platform.username

try:
    recvfds = osutil.recvfds
except AttributeError:
    pass
try:
    setprocname = osutil.setprocname
except AttributeError:
    pass

# Python compatibility

_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)

def safehasattr(thing, attr):
    return getattr(thing, attr, _notset) is not _notset

def bitsfrom(container):
    bits = 0
    for bit in container:
        bits |= bit
    return bits

# Python 2.6 still has deprecation warnings enabled by default. We do not want
# to display anything to standard users, so detect whether we are running the
# test suite and only emit Python deprecation warnings in that case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')

def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a Python-native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if _dowarn:
        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
                " update your code.)") % version
        warnings.warn(msg, DeprecationWarning, stacklevel + 1)

DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None

class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
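
# Usage sketch (illustrative): wrap a file handle so reading it also counts
# bytes and feeds every configured digest; validate() then raises Abort on
# any mismatch. The md5 value is the digest of 'foo' from the digester
# doctest above, so ``fh`` is assumed to yield exactly those three bytes.
def _example_digestchecker(fh):
    checked = digestchecker(fh, 3, {'md5': 'acbd18db4cc2f85cedef654fccc4a4d8'})
    data = checked.read()
    checked.validate()
    return data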

try:
    buffer = buffer
except NameError:
    def buffer(sliceable, offset=0, length=None):
        if length is not None:
            return memoryview(sliceable)[offset:offset + length]
        return memoryview(sliceable)[offset:]

closefds = pycompat.osname == 'posix'

_chunksize = 4096

class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class lets us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally as a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapses it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
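
# Usage sketch (illustrative): only block in poll() when our own buffer is
# empty, which is exactly the coordination this class exists for. ``pipe``
# is assumed to be a readable pipe object exposing fileno().
def _example_bufferedpipe(pipe):
    bufpipe = bufferedinputpipe(pipe)
    if not bufpipe.hasbuffer:
        poll([bufpipe.fileno()]) # nothing buffered, so waiting is safe
    return bufpipe.readline()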

def popen2(cmd, env=None, newlines=False):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout

def popen3(cmd, env=None, newlines=False):
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr

def popen4(cmd, env=None, newlines=False, bufsize=-1):
    p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p
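
# Usage sketch (illustrative): all of these helpers funnel into
# subprocess.Popen with shell=True; popen4 additionally exposes stderr and
# the process object, which popen2/popen3 drop from their return values.
def _example_popen4():
    stdin, stdout, stderr, p = popen4('sort')
    stdin.write('b\na\n')
    stdin.close()
    out = stdout.read() # 'a\nb\n' with a POSIX sort
    p.wait()
    return out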

def version():
    """Return version information if available."""
    try:
        from . import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    parts = remod.split('[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

# used by parsedate
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M', # without seconds
    '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
    '%Y-%m-%dT%H%M', # without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M', # without seconds
    '%Y-%m-%d %H%M%S', # without :
    '%Y-%m-%d %H%M', # without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    if func.__code__.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
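
# Usage sketch (illustrative): each distinct argument is computed once and
# then served from the unbounded cache, so this is only safe for pure
# functions with hashable arguments.
def _example_cachefunc():
    calls = []
    def square(x):
        calls.append(x)
        return x * x
    square = cachefunc(square)
    square(3)
    square(3) # served from the cache; the wrapped function is not re-run
    return len(calls) # 1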

class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([('a', 0), ('b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([('a', 2)])
    >>> d2.keys() # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        tr.close()
        raise
    finally:
        tr.release()
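
# Usage sketch (illustrative): commit the transaction even when the body
# stops for user intervention (e.g. a rebase hitting merge conflicts), so
# work already applied inside ``tr`` is not rolled back. ``repo`` stands
# for a localrepository holding the appropriate locks.
def _example_acceptintervention(repo):
    tr = repo.transaction('example')
    with acceptintervention(tr):
        pass # may raise error.InterventionRequired; tr still closes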

@contextlib.contextmanager
def nullcontextmanager():
    yield
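
# Usage sketch (illustrative): a no-op stand-in so call sites can always use
# ``with``, constructing a real guard only when one is needed; this mirrors
# how the rebase.singletransaction change picks a single dirstateguard or no
# guard at all. Assumes mercurial.dirstateguard is importable here.
def _example_nullcontextmanager(repo, singletr):
    if singletr:
        guard = dirstateguard.dirstateguard(repo, 'example')
    else:
        guard = nullcontextmanager()
    with guard:
        pass # the same call site works with either context manager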

class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
624 class lrucachedict(object):
628 class lrucachedict(object):
625 """Dict that caches most recent accesses and sets.
629 """Dict that caches most recent accesses and sets.
626
630
627 The dict consists of an actual backing dict - indexed by original
631 The dict consists of an actual backing dict - indexed by original
628 key - and a doubly linked circular list defining the order of entries in
632 key - and a doubly linked circular list defining the order of entries in
629 the cache.
633 the cache.
630
634
631 The head node is the newest entry in the cache. If the cache is full,
635 The head node is the newest entry in the cache. If the cache is full,
632 we recycle head.prev and make it the new head. Cache accesses result in
636 we recycle head.prev and make it the new head. Cache accesses result in
633 the node being moved to before the existing head and being marked as the
637 the node being moved to before the existing head and being marked as the
634 new head node.
638 new head node.
635 """
639 """
636 def __init__(self, max):
640 def __init__(self, max):
637 self._cache = {}
641 self._cache = {}
638
642
639 self._head = head = _lrucachenode()
643 self._head = head = _lrucachenode()
640 head.prev = head
644 head.prev = head
641 head.next = head
645 head.next = head
642 self._size = 1
646 self._size = 1
643 self._capacity = max
647 self._capacity = max
644
648
645 def __len__(self):
649 def __len__(self):
646 return len(self._cache)
650 return len(self._cache)
647
651
648 def __contains__(self, k):
652 def __contains__(self, k):
649 return k in self._cache
653 return k in self._cache
650
654
651 def __iter__(self):
655 def __iter__(self):
652 # We don't have to iterate in cache order, but why not.
656 # We don't have to iterate in cache order, but why not.
653 n = self._head
657 n = self._head
654 for i in range(len(self._cache)):
658 for i in range(len(self._cache)):
655 yield n.key
659 yield n.key
656 n = n.next
660 n = n.next
657
661
658 def __getitem__(self, k):
662 def __getitem__(self, k):
659 node = self._cache[k]
663 node = self._cache[k]
660 self._movetohead(node)
664 self._movetohead(node)
661 return node.value
665 return node.value
662
666
663 def __setitem__(self, k, v):
667 def __setitem__(self, k, v):
664 node = self._cache.get(k)
668 node = self._cache.get(k)
665 # Replace existing value and mark as newest.
669 # Replace existing value and mark as newest.
666 if node is not None:
670 if node is not None:
667 node.value = v
671 node.value = v
668 self._movetohead(node)
672 self._movetohead(node)
669 return
673 return
670
674
671 if self._size < self._capacity:
675 if self._size < self._capacity:
672 node = self._addcapacity()
676 node = self._addcapacity()
673 else:
677 else:
674 # Grab the last/oldest item.
678 # Grab the last/oldest item.
675 node = self._head.prev
679 node = self._head.prev
676
680
677 # At capacity. Kill the old entry.
681 # At capacity. Kill the old entry.
678 if node.key is not _notset:
682 if node.key is not _notset:
679 del self._cache[node.key]
683 del self._cache[node.key]
680
684
681 node.key = k
685 node.key = k
682 node.value = v
686 node.value = v
683 self._cache[k] = node
687 self._cache[k] = node
684 # And mark it as newest entry. No need to adjust order since it
688 # And mark it as newest entry. No need to adjust order since it
685 # is already self._head.prev.
689 # is already self._head.prev.
686 self._head = node
690 self._head = node
687
691
688 def __delitem__(self, k):
692 def __delitem__(self, k):
689 node = self._cache.pop(k)
693 node = self._cache.pop(k)
690 node.markempty()
694 node.markempty()
691
695
692 # Temporarily mark as newest item before re-adjusting head to make
696 # Temporarily mark as newest item before re-adjusting head to make
693 # this node the oldest item.
697 # this node the oldest item.
694 self._movetohead(node)
698 self._movetohead(node)
695 self._head = node.next
699 self._head = node.next
696
700
697 # Additional dict methods.
701 # Additional dict methods.
698
702
699 def get(self, k, default=None):
703 def get(self, k, default=None):
700 try:
704 try:
701 return self._cache[k].value
705 return self._cache[k].value
702 except KeyError:
706 except KeyError:
703 return default
707 return default
704
708
705 def clear(self):
709 def clear(self):
706 n = self._head
710 n = self._head
707 while n.key is not _notset:
711 while n.key is not _notset:
708 n.markempty()
712 n.markempty()
709 n = n.next
713 n = n.next
710
714
711 self._cache.clear()
715 self._cache.clear()
712
716
713 def copy(self):
717 def copy(self):
714 result = lrucachedict(self._capacity)
718 result = lrucachedict(self._capacity)
715 n = self._head.prev
719 n = self._head.prev
716 # Iterate in oldest-to-newest order, so the copy has the right ordering
720 # Iterate in oldest-to-newest order, so the copy has the right ordering
717 for i in range(len(self._cache)):
721 for i in range(len(self._cache)):
718 result[n.key] = n.value
722 result[n.key] = n.value
719 n = n.prev
723 n = n.prev
720 return result
724 return result
721
725
722 def _movetohead(self, node):
726 def _movetohead(self, node):
723 """Mark a node as the newest, making it the new head.
727 """Mark a node as the newest, making it the new head.
724
728
725 When a node is accessed, it becomes the freshest entry in the LRU
729 When a node is accessed, it becomes the freshest entry in the LRU
726 list, which is denoted by self._head.
730 list, which is denoted by self._head.
727
731
728 Visually, let's make ``N`` the new head node (* denotes head):
732 Visually, let's make ``N`` the new head node (* denotes head):
729
733
730 previous/oldest <-> head <-> next/next newest
734 previous/oldest <-> head <-> next/next newest
731
735
732 ----<->--- A* ---<->-----
736 ----<->--- A* ---<->-----
733 | |
737 | |
734 E <-> D <-> N <-> C <-> B
738 E <-> D <-> N <-> C <-> B
735
739
736 To:
740 To:
737
741
738 ----<->--- N* ---<->-----
742 ----<->--- N* ---<->-----
739 | |
743 | |
740 E <-> D <-> C <-> B <-> A
744 E <-> D <-> C <-> B <-> A
741
745
742 This requires the following moves:
746 This requires the following moves:
743
747
744 C.next = D (node.prev.next = node.next)
748 C.next = D (node.prev.next = node.next)
745 D.prev = C (node.next.prev = node.prev)
749 D.prev = C (node.next.prev = node.prev)
746 E.next = N (head.prev.next = node)
750 E.next = N (head.prev.next = node)
747 N.prev = E (node.prev = head.prev)
751 N.prev = E (node.prev = head.prev)
748 N.next = A (node.next = head)
752 N.next = A (node.next = head)
749 A.prev = N (head.prev = node)
753 A.prev = N (head.prev = node)
750 """
754 """
751 head = self._head
755 head = self._head
752 # C.next = D
756 # C.next = D
753 node.prev.next = node.next
757 node.prev.next = node.next
754 # D.prev = C
758 # D.prev = C
755 node.next.prev = node.prev
759 node.next.prev = node.prev
756 # N.prev = E
760 # N.prev = E
757 node.prev = head.prev
761 node.prev = head.prev
758 # N.next = A
762 # N.next = A
759 # It is tempting to do just "head" here, however if node is
763 # It is tempting to do just "head" here, however if node is
760 # adjacent to head, this will do bad things.
764 # adjacent to head, this will do bad things.
761 node.next = head.prev.next
765 node.next = head.prev.next
762 # E.next = N
766 # E.next = N
763 node.next.prev = node
767 node.next.prev = node
764 # A.prev = N
768 # A.prev = N
765 node.prev.next = node
769 node.prev.next = node
766
770
767 self._head = node
771 self._head = node
768
772
769 def _addcapacity(self):
773 def _addcapacity(self):
770 """Add a node to the circular linked list.
774 """Add a node to the circular linked list.
771
775
772 The new node is inserted before the head node.
776 The new node is inserted before the head node.
773 """
777 """
774 head = self._head
778 head = self._head
775 node = _lrucachenode()
779 node = _lrucachenode()
776 head.prev.next = node
780 head.prev.next = node
777 node.prev = head.prev
781 node.prev = head.prev
778 node.next = head
782 node.next = head
779 head.prev = node
783 head.prev = node
780 self._size += 1
784 self._size += 1
781 return node
785 return node
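
# Usage sketch (illustrative): with capacity two, touching 'a' makes 'b' the
# oldest entry, so the next insert recycles b's node.
def _example_lrucachedict():
    d = lrucachedict(2)
    d['a'] = 1
    d['b'] = 2
    d['a']     # access moves 'a' to the head of the LRU list
    d['c'] = 3 # evicts 'b', the least recently used key
    return 'b' in d # False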

def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
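
# Usage sketch (illustrative): the first attribute access runs the decorated
# function, and cachevalue() then stores the result in the instance
# __dict__, so later lookups never reach the descriptor again.
class _examplecached(object):
    @propertycache
    def expensive(self):
        return 42 # computed on first access, cached on the instance after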

def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, pycompat.sysstr('wb'))
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
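
# Usage sketch (illustrative): a 'pipe:' or 'tempfile:' prefix selects the
# matching strategy from filtertable; anything else defaults to pipefilter.
# 'tr' is the POSIX translate utility, so this returns 'ABC\n' on POSIX.
def _example_filter():
    return filter('abc\n', 'pipe: tr a-z A-Z')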

def binary(s):
    """return true if a string is binary data"""
    return bool(s and '\0' in s)

def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)
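
# Usage sketch (illustrative): many small chunks get coalesced into a few
# progressively larger ones, cutting down the number of downstream write()
# calls when streaming data.
def _example_increasingchunks():
    chunks = list(increasingchunks(['x' * 100] * 100, min=1024, max=4096))
    return [len(c) for c in chunks] # every chunk but the last is >= min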

Abort = error.Abort

def always(fn):
    return True

def never(fn):
    return False

def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7.
    """
    if sys.version_info >= (2, 7):
        return func
    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()
    return wrapper
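
# Usage sketch (illustrative): wrap a builder of large container graphs so
# the cyclic collector cannot fire mid-construction; on Python >= 2.7 the
# decorator is a no-op and returns the function unchanged.
@nogc
def _example_buildmap(n):
    return dict((i, [i]) for i in range(n))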

def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
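
# Usage sketch (illustrative): with repo-relative inputs the shared path
# prefix is stripped and the remainder is reached via '..'; on POSIX this
# returns '../c/other'.
def _example_pathto():
    return pathto('/repo', os.path.join('a', 'b'), 'a/c/other')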

def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__")) # tools/freeze

# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)

_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = encoding.environ.get('HG')
        mainmod = sys.modules[pycompat.sysstr('__main__')]
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable

def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def _isstdout(f):
    fileno = getattr(f, 'fileno', None)
    return fileno and fileno() == sys.__stdout__.fileno()

def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    env = dict(encoding.environ)
    if environ:
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    return env

def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    env = shellenviron(environ)
    if out is None or _isstdout(out):
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, ''):
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        rc = 0
    return rc
1064
1068
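# Illustrative usage sketch (not part of the original module): capturing a
# command's combined stdout/stderr in a buffer instead of the terminal. The
# command and the io.BytesIO sink are examples only.
def _demosystem():
    import io
    buf = io.BytesIO()
    rc = system('echo hello', out=buf)
    return rc, buf.getvalue()    # (0, 'hello\n') with a POSIX /bin/sh
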
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    'btrfs',
    'ext2',
    'ext3',
    'ext4',
    'hfs',
    'jfs',
    'reiserfs',
    'tmpfs',
    'ufs',
    'xfs',
    'zfs',
}

def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))

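# Illustrative sketch (not part of the original module): hardlink=True is a
# request, not a guarantee -- copyfile() degrades to a plain copy when the
# destination filesystem is not whitelisted or linking fails. Paths are
# hypothetical.
def _democopyfile():
    copyfile('a.txt', 'b.txt', hardlink=True)   # link if safe, else copy
    copyfile('a.txt', 'c.txt', copystat=True)   # also carry over atime/mtime
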
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num

_winreservednames = b'''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t

if pycompat.osname == 'nt':
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

if safehasattr(time, "perf_counter"):
    timer = time.perf_counter

def makelock(info, pathname):
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r

def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
    if b == b2:
        return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True

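# Illustrative sketch (not part of the original module): probing a repository
# by stat-ing a case-flipped sibling of its '.hg' directory, which is a
# suitable "foldable final component". The helper name is hypothetical.
def _demofscasesensitive(repo_root):
    return fscasesensitive(os.path.join(repo_root, '.hg'))
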
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False

class _re(object):
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

re = _re()

_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    seps.replace('\\','\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk", which
            # may take place for each patch of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)

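# Illustrative sketch (not part of the original module): recovering the
# on-disk case of a path on a case-insensitive filesystem. Both arguments
# must already be normcase-ed (normcase is assumed to be this module's
# platform alias); the file name below is hypothetical.
def _demofspath(repo_root):
    # with 'ReadMe.TXT' on disk, this returns 'ReadMe.TXT'
    return fspath(normcase('readme.txt'), normcase(repo_root))
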
def getfstype(dirpath):
    '''Get the filesystem type name from a directory (best-effort)

    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
    '''
    return getattr(osutil, 'getfstype', lambda x: None)(dirpath)

def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

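# Illustrative sketch (not part of the original module): deciding whether an
# nlink-based optimization can be trusted next to a hypothetical store file.
def _demochecknlink(storepath):
    if checknlink(os.path.join(storepath, '00changelog.i')):
        return 'hardlink'   # counts are reliable here
    return 'copy'           # e.g. some CIFS/Samba mounts: play it safe
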
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return (path.endswith(pycompat.ossep)
            or pycompat.osaltsep and path.endswith(pycompat.osaltsep))

def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative to a simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(pycompat.ossep)

def gui():
    '''Are we running in a GUI?'''
    if pycompat.sysplatform == 'darwin':
        if 'SSH_CONNECTION' in encoding.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")

def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp

class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            pass
        try:
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by collision between such mtimes.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return False
            raise
        return True

    def __ne__(self, other):
        return not self == other

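# Illustrative sketch (not part of the original module): the write/check/
# advance dance callers perform around filestat so that a later
# size+ctime+mtime comparison cannot mistake old content for new. The helper
# name is hypothetical; writefile() is defined later in this module.
def _demoavoidambig(path, data):
    oldstat = filestat.frompath(path)
    writefile(path, data)
    newstat = filestat.frompath(path)
    if newstat.isambig(oldstat):
        # same ctime as the previous write: nudge mtime forward one second
        newstat.avoidambig(path, oldstat)
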
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is not None:
            self.discard()
        else:
            self.close()

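# Illustrative usage sketch (not part of the original module): both paths
# through __exit__. On normal exit close() renames the temp file into place;
# on an exception discard() removes it and the original is left untouched.
def _demoatomicwrite(path, data):
    with atomictempfile(path, 'wb', checkambig=True) as fp:
        fp.write(data)
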
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass

def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)

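# Illustrative sketch (not part of the original module): makedirs() recurses
# toward the root and swallows EEXIST, so concurrent callers and repeated
# calls are both safe. The path is hypothetical.
def _demomakedirs():
    makedirs('a/b/c', mode=0o755)   # creates a, a/b, a/b/c as needed
    makedirs('a/b/c', mode=0o755)   # repeat call is a no-op, not an error
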
def readfile(path):
    with open(path, 'rb') as fp:
        return fp.read()

def writefile(path, text):
    with open(path, 'wb') as fp:
        fp.write(text)

def appendfile(path, text):
    with open(path, 'ab') as fp:
        fp.write(text)

class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)

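# Illustrative usage sketch (not part of the original module): re-chunking an
# iterator of unevenly sized strings into fixed-size reads.
def _demochunkbuffer():
    buf = chunkbuffer(iter(['abc', 'defghij', 'k']))
    assert buf.read(5) == 'abcde'   # spans the first two input chunks
    assert buf.read(6) == 'fghijk'  # drains the partial chunk, then 'k'
    assert buf.read(1) == ''        # iterator ran dry
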
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s

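# Illustrative usage sketch (not part of the original module): streaming a
# file in bounded chunks while honoring an optional byte limit.
def _demofilechunkiter(path):
    total = 0
    with open(path, 'rb') as f:
        for chunk in filechunkiter(f, size=65536, limit=2 ** 20):
            total += len(chunk)    # total never exceeds the 1 MiB limit
    return total
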
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
             datetime.datetime.fromtimestamp(timestamp))
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz

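# Illustrative sketch (not part of the original module): the offset is the
# local zone's distance from UTC in seconds, positive west of UTC.
def _demomakedate():
    when, tz = makedate(0)   # the epoch, interpreted in the local zone
    # tz == 0 in UTC; tz == 28800 in US/Pacific (UTC-8 at the epoch)
    return when, tz
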
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
    return s

def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    return datestr(date, format='%Y-%m-%d')

def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair"""

    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = (s[-5] == "+") and 1 or -1
        hours = int(s[-4:-2])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = (s[-6] == "+") and 1 or -1
        hours = int(s[-5:-3])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    return None, s

def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    if defaults is None:
        defaults = {}

    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        part = pycompat.bytestr(part)
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(encoding.strfromlocal(date),
                              encoding.strfromlocal(format))
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset

def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0:1] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0:1])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise error.ParseError(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise error.ParseError(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise error.ParseError(_('impossible time zone offset: %d') % offset)
    return when, offset

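# Usage sketch (illustrative values, not original doctests): an explicit
# "unixtime offset" string takes the map(int, ...) fast path above and
# never consults the format table:
#
#   parsedate('1501548000 -7200')  ->  (1501548000, -7200)
#
# Anything else is tried against defaultdateformats via strdate().
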
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop

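# Illustrative specifier forms (assuming '%Y-%m-%d' style formats from
# extendeddateformats; the dates are made up):
#
#   matchdate('>2017-06-01')          # on or after June 1st
#   matchdate('<2017-06-01')          # on or before June 1st
#   matchdate('-30')                  # within the last 30 days
#   matchdate('2017-05 to 2017-06')   # inclusive range
#
# Each call returns a predicate over a unixtime, as in the doctests above.
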
def stringmatcher(pattern, casesensitive=True):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])
    >>> def itest(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])

    case insensitive regex matches
    >>> itest('re:A.+b', 'nomatch', 'fooadef', 'fooadefBar')
    ('re', 'A.+b', [False, False, True])

    case insensitive literal matches
    >>> itest('ABCDEFG', 'abc', 'def', 'abcdefg')
    ('literal', 'ABCDEFG', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            flags = 0
            if not casesensitive:
                flags = remod.I
            regex = remod.compile(pattern, flags)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]

    match = pattern.__eq__

    if not casesensitive:
        ipat = encoding.lower(pattern)
        match = lambda s: ipat == encoding.lower(s)
    return 'literal', pattern, match

def shortuser(user):
    """Return a short representation of a user name or email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    f = user.find(' ')
    if f >= 0:
        user = user[:f]
    f = user.find('.')
    if f >= 0:
        user = user[:f]
    return user

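# For instance (an illustrative sketch, not an original doctest):
#
#   shortuser('Joe User <joe.user@example.com>')  ->  'joe'
#
# The '@', '<', space and '.' clauses above strip the string down in turn.
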
def emailuser(user):
    """Return the user portion of an email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    return user

def email(author):
    '''get email of author.'''
    r = author.find('>')
    if r == -1:
        r = None
    return author[author.find('<') + 1:r]

def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    return encoding.trim(text, maxlength, ellipsis='...')

def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if abs(count) >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go

def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
      ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
      ...
    ParseError: fromline must be strictly positive
    """
    if toline - fromline < 0:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    return fromline - 1, toline

bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )

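# The unittable above is scanned top-down and the first row whose
# (multiplier * divisor) threshold fits wins. Illustrative values:
#
#   bytecount(2048)       ->  '2.00 KB'
#   bytecount(104857600)  ->  '100 MB'
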
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')

def tolf(s):
    return _eolre.sub('\n', s)

def tocrlf(s):
    return _eolre.sub('\r\n', s)

if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity

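# For example (illustrative): tolf('a\r\r\nb\n') -> 'a\nb\n' -- the regex
# folds any run of CRs that is followed by a LF into a single newline,
# while a CR with no following LF is left alone.
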
def escapestr(s):
    # call underlying function of s.encode('string_escape') directly for
    # Python 3 compatibility
    return codecs.escape_encode(s)[0]

def unescapestr(s):
    return codecs.escape_decode(s)[0]

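# A round-trip sketch (Python 2 byte-string semantics assumed):
#
#   escapestr('a\nb')     ->  'a\\nb'
#   unescapestr('a\\nb')  ->  'a\nb'
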
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')

# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither the number of 'bytes' in any encoding nor the number of
        'characters' is appropriate to calculate terminal columns for a
        specified string.

        The original TextWrapper implementation uses the built-in 'len()'
        directly, so overriding is needed to use the width information of
        each character.

        In addition, characters classified as 'ambiguous' width are
        treated as wide in East Asian locales, but as narrow elsewhere.

        This requires a policy decision to determine the width of such
        characters.
        """
        def _cutdown(self, ucstr, space_left):
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == r''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + r''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)

def wrap(line, width, initindent='', hangindent=''):
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(pycompat.sysstr(encoding.encoding),
                       pycompat.sysstr(encoding.encodingmode))
    initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))

if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp

def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

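# iterlines() re-splits arbitrarily sized chunks into lines, e.g.
# (illustrative): list(iterlines(['a\nb', 'c\n'])) -> ['a', 'b', 'c'].
# Note it is only line-accurate when chunks end on line boundaries; a line
# spanning two chunks would be yielded in two pieces.
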
def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

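# Environment variables are expanded first, then '~', so assuming
# HOME=/home/joe (illustrative):
#
#   expandpath('$HOME/src')  ->  '/home/joe/src'
#   expandpath('~/src')      ->  '/home/joe/src'
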
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [encoding.environ['EXECUTABLEPATH']]
        else:
            return [pycompat.sysexecutable]
    return gethgcmd()

def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent waits on it, which we cannot do since we expect a
    # long-running process on success. Instead we listen for SIGCHLD
    # telling us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)

def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that makes a doubled prefix stand for
    a single literal prefix character.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

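# A minimal substitution sketch (hypothetical mapping, not original
# doctests). Note the mapping keys become regex alternatives, so they
# should be regex-safe:
#
#   interpolate('%', {'user': 'joe'}, 'hello %user')  ->  'hello joe'
#
# With escape_prefix=True a doubled prefix collapses to a literal one:
#
#   interpolate(r'\$', {'a': 'x'}, 'cost: $$5 and $a', escape_prefix=True)
#       ->  'cost: $5 and x'
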
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)

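# For example (the symbolic lookup depends on the local services database,
# e.g. /etc/services, so treat the last line as an assumption):
#
#   getport(8080)    ->  8080
#   getport('8080')  ->  8080
#   getport('http')  ->  80 on most systems
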
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)

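# Only the table above is recognized; anything else maps to None rather
# than raising, so callers can tell "invalid" apart from False:
#
#   parsebool('on')     ->  True
#   parsebool('never')  ->  False
#   parsebool('maybe')  ->  None
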
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in string.hexdigits for b in string.hexdigits)

class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url('')
    <url path: ''>
    >>> url('#a')
    <url path: '', fragment: 'a'>
    >>> url('http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url('http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url('http:')
    <url scheme: 'http'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLs
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    __str__ = encoding.strmethod(__bytes__)

2835 def authinfo(self):
2839 def authinfo(self):
2836 user, passwd = self.user, self.passwd
2840 user, passwd = self.user, self.passwd
2837 try:
2841 try:
2838 self.user, self.passwd = None, None
2842 self.user, self.passwd = None, None
2839 s = bytes(self)
2843 s = bytes(self)
2840 finally:
2844 finally:
2841 self.user, self.passwd = user, passwd
2845 self.user, self.passwd = user, passwd
2842 if not self.user:
2846 if not self.user:
2843 return (s, None)
2847 return (s, None)
2844 # authinfo[1] is passed to urllib2 password manager, and its
2848 # authinfo[1] is passed to urllib2 password manager, and its
2845 # URIs must not contain credentials. The host is passed in the
2849 # URIs must not contain credentials. The host is passed in the
2846 # URIs list because Python < 2.4.3 uses only that to search for
2850 # URIs list because Python < 2.4.3 uses only that to search for
2847 # a password.
2851 # a password.
2848 return (s, (None, (s, self.host),
2852 return (s, (None, (s, self.host),
2849 self.user, self.passwd or ''))
2853 self.user, self.passwd or ''))
2850
2854
2851 def isabs(self):
2855 def isabs(self):
2852 if self.scheme and self.scheme != 'file':
2856 if self.scheme and self.scheme != 'file':
2853 return True # remote URL
2857 return True # remote URL
2854 if hasdriveletter(self.path):
2858 if hasdriveletter(self.path):
2855 return True # absolute for our purposes - can't be joined()
2859 return True # absolute for our purposes - can't be joined()
2856 if self.path.startswith(br'\\'):
2860 if self.path.startswith(br'\\'):
2857 return True # Windows UNC path
2861 return True # Windows UNC path
2858 if self.path.startswith('/'):
2862 if self.path.startswith('/'):
2859 return True # POSIX-style
2863 return True # POSIX-style
2860 return False
2864 return False
2861
2865
2862 def localpath(self):
2866 def localpath(self):
2863 if self.scheme == 'file' or self.scheme == 'bundle':
2867 if self.scheme == 'file' or self.scheme == 'bundle':
2864 path = self.path or '/'
2868 path = self.path or '/'
2865 # For Windows, we need to promote hosts containing drive
2869 # For Windows, we need to promote hosts containing drive
2866 # letters to paths with drive letters.
2870 # letters to paths with drive letters.
2867 if hasdriveletter(self._hostport):
2871 if hasdriveletter(self._hostport):
2868 path = self._hostport + '/' + self.path
2872 path = self._hostport + '/' + self.path
2869 elif (self.host is not None and self.path
2873 elif (self.host is not None and self.path
2870 and not hasdriveletter(path)):
2874 and not hasdriveletter(path)):
2871 path = '/' + path
2875 path = '/' + path
2872 return path
2876 return path
2873 return self._origpath
2877 return self._origpath
2874
2878
2875 def islocal(self):
2879 def islocal(self):
2876 '''whether localpath will return something that posixfile can open'''
2880 '''whether localpath will return something that posixfile can open'''
2877 return (not self.scheme or self.scheme == 'file'
2881 return (not self.scheme or self.scheme == 'file'
2878 or self.scheme == 'bundle')
2882 or self.scheme == 'bundle')
2879
2883
2880 def hasscheme(path):
2884 def hasscheme(path):
2881 return bool(url(path).scheme)
2885 return bool(url(path).scheme)
2882
2886
2883 def hasdriveletter(path):
2887 def hasdriveletter(path):
2884 return path and path[1:2] == ':' and path[0:1].isalpha()
2888 return path and path[1:2] == ':' and path[0:1].isalpha()
2885
2889
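# Worked examples for the two predicates above (illustrative inputs; results
# follow directly from the code as written):
#
#   hasscheme('https://example.com/repo')  -> True
#   hasscheme('relative/path')             -> False
#   hasdriveletter('c:\\data\\hg')         -> True
#   hasdriveletter('/tmp/foo')             -> False
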
def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return bytes(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

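# Illustrative round trips (hypothetical URL; the outputs follow from the
# url class serialization above):
#
#   hidepassword('http://alice:secret@example.com/repo')
#       -> 'http://alice:***@example.com/repo'
#   removeauth('http://alice:secret@example.com/repo')
#       -> 'http://example.com/repo'
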
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

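# Assuming unitcountfn (defined earlier in this file) picks the first row
# whose threshold (multiplier * divisor) the value reaches, for example:
# timecount(12.5) -> '12.50 s' and timecount(0.0042) -> '4.200 ms'.
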
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = timer()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = timer() - start
            _timenesting[0] -= indent
            stderr.write('%s%s: %s\n' %
                         (' ' * _timenesting[0], func.__name__,
                          timecount(elapsed)))
    return wrapper

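# Illustrative stderr output for a timed call nested inside another timed
# call (two spaces of indent per nesting level, tracked by _timenesting;
# the inner call finishes and reports first):
#
#     inner: 4.200 ms
#   outer: 12.50 s
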
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

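# A minimal usage sketch (hypothetical source names): register callables
# against a collection, then invoke it to run them all and gather results.
#
#   posthooks = hooks()
#   posthooks.add('zzz-ext', lambda data: data + 1)
#   posthooks.add('aaa-ext', lambda data: data * 2)
#   posthooks(10)   # runs 'aaa-ext' first (lexicographic) -> [20, 11]
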
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then returns the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not to be used in production code but very convenient while developing.
    '''
    entries = [(fileline % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
               ][-depth:]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)

def debugstacktrace(msg='stacktrace', skip=0,
                    f=stderr, otherf=stdout, depth=0):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then shows 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not to be used in production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    for line in getstackframes(skip + 1, depth=depth):
        f.write(line)
    f.flush()

class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs

if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs

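# Sketch of the multiset semantics shared by the Python class above and the
# C version from parsers (illustrative paths):
#
#   d = dirs(['a/b/x', 'a/b/y'])
#   'a/b' in d          # True
#   d.delpath('a/b/x')
#   'a/b' in d          # True: still referenced via 'a/b/y'
#   d.delpath('a/b/y')
#   'a/b' in d          # False: last reference dropped
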
def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

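# For example, finddirs('a/b/c') yields 'a/b' and then 'a': every proper
# ancestor directory of the path, deepest first (the root '' is not yielded).
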
# compression code

SERVERROLE = 'server'
CLIENTROLE = 'client'

compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))

class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # No external facing name declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return list(sorted(engines, key=getkey))

    def forwiretype(self, wiretype):
        engine = self._engines[self._wiretypes[wiretype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]

compengines = compressormanager()

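# A minimal lookup sketch against the global manager (the 'gzip' bundle name
# is registered further below in this file):
#
#   engine = compengines.forbundlename('gzip')
#   compressed = ''.join(engine.compressstream(iter(['some ', 'payload'])))
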
class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        return True

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.

        The docstring of this method is used in the help system to tell users
        about this engine.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        If this compression engine isn't supported for compressing wire
        protocol payloads, returns None.

        Otherwise, returns ``compenginewireprotosupport`` with the following
        fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of format
        support by server and client. The highest integer is advertised
        first. Integers with non-positive values aren't advertised.

        The priority values are somewhat arbitrary and only used for default
        ordering. The relative order can be changed via config options.

        If wire protocol compression is supported, the class must also
        implement ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        If this engine can be used to compress revlogs, this method should
        return the bytes used to identify chunks compressed with this engine.
        Else, the method should return ``None`` to indicate it does not
        participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()

class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield d.decompress(chunk, 2 ** 18)
                    chunk = d.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 44:
                return None

            elif insize <= 1000000:
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            else:
                z = zlib.compressobj()
                parts = []
                pos = 0
                while pos < insize:
                    pos2 = pos + 2**20
                    parts.append(z.compress(data[pos:pos2]))
                    pos = pos2
                parts.append(z.flush())

                if sum(map(len, parts)) < insize:
                    return ''.join(parts)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())

class _bz2engine(compressionengine):
    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        z = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_bz2engine())

class _truncatedbz2engine(compressionengine):
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            d = bz2.BZ2Decompressor()
            d.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())

class _noopengine(compressionengine):
    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        return it

    def decompressorreader(self, fh):
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())

class _zstdengine(compressionengine):
    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output. However,
            # it allows decompression to be more optimal since we can
            # pre-allocate a buffer to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                return None

            elif insize <= 1000000:
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())

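# With the built-in engines registered above, server-side negotiation order
# can be sketched as follows (assuming the optional zstd module imports, so
# it wins on priority; ties are broken alphabetically):
#
#   names = [e.wireprotosupport().name
#            for e in compengines.supportedwireengines(SERVERROLE)]
#   # -> ['zstd', 'zlib', 'bzip2', 'none']
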
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    items = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        if not engine.available():
            continue

        bt = engine.bundletype()
        if not bt or not bt[0]:
            continue

        doc = pycompat.sysstr('``%s``\n    %s') % (
            bt[0], engine.bundletype.__doc__)

        value = docobject()
        value.__doc__ = doc

        items[bt[0]] = value

    return items

# convenient shortcut
dst = debugstacktrace
@@ -1,381 +1,418 b''
  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > rebase=
  > drawdag=$TESTDIR/drawdag.py
  >
  > [phases]
  > publish=False
  >
  > [alias]
  > tglog = log -G --template "{rev}: {desc}"
  > EOF

  $ rebasewithdag() {
  > N=`$PYTHON -c "print($N+1)"`
  > hg init repo$N && cd repo$N
  > hg debugdrawdag
  > hg rebase "$@" > _rebasetmp
  > r=$?
  > grep -v 'saved backup bundle' _rebasetmp
  > [ $r -eq 0 ] && hg tglog
  > cd ..
  > return $r
  > }

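The helper above builds a fresh repository from the drawdag graph given on
stdin, runs the requested rebase, filters out the nondeterministic "saved
backup bundle" line, and prints the resulting graph on success.
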
25 Single branching point, without merge:
25 Single branching point, without merge:
26
26
27 $ rebasewithdag -b D -d Z <<'EOS'
27 $ rebasewithdag -b D -d Z <<'EOS'
28 > D E
28 > D E
29 > |/
29 > |/
30 > Z B C # C: branching point, E should be picked
30 > Z B C # C: branching point, E should be picked
31 > \|/ # B should not be picked
31 > \|/ # B should not be picked
32 > A
32 > A
33 > |
33 > |
34 > R
34 > R
35 > EOS
35 > EOS
36 rebasing 3:d6003a550c2c "C" (C)
36 rebasing 3:d6003a550c2c "C" (C)
37 rebasing 5:4526cf523425 "D" (D)
37 rebasing 5:4526cf523425 "D" (D)
38 rebasing 6:b296604d9846 "E" (E tip)
38 rebasing 6:b296604d9846 "E" (E tip)
39 o 6: E
39 o 6: E
40 |
40 |
41 | o 5: D
41 | o 5: D
42 |/
42 |/
43 o 4: C
43 o 4: C
44 |
44 |
45 o 3: Z
45 o 3: Z
46 |
46 |
47 | o 2: B
47 | o 2: B
48 |/
48 |/
49 o 1: A
49 o 1: A
50 |
50 |
51 o 0: R
51 o 0: R
52
52
53 Multiple branching points caused by selecting a single merge changeset:
53 Multiple branching points caused by selecting a single merge changeset:
54
54
55 $ rebasewithdag -b E -d Z <<'EOS'
55 $ rebasewithdag -b E -d Z <<'EOS'
56 > E
56 > E
57 > /|
57 > /|
58 > B C D # B, C: multiple branching points
58 > B C D # B, C: multiple branching points
59 > | |/ # D should not be picked
59 > | |/ # D should not be picked
60 > Z | /
60 > Z | /
61 > \|/
61 > \|/
62 > A
62 > A
63 > |
63 > |
64 > R
64 > R
65 > EOS
65 > EOS
66 rebasing 2:c1e6b162678d "B" (B)
66 rebasing 2:c1e6b162678d "B" (B)
67 rebasing 3:d6003a550c2c "C" (C)
67 rebasing 3:d6003a550c2c "C" (C)
68 rebasing 6:54c8f00cb91c "E" (E tip)
68 rebasing 6:54c8f00cb91c "E" (E tip)
69 o 6: E
69 o 6: E
70 |\
70 |\
71 | o 5: C
71 | o 5: C
72 | |
72 | |
73 o | 4: B
73 o | 4: B
74 |/
74 |/
75 o 3: Z
75 o 3: Z
76 |
76 |
77 | o 2: D
77 | o 2: D
78 |/
78 |/
79 o 1: A
79 o 1: A
80 |
80 |
81 o 0: R
81 o 0: R
82
82
83 Rebase should not extend the "--base" revset using "descendants":
83 Rebase should not extend the "--base" revset using "descendants":
84
84
85 $ rebasewithdag -b B -d Z <<'EOS'
85 $ rebasewithdag -b B -d Z <<'EOS'
86 > E
86 > E
87 > /|
87 > /|
88 > Z B C # descendants(B) = B+E. With E, C will be included incorrectly
88 > Z B C # descendants(B) = B+E. With E, C will be included incorrectly
89 > \|/
89 > \|/
90 > A
90 > A
91 > |
91 > |
92 > R
92 > R
93 > EOS
93 > EOS
94 rebasing 2:c1e6b162678d "B" (B)
94 rebasing 2:c1e6b162678d "B" (B)
95 rebasing 5:54c8f00cb91c "E" (E tip)
95 rebasing 5:54c8f00cb91c "E" (E tip)
96 o 5: E
96 o 5: E
97 |\
97 |\
98 | o 4: B
98 | o 4: B
99 | |
99 | |
100 | o 3: Z
100 | o 3: Z
101 | |
101 | |
102 o | 2: C
102 o | 2: C
103 |/
103 |/
104 o 1: A
104 o 1: A
105 |
105 |
106 o 0: R
106 o 0: R
107
107
Rebase should not simplify the "--base" revset using "roots":

$ rebasewithdag -b B+E -d Z <<'EOS'
> E
> /|
> Z B C # roots(B+E) = B. Without E, C will be missed incorrectly
> \|/
> A
> |
> R
> EOS
rebasing 2:c1e6b162678d "B" (B)
rebasing 3:d6003a550c2c "C" (C)
rebasing 5:54c8f00cb91c "E" (E tip)
o 5: E
|\
| o 4: C
| |
o | 3: B
|/
o 2: Z
|
o 1: A
|
o 0: R

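The two cases above pin down the shape of the "--base" selection: start
from the common ancestor of the bases and the destination, take the
ancestors of each base above that point, and only then expand to
descendants. A minimal sketch of that computation in Python, assuming
the real expression in rebase.py has this shape (repo.revs() and the
smartset first() method are real Mercurial APIs; the exact revset used
by rebase may differ):

  def basetorebaseset(repo, base, dest):
      # common ancestor of all --base revisions and the destination
      commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first()
      if commonanc is None:
          # base and dest share no common ancestor: nothing to rebase
          return repo.revs('none()')
      # ancestors of each base above the branching point, then their
      # descendants; substituting descendants(base) or roots(base)
      # here would reproduce exactly the two bugs ruled out above
      return repo.revs('(%d::(%ld) - %d)::', commonanc, base, commonanc)
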
The destination is one of the two branching points of a merge:

$ rebasewithdag -b F -d Z <<'EOS'
> F
> / \
> E D
> / /
> Z C
> \ /
> B
> |
> A
> EOS
nothing to rebase
[1]

Multiple branching points caused by multiple bases (issue5420):

$ rebasewithdag -b E1+E2+C2+B1 -d Z <<'EOS'
> Z E2
> | /
> F E1 C2
> |/ /
> E C1 B2
> |/ /
> C B1
> |/
> B
> |
> A
> |
> R
> EOS
rebasing 3:a113dbaa660a "B1" (B1)
rebasing 5:06ce7b1cc8c2 "B2" (B2)
rebasing 6:0ac98cce32d3 "C1" (C1)
rebasing 8:781512f5e33d "C2" (C2)
rebasing 9:428d8c18f641 "E1" (E1)
rebasing 11:e1bf82f6b6df "E2" (E2)
o 12: E2
|
o 11: E1
|
| o 10: C2
| |
| o 9: C1
|/
| o 8: B2
| |
| o 7: B1
|/
o 6: Z
|
o 5: F
|
o 4: E
|
o 3: C
|
o 2: B
|
o 1: A
|
o 0: R

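The list-valued %ld argument in the sketch after the "roots" test is
what makes this multiple-base case work: every base contributes its own
branching point, so no single ancestor check can replace the per-base
walk. A hypothetical invocation against the DAG above (debugdrawdag
tags each drawn letter, so repo['E1'] and friends should resolve; the
basetorebaseset name comes from the earlier sketch, not rebase.py):

  # revs of the four bases as drawn above (hypothetical lookup)
  base = [repo[name].rev() for name in ('E1', 'E2', 'C2', 'B1')]
  dest = repo['Z'].rev()
  rebaseset = basetorebaseset(repo, base, dest)
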
Multiple branching points with multiple merges:

$ rebasewithdag -b G+P -d Z <<'EOS'
> G H P
> |\ /| |\
> F E D M N
> \|/| /| |\
> Z C B I J K L
> \|/ |/ |/
> A A A
> EOS
rebasing 2:dc0947a82db8 "C" (C)
rebasing 8:4e4f9194f9f1 "D" (D)
rebasing 9:03ca77807e91 "E" (E)
rebasing 10:afc707c82df0 "F" (F)
rebasing 13:690dfff91e9e "G" (G)
rebasing 14:2893b886bb10 "H" (H)
rebasing 3:08ebfeb61bac "I" (I)
rebasing 4:a0a5005cec67 "J" (J)
rebasing 5:83780307a7e8 "K" (K)
rebasing 6:e131637a1cb6 "L" (L)
rebasing 11:d1f6d0c3c7e4 "M" (M)
rebasing 12:7aaec6f81888 "N" (N)
rebasing 15:325bc8f1760d "P" (P tip)
o 15: P
|\
| o 14: N
| |\
o \ \ 13: M
|\ \ \
| | | o 12: L
| | | |
| | o | 11: K
| | |/
| o / 10: J
| |/
o / 9: I
|/
| o 8: H
| |\
| | | o 7: G
| | |/|
| | | o 6: F
| | | |
| | o | 5: E
| | |/
| o | 4: D
| |\|
+---o 3: C
| |
o | 2: Z
| |
| o 1: B
|/
o 0: A

Slightly more complex merge case (mentioned in https://www.mercurial-scm.org/pipermail/mercurial-devel/2016-November/091074.html):

$ rebasewithdag -b A3+B3 -d Z <<'EOF'
> Z C1 A3 B3
> | / / \ / \
> M3 C0 A1 A2 B1 B2
> | / | | | |
> M2 M1 C1 C1 M3
> |
> M1
> |
> M0
> EOF
rebasing 4:8817fae53c94 "C0" (C0)
rebasing 6:06ca5dfe3b5b "B2" (B2)
rebasing 7:73508237b032 "C1" (C1)
rebasing 9:fdb955e2faed "A2" (A2)
rebasing 11:4e449bd1a643 "A3" (A3)
rebasing 10:0a33b0519128 "B1" (B1)
rebasing 12:209327807c3a "B3" (B3 tip)
o 12: B3
|\
| o 11: B1
| |
| | o 10: A3
| | |\
| +---o 9: A2
| | |
| o | 8: C1
| | |
o | | 7: B2
| | |
| o | 6: C0
|/ /
o | 5: Z
| |
o | 4: M3
| |
o | 3: M2
| |
| o 2: A1
|/
o 1: M1
|
o 0: M0

Disconnected graph:

$ rebasewithdag -b B -d Z <<'EOS'
> B
> |
> Z A
> EOS
nothing to rebase from 112478962961 to 48b9aae0607f
[1]

Multiple roots. Roots are ancestors of dest:

$ rebasewithdag -b B+D -d Z <<'EOF'
> D Z B
> \|\|
> C A
> EOF
rebasing 2:112478962961 "B" (B)
rebasing 3:b70f76719894 "D" (D)
o 4: D
|
| o 3: B
|/
o 2: Z
|\
| o 1: C
|
o 0: A

Multiple roots. One root is not an ancestor of dest:

$ rebasewithdag -b B+D -d Z <<'EOF'
> Z B D
> \|\|
> A C
> EOF
nothing to rebase from f675d5a1c6a4+b70f76719894 to 262e37e34f63
[1]

Multiple roots. One root is not an ancestor of dest. Select using a merge:

$ rebasewithdag -b E -d Z <<'EOF'
> E
> |\
> Z B D
> \|\|
> A C
> EOF
rebasing 2:f675d5a1c6a4 "B" (B)
rebasing 5:f68696fe6af8 "E" (E tip)
o 5: E
|\
| o 4: B
| |\
| | o 3: Z
| | |
o | | 2: D
|/ /
o / 1: C
/
o 0: A

Multiple roots. Two children share two parents while dest has only one parent:

$ rebasewithdag -b B+D -d Z <<'EOF'
> Z B D
> \|\|\
> A C A
> EOF
rebasing 2:f675d5a1c6a4 "B" (B)
rebasing 3:c2a779e13b56 "D" (D)
o 4: D
|\
+---o 3: B
| |/
| o 2: Z
| |
o | 1: C
/
o 0: A

Rebasing using a single transaction

$ hg init singletr && cd singletr
$ cat >> .hg/hgrc <<EOF
> [rebase]
> singletransaction=True
> EOF
$ hg debugdrawdag <<'EOF'
> Z
> |
> | D
> | |
> | C
> | |
> Y B
> |/
> A
> EOF
- We should only see two status stored messages. One from the start, one from
- the end.
$ hg rebase --debug -b D -d Z | grep 'status stored'
rebase status stored
rebase status stored
$ hg tglog
o 5: D
|
o 4: C
|
o 3: B
|
o 2: Z
|
o 1: Y
|
o 0: A

$ cd ..
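
The singletransaction setting is why only two "rebase status stored"
lines appear: the whole run shares one transaction and one dirstate
guard instead of opening a fresh pair per rebased changeset. A rough
sketch of the control flow, not the actual rebase.py code
(repo.transaction() and dirstateguard.dirstateguard() are real
Mercurial APIs; runone() is a hypothetical per-revision helper):

  from mercurial import dirstateguard
  from mercurial.lock import release

  def runrebase(repo, revs, singletr, runone):
      tr = dsguard = None
      try:
          if singletr:
              # one transaction and one guard for the entire rebase
              tr = repo.transaction('rebase')
              dsguard = dirstateguard.dirstateguard(repo, 'rebase')
          for rev in revs:
              # without singletransaction, each iteration would open and
              # close its own transaction/guard, storing status each time
              runone(repo, rev)
      finally:
          release(dsguard, tr)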