@@ -1,1951 +1,1951 @@
# rebase.py - rebasing feature for mercurial
#
# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''command to move sets of revisions to a different ancestor

This extension lets you rebase changesets in an existing Mercurial
repository.

For more information:
https://mercurial-scm.org/wiki/RebaseExtension
'''
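
# Example usage (illustrative only; the option flags are the ones defined
# by the command table below, and SRC/DEST are placeholder revisions):
#
#   [extensions]
#   rebase =
#
#   $ hg rebase -s SRC -d DEST    # move SRC and its descendants onto DEST
#   $ hg rebase --continue        # resume after resolving merge conflicts
#   $ hg rebase --abort           # restore the pre-rebase state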

from __future__ import absolute_import

import errno
import os

from mercurial.i18n import _
from mercurial.node import (
    nullrev,
    short,
)
from mercurial import (
    bookmarks,
    cmdutil,
    commands,
    copies,
    destutil,
    dirstateguard,
    error,
    extensions,
    hg,
    merge as mergemod,
    mergeutil,
    obsolete,
    obsutil,
    patch,
    phases,
    pycompat,
    registrar,
    repair,
    revset,
    revsetlang,
    scmutil,
    smartset,
    state as statemod,
    util,
)

# The following constants are used throughout the rebase module. The ordering of
# their values must be maintained.

# Indicates that a revision needs to be rebased
revtodo = -1
revtodostr = '-1'

# legacy revstates no longer needed in current code
# -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
legacystates = {'-2', '-3', '-4', '-5'}

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

def _nothingtorebase():
    return 1

def _savegraft(ctx, extra):
    s = ctx.extra().get('source', None)
    if s is not None:
        extra['source'] = s
    s = ctx.extra().get('intermediate-source', None)
    if s is not None:
        extra['intermediate-source'] = s

def _savebranch(ctx, extra):
    extra['branch'] = ctx.branch()

def _destrebase(repo, sourceset, destspace=None):
    """small wrapper around destmerge to pass the right extra args

    Please wrap destutil.destmerge instead."""
    return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
                              onheadcheck=False, destspace=destspace)

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``

    # default destination for rebase.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # i18n: "_rebasedefaultdest" is a keyword
    sourceset = None
    if x is not None:
        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
    return subset & smartset.baseset([_destrebase(repo, sourceset)])

@revsetpredicate('_destautoorphanrebase')
def _revsetdestautoorphanrebase(repo, subset, x):
    """automatic rebase destination for a single orphan revision"""
    unfi = repo.unfiltered()
    obsoleted = unfi.revs('obsolete()')

    src = revset.getset(repo, subset, x).first()

    # Empty src or already obsoleted - Do not return a destination
    if not src or src in obsoleted:
        return smartset.baseset()
    dests = destutil.orphanpossibledestination(repo, src)
    if len(dests) > 1:
        raise error.Abort(
            _("ambiguous automatic rebase: %r could end up on any of %r") % (
                src, dests))
    # We have zero or one destination, so we can just return here.
    return smartset.baseset(dests)

def _ctxdesc(ctx):
    """short description for a context"""
    desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                           ctx.description().split('\n', 1)[0])
    repo = ctx.repo()
    names = []
    for nsname, ns in repo.names.iteritems():
        if nsname == 'branches':
            continue
        names.extend(ns.names(repo, ctx.node()))
    if names:
        desc += ' (%s)' % ' '.join(names)
    return desc
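
# For illustration, _ctxdesc() returns strings of the form
#   123:abc123def456 "first line of the description" (bookmark names)
# where the rev number, short hash, and names are placeholders.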

class rebaseruntime(object):
    """This class is a container for rebase runtime state"""
    def __init__(self, repo, ui, inmemory=False, opts=None):
        if opts is None:
            opts = {}

        # prepared: whether we have rebasestate prepared or not. Currently it
        # decides whether "self.repo" is unfiltered or not.
        # The rebasestate has explicit hash to hash instructions not depending
        # on visibility. If rebasestate exists (in-memory or on-disk), use
        # unfiltered repo to avoid visibility issues.
        # Before knowing rebasestate (i.e. when starting a new rebase (not
        # --continue or --abort)), the original repo should be used so
        # visibility-dependent revsets are correct.
        self.prepared = False
        self._repo = repo

        self.ui = ui
        self.opts = opts
        self.originalwd = None
        self.external = nullrev
        # Mapping between the old revision id and either what is the new rebased
        # revision or what needs to be done with the old revision. The state
        # dict will be what contains most of the rebase progress state.
        self.state = {}
        self.activebookmark = None
        self.destmap = {}
        self.skipped = set()

        self.collapsef = opts.get('collapse', False)
        self.collapsemsg = cmdutil.logmessage(ui, opts)
        self.date = opts.get('date', None)

        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        self.extrafns = [_savegraft]
        if e:
            self.extrafns = [e]

        self.backupf = ui.configbool('rewrite', 'backup-bundle')
        self.keepf = opts.get('keep', False)
        self.keepbranchesf = opts.get('keepbranches', False)
        self.obsoletenotrebased = {}
        self.obsoletewithoutsuccessorindestination = set()
        self.inmemory = inmemory
        self.stateobj = statemod.cmdstate(repo, 'rebasestate')

    @property
    def repo(self):
        if self.prepared:
            return self._repo.unfiltered()
        else:
            return self._repo

    def storestatus(self, tr=None):
        """Store the current status to allow recovery"""
        if tr:
            tr.addfilegenerator('rebasestate', ('rebasestate',),
                                self._writestatus, location='plain')
        else:
            with self.repo.vfs("rebasestate", "w") as f:
                self._writestatus(f)

    def _writestatus(self, f):
        repo = self.repo
        assert repo.filtername is None
        f.write(repo[self.originalwd].hex() + '\n')
        # was "dest". we now write dest per src root below.
        f.write('\n')
        f.write(repo[self.external].hex() + '\n')
        f.write('%d\n' % int(self.collapsef))
        f.write('%d\n' % int(self.keepf))
        f.write('%d\n' % int(self.keepbranchesf))
        f.write('%s\n' % (self.activebookmark or ''))
        destmap = self.destmap
        for d, v in self.state.iteritems():
            oldrev = repo[d].hex()
            if v >= 0:
                newrev = repo[v].hex()
            else:
                newrev = "%d" % v
            destnode = repo[destmap[d]].hex()
            f.write("%s:%s:%s\n" % (oldrev, newrev, destnode))
        repo.ui.debug('rebase status stored\n')
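
    # For illustration, the .hg/rebasestate file written above has this shape
    # (a sketch based on the writes in _writestatus; hashes are placeholders):
    #
    #   <originalwd hash>
    #   <empty line - the legacy single-destination slot>
    #   <external parent hash>
    #   <collapse flag: 0 or 1>
    #   <keep flag: 0 or 1>
    #   <keepbranches flag: 0 or 1>
    #   <active bookmark name, possibly empty>
    #   <oldrev hash>:<newrev hash or negative state>:<destination hash>
    #   ... one such line per revision in the rebase set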

    def restorestatus(self):
        """Restore a previously stored status"""
        if not self.stateobj.exists():
            cmdutil.wrongtooltocontinue(self.repo, _('rebase'))

        data = self._read()
        self.repo.ui.debug('rebase status resumed\n')

        self.originalwd = data['originalwd']
        self.destmap = data['destmap']
        self.state = data['state']
        self.skipped = data['skipped']
        self.collapsef = data['collapse']
        self.keepf = data['keep']
        self.keepbranchesf = data['keepbranches']
        self.external = data['external']
        self.activebookmark = data['activebookmark']

    def _read(self):
        self.prepared = True
        repo = self.repo
        assert repo.filtername is None
        data = {'keepbranches': None, 'collapse': None, 'activebookmark': None,
                'external': nullrev, 'keep': None, 'originalwd': None}
        legacydest = None
        state = {}
        destmap = {}

        if True:
            f = repo.vfs("rebasestate")
            for i, l in enumerate(f.read().splitlines()):
                if i == 0:
                    data['originalwd'] = repo[l].rev()
                elif i == 1:
                    # this line should be empty in newer versions, but legacy
                    # clients may still use it
                    if l:
                        legacydest = repo[l].rev()
                elif i == 2:
                    data['external'] = repo[l].rev()
                elif i == 3:
                    data['collapse'] = bool(int(l))
                elif i == 4:
                    data['keep'] = bool(int(l))
                elif i == 5:
                    data['keepbranches'] = bool(int(l))
                elif i == 6 and not (len(l) == 81 and ':' in l):
                    # line 6 is a recent addition, so for backwards
                    # compatibility check that the line doesn't look like the
                    # oldrev:newrev lines
                    data['activebookmark'] = l
                else:
                    args = l.split(':')
                    oldrev = repo[args[0]].rev()
                    newrev = args[1]
                    if newrev in legacystates:
                        continue
                    if len(args) > 2:
                        destrev = repo[args[2]].rev()
                    else:
                        destrev = legacydest
                    destmap[oldrev] = destrev
                    if newrev == revtodostr:
                        state[oldrev] = revtodo
                        # Legacy compat special case
                    else:
                        state[oldrev] = repo[newrev].rev()

        if data['keepbranches'] is None:
            raise error.Abort(_('.hg/rebasestate is incomplete'))

        data['destmap'] = destmap
        data['state'] = state
        skipped = set()
        # recompute the set of skipped revs
        if not data['collapse']:
            seen = set(destmap.values())
            for old, new in sorted(state.items()):
                if new != revtodo and new in seen:
                    skipped.add(old)
                seen.add(new)
        data['skipped'] = skipped
        repo.ui.debug('computed skipped revs: %s\n' %
                      (' '.join('%d' % r for r in sorted(skipped)) or ''))

        return data

    def _handleskippingobsolete(self, obsoleterevs, destmap):
        """Compute structures necessary for skipping obsolete revisions

        obsoleterevs: iterable of all obsolete revisions in rebaseset
        destmap: {srcrev: destrev} destination revisions
        """
        self.obsoletenotrebased = {}
        if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
            return
        obsoleteset = set(obsoleterevs)
        (self.obsoletenotrebased,
         self.obsoletewithoutsuccessorindestination,
         obsoleteextinctsuccessors) = _computeobsoletenotrebased(
             self.repo, obsoleteset, destmap)
        skippedset = set(self.obsoletenotrebased)
        skippedset.update(self.obsoletewithoutsuccessorindestination)
        skippedset.update(obsoleteextinctsuccessors)
        _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)

    def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
        try:
            self.restorestatus()
            self.collapsemsg = restorecollapsemsg(self.repo, isabort)
        except error.RepoLookupError:
            if isabort:
                clearstatus(self.repo)
                clearcollapsemsg(self.repo)
                self.repo.ui.warn(_('rebase aborted (no revision is removed,'
                                    ' only broken state is cleared)\n'))
                return 0
            else:
                msg = _('cannot continue inconsistent rebase')
                hint = _('use "hg rebase --abort" to clear broken state')
                raise error.Abort(msg, hint=hint)

        if isabort:
            backup = backup and self.backupf
            return self._abort(backup=backup, suppwarns=suppwarns)

    def _preparenewrebase(self, destmap):
        if not destmap:
            return _nothingtorebase()

        rebaseset = destmap.keys()
        allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
        if (not (self.keepf or allowunstable)
            and self.repo.revs('first(children(%ld) - %ld)',
                               rebaseset, rebaseset)):
            raise error.Abort(
                _("can't remove original changesets with"
                  " unrebased descendants"),
                hint=_('use --keep to keep original changesets'))

        result = buildstate(self.repo, destmap, self.collapsef)

        if not result:
            # Empty state built, nothing to rebase
            self.ui.status(_('nothing to rebase\n'))
            return _nothingtorebase()

        for root in self.repo.set('roots(%ld)', rebaseset):
            if not self.keepf and not root.mutable():
                raise error.Abort(_("can't rebase public changeset %s")
                                  % root,
                                  hint=_("see 'hg help phases' for details"))

        (self.originalwd, self.destmap, self.state) = result
        if self.collapsef:
            dests = set(self.destmap.values())
            if len(dests) != 1:
                raise error.Abort(
                    _('--collapse does not work with multiple destinations'))
            destrev = next(iter(dests))
            destancestors = self.repo.changelog.ancestors([destrev],
                                                          inclusive=True)
            self.external = externalparent(self.repo, self.state, destancestors)

        for destrev in sorted(set(destmap.values())):
            dest = self.repo[destrev]
            if dest.closesbranch() and not self.keepbranchesf:
                self.ui.status(_('reopening closed branch head %s\n') % dest)

        self.prepared = True

    def _assignworkingcopy(self):
        if self.inmemory:
            from mercurial.context import overlayworkingctx
            self.wctx = overlayworkingctx(self.repo)
            self.repo.ui.debug("rebasing in-memory\n")
        else:
            self.wctx = self.repo[None]
            self.repo.ui.debug("rebasing on disk\n")
        self.repo.ui.log("rebase",
                         "using in-memory rebase: %r\n", self.inmemory,
                         rebase_imm_used=self.inmemory)

    def _performrebase(self, tr):
        self._assignworkingcopy()
        repo, ui = self.repo, self.ui
        if self.keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            self.extrafns.insert(0, _savebranch)
            if self.collapsef:
                branches = set()
                for rev in self.state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.Abort(_('cannot collapse multiple named '
                                        'branches'))

        # Calculate self.obsoletenotrebased
        obsrevs = _filterobsoleterevs(self.repo, self.state)
        self._handleskippingobsolete(obsrevs, self.destmap)

        # Keep track of the active bookmarks in order to reset them later
        self.activebookmark = self.activebookmark or repo._activebookmark
        if self.activebookmark:
            bookmarks.deactivate(repo)

        # Store the state before we begin so users can run 'hg rebase --abort'
        # if we fail before the transaction closes.
        self.storestatus()
        if tr:
            # When using single transaction, store state when transaction
            # commits.
            self.storestatus(tr)

        cands = [k for k, v in self.state.iteritems() if v == revtodo]
        p = repo.ui.makeprogress(_("rebasing"), unit=_('changesets'),
                                 total=len(cands))
        def progress(ctx):
            p.increment(item=("%d:%s" % (ctx.rev(), ctx)))
        allowdivergence = self.ui.configbool(
            'experimental', 'evolution.allowdivergence')
        for subset in sortsource(self.destmap):
            sortedrevs = self.repo.revs('sort(%ld, -topo)', subset)
            if not allowdivergence:
                sortedrevs -= self.repo.revs(
                    'descendants(%ld) and not %ld',
                    self.obsoletewithoutsuccessorindestination,
                    self.obsoletewithoutsuccessorindestination,
                )
            for rev in sortedrevs:
                self._rebasenode(tr, rev, allowdivergence, progress)
        p.complete()
        ui.note(_('rebase merging completed\n'))

    def _concludenode(self, rev, p1, p2, editor, commitmsg=None):
        '''Commit the wd changes with parents p1 and p2.

        Reuse commit info from rev but also store useful information in extra.
        Return node of committed revision.'''
        repo = self.repo
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        date = self.date
        if date is None:
            date = ctx.date()
        extra = {'rebase_source': ctx.hex()}
        for c in self.extrafns:
            c(ctx, extra)
        keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
        destphase = max(ctx.phase(), phases.draft)
        overrides = {('phases', 'new-commit'): destphase}
        if keepbranch:
            overrides[('ui', 'allowemptycommit')] = True
        with repo.ui.configoverride(overrides, 'rebase'):
            if self.inmemory:
                newnode = commitmemorynode(repo, p1, p2,
                                           wctx=self.wctx,
                                           extra=extra,
                                           commitmsg=commitmsg,
                                           editor=editor,
                                           user=ctx.user(),
                                           date=date)
                mergemod.mergestate.clean(repo)
            else:
                newnode = commitnode(repo, p1, p2,
                                     extra=extra,
                                     commitmsg=commitmsg,
                                     editor=editor,
                                     user=ctx.user(),
                                     date=date)

            if newnode is None:
                # If it ended up being a no-op commit, then the normal
                # merge state clean-up path doesn't happen, so do it
                # here. Fix issue5494
                mergemod.mergestate.clean(repo)
            return newnode

    def _rebasenode(self, tr, rev, allowdivergence, progressfn):
        repo, ui, opts = self.repo, self.ui, self.opts
        dest = self.destmap[rev]
        ctx = repo[rev]
        desc = _ctxdesc(ctx)
        if self.state[rev] == rev:
            ui.status(_('already rebased %s\n') % desc)
        elif (not allowdivergence
              and rev in self.obsoletewithoutsuccessorindestination):
            msg = _('note: not rebasing %s and its descendants as '
                    'this would cause divergence\n') % desc
            repo.ui.status(msg)
            self.skipped.add(rev)
        elif rev in self.obsoletenotrebased:
            succ = self.obsoletenotrebased[rev]
            if succ is None:
                msg = _('note: not rebasing %s, it has no '
                        'successor\n') % desc
            else:
                succdesc = _ctxdesc(repo[succ])
                msg = (_('note: not rebasing %s, already in '
                         'destination as %s\n') % (desc, succdesc))
            repo.ui.status(msg)
            # Make clearrebased aware state[rev] is not a true successor
            self.skipped.add(rev)
            # Record rev as moved to its desired destination in self.state.
            # This helps bookmark and working parent movement.
            dest = max(adjustdest(repo, rev, self.destmap, self.state,
                                  self.skipped))
            self.state[rev] = dest
        elif self.state[rev] == revtodo:
            ui.status(_('rebasing %s\n') % desc)
            progressfn(ctx)
            p1, p2, base = defineparents(repo, rev, self.destmap,
                                         self.state, self.skipped,
                                         self.obsoletenotrebased)
            if not self.inmemory and len(repo[None].parents()) == 2:
                repo.ui.debug('resuming interrupted rebase\n')
            else:
                overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
                with ui.configoverride(overrides, 'rebase'):
                    stats = rebasenode(repo, rev, p1, base, self.collapsef,
                                       dest, wctx=self.wctx)
                    if stats.unresolvedcount > 0:
                        if self.inmemory:
                            raise error.InMemoryMergeConflictsError()
                        else:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
            if not self.collapsef:
                merging = p2 != nullrev
                editform = cmdutil.mergeeditform(merging, 'rebase')
                editor = cmdutil.getcommiteditor(editform=editform,
                                                 **pycompat.strkwargs(opts))
                newnode = self._concludenode(rev, p1, p2, editor)
            else:
                # Skip commit if we are collapsing
                if self.inmemory:
                    self.wctx.setbase(repo[p1])
                else:
                    repo.setparents(repo[p1].node())
                newnode = None
            # Update the state
            if newnode is not None:
                self.state[rev] = repo[newnode].rev()
                ui.debug('rebased as %s\n' % short(newnode))
            else:
                if not self.collapsef:
                    ui.warn(_('note: not rebasing %s, its destination already '
                              'has all its changes\n') % desc)
                    self.skipped.add(rev)
                    self.state[rev] = p1
                    ui.debug('next revision set to %d\n' % p1)
        else:
            ui.status(_('already rebased %s as %s\n') %
                      (desc, repo[self.state[rev]]))
        if not tr:
            # When not using single transaction, store state after each
            # commit is completely done. On InterventionRequired, we thus
            # won't store the status. Instead, we'll hit the "len(parents) == 2"
            # case and realize that the commit was in progress.
            self.storestatus()

    def _finishrebase(self):
        repo, ui, opts = self.repo, self.ui, self.opts
        fm = ui.formatter('rebase', opts)
        fm.startitem()
        if self.collapsef:
            p1, p2, _base = defineparents(repo, min(self.state), self.destmap,
                                          self.state, self.skipped,
                                          self.obsoletenotrebased)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if self.collapsemsg:
                commitmsg = self.collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in sorted(self.state):
                    if rebased not in self.skipped:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
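                # For illustration, the default message built above reads:
                #   Collapsed revision
                #   * description of the first rebased changeset
                #   * description of the next one, and so on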
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            revtoreuse = max(self.state)

            newnode = self._concludenode(revtoreuse, p1, self.external,
                                         editor, commitmsg=commitmsg)

            if newnode is not None:
                newrev = repo[newnode].rev()
                for oldrev in self.state:
                    self.state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, self.state, self.skipped,
                     **pycompat.strkwargs(opts))

        # restore original working directory
        # (we do this before stripping)
        newwd = self.state.get(self.originalwd, self.originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = self.originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, overwrite=False)

        collapsedas = None
        if self.collapsef and not self.keepf:
            collapsedas = newnode
        clearrebased(ui, repo, self.destmap, self.state, self.skipped,
                     collapsedas, self.keepf, fm=fm, backup=self.backupf)

        clearstatus(repo)
        clearcollapsemsg(repo)

        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if self.skipped:
            skippedlen = len(self.skipped)
            ui.note(_("%d revisions have been skipped\n") % skippedlen)
        fm.end()

        if (self.activebookmark and self.activebookmark in repo._bookmarks and
            repo['.'].node() == repo._bookmarks[self.activebookmark]):
                bookmarks.activate(repo, self.activebookmark)

    def _abort(self, backup=True, suppwarns=False):
        '''Restore the repository to its original state.'''

        repo = self.repo
        try:
            # If the first commits in the rebased set get skipped during the
            # rebase, their values within the state mapping will be the dest
            # rev id. The rebased list must not contain the dest rev
662 | # (issue4896) |
|
662 | # (issue4896) | |
663 | rebased = [s for r, s in self.state.items() |
|
663 | rebased = [s for r, s in self.state.items() | |
664 | if s >= 0 and s != r and s != self.destmap[r]] |
|
664 | if s >= 0 and s != r and s != self.destmap[r]] | |
665 | immutable = [d for d in rebased if not repo[d].mutable()] |
|
665 | immutable = [d for d in rebased if not repo[d].mutable()] | |
666 | cleanup = True |
|
666 | cleanup = True | |
667 | if immutable: |
|
667 | if immutable: | |
668 | repo.ui.warn(_("warning: can't clean up public changesets %s\n") |
|
668 | repo.ui.warn(_("warning: can't clean up public changesets %s\n") | |
669 | % ', '.join(bytes(repo[r]) for r in immutable), |
|
669 | % ', '.join(bytes(repo[r]) for r in immutable), | |
670 | hint=_("see 'hg help phases' for details")) |
|
670 | hint=_("see 'hg help phases' for details")) | |
671 | cleanup = False |
|
671 | cleanup = False | |
672 |
|
672 | |||
673 | descendants = set() |
|
673 | descendants = set() | |
674 | if rebased: |
|
674 | if rebased: | |
675 | descendants = set(repo.changelog.descendants(rebased)) |
|
675 | descendants = set(repo.changelog.descendants(rebased)) | |
676 | if descendants - set(rebased): |
|
676 | if descendants - set(rebased): | |
677 | repo.ui.warn(_("warning: new changesets detected on " |
|
677 | repo.ui.warn(_("warning: new changesets detected on " | |
678 | "destination branch, can't strip\n")) |
|
678 | "destination branch, can't strip\n")) | |
679 | cleanup = False |
|
679 | cleanup = False | |
680 |
|
680 | |||
681 | if cleanup: |
|
681 | if cleanup: | |
682 | shouldupdate = False |
|
682 | shouldupdate = False | |
683 | if rebased: |
|
683 | if rebased: | |
684 | strippoints = [ |
|
684 | strippoints = [ | |
685 | c.node() for c in repo.set('roots(%ld)', rebased)] |
|
685 | c.node() for c in repo.set('roots(%ld)', rebased)] | |
686 |
|
686 | |||
687 | updateifonnodes = set(rebased) |
|
687 | updateifonnodes = set(rebased) | |
688 | updateifonnodes.update(self.destmap.values()) |
|
688 | updateifonnodes.update(self.destmap.values()) | |
689 | updateifonnodes.add(self.originalwd) |
|
689 | updateifonnodes.add(self.originalwd) | |
690 | shouldupdate = repo['.'].rev() in updateifonnodes |
|
690 | shouldupdate = repo['.'].rev() in updateifonnodes | |
691 |
|
691 | |||
692 | # Update away from the rebase if necessary |
|
692 | # Update away from the rebase if necessary | |
693 | if shouldupdate or needupdate(repo, self.state): |
|
693 | if shouldupdate or needupdate(repo, self.state): | |
694 | mergemod.update(repo, self.originalwd, branchmerge=False, |
|
694 | mergemod.update(repo, self.originalwd, branchmerge=False, | |
695 | force=True) |
|
695 | force=True) | |
696 |
|
696 | |||
697 | # Strip from the first rebased revision |
|
697 | # Strip from the first rebased revision | |
698 | if rebased: |
|
698 | if rebased: | |
699 | repair.strip(repo.ui, repo, strippoints, backup=backup) |
|
699 | repair.strip(repo.ui, repo, strippoints, backup=backup) | |
700 |
|
700 | |||
701 | if self.activebookmark and self.activebookmark in repo._bookmarks: |
|
701 | if self.activebookmark and self.activebookmark in repo._bookmarks: | |
702 | bookmarks.activate(repo, self.activebookmark) |
|
702 | bookmarks.activate(repo, self.activebookmark) | |
703 |
|
703 | |||
704 | finally: |
|
704 | finally: | |
705 | clearstatus(repo) |
|
705 | clearstatus(repo) | |
706 | clearcollapsemsg(repo) |
|
706 | clearcollapsemsg(repo) | |
707 | if not suppwarns: |
|
707 | if not suppwarns: | |
708 | repo.ui.warn(_('rebase aborted\n')) |
|
708 | repo.ui.warn(_('rebase aborted\n')) | |
709 | return 0 |
|
709 | return 0 | |

@command('rebase',
    [('s', 'source', '',
      _('rebase the specified changeset and descendants'), _('REV')),
     ('b', 'base', '',
      _('rebase everything from branching point of specified changeset'),
      _('REV')),
     ('r', 'rev', [],
      _('rebase these revisions'),
      _('REV')),
     ('d', 'dest', '',
      _('rebase onto the specified changeset'), _('REV')),
     ('', 'collapse', False, _('collapse the rebased changesets')),
     ('m', 'message', '',
      _('use text as collapse commit message'), _('TEXT')),
     ('e', 'edit', False, _('invoke editor on commit messages')),
     ('l', 'logfile', '',
      _('read collapse commit message from file'), _('FILE')),
     ('k', 'keep', False, _('keep original changesets')),
     ('', 'keepbranches', False, _('keep original branch names')),
     ('D', 'detach', False, _('(DEPRECATED)')),
     ('i', 'interactive', False, _('(DEPRECATED)')),
     ('t', 'tool', '', _('specify merge tool')),
     ('', 'stop', False, _('stop interrupted rebase')),
     ('c', 'continue', False, _('continue an interrupted rebase')),
     ('a', 'abort', False, _('abort an interrupted rebase')),
     ('', 'auto-orphans', '', _('automatically rebase orphan revisions '
                                'in the specified revset (EXPERIMENTAL)')),
     ] + cmdutil.dryrunopts + cmdutil.formatteropts + cmdutil.confirmopts,
    _('[-s REV | -b REV] [-d REV] [OPTION]'),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    Published commits cannot be rebased (see :hg:`help phases`).
    To copy commits, see :hg:`help graft`.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    will use the same logic as :hg:`merge` to pick a destination. If
    the current branch contains exactly one other head, the other head
    is merged with by default. Otherwise, an explicit revision with
    which to merge must be provided. (The destination changeset is not
    modified by rebasing, but new changesets are added as its
    descendants.)

    Here are the ways to select changesets:

      1. Explicitly select them using ``--rev``.

      2. Use ``--source`` to select a root changeset and include all of its
         descendants.

      3. Use ``--base`` to select a changeset; rebase will find ancestors
         and their descendants which are not also ancestors of the destination.

      4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
         rebase will use ``--base .`` as above.

    If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
    can be used in ``--dest``. The destination is calculated per source
    revision, with ``SRC`` substituted by that single source revision and
    ``ALLSRC`` substituted by all source revisions.

    Rebase will destroy original changesets unless you use ``--keep``.
    It will also move your bookmarks (even if you do).

    Some changesets may be dropped if they do not contribute changes
    (e.g. merges from the destination branch).

    Unlike ``merge``, rebase will do nothing if you are at the branch tip of
    a named branch with two heads. You will need to explicitly specify source
    and/or destination.

    If you need to use a tool to automate merge/conflict decisions, you
    can specify one with ``--tool``, see :hg:`help merge-tools`.
    As a caveat: the tool will not be used to mediate when a file was
    deleted; there is no hook presently available for this.

    If a rebase is interrupted to manually resolve a conflict, it can be
    continued with --continue/-c, aborted with --abort/-a, or stopped with
    --stop.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

      - stabilize orphaned changesets so history looks linear::

          hg rebase -r 'orphan()-obsolete()'\
 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'

      Configuration Options:

      You can make rebase require a destination if you set the following config
      option::

        [commands]
        rebase.requiredest = True

      By default, rebase will close the transaction after each commit. For
      performance purposes, you can configure rebase to use a single transaction
      across the entire rebase. WARNING: This setting introduces a significant
      risk of losing the work you've done in a rebase if the rebase aborts
      unexpectedly::

        [rebase]
        singletransaction = True

      By default, rebase writes to the working copy, but you can configure it
      to run in-memory for better performance, and to allow it to run if the
      working copy is dirty::

        [rebase]
        experimental.inmemory = True

    Return Values:

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.

    """
    opts = pycompat.byteskwargs(opts)
    inmemory = ui.configbool('rebase', 'experimental.inmemory')
    dryrun = opts.get('dry_run')
    confirm = opts.get('confirm')
    selactions = [k for k in ['abort', 'stop', 'continue'] if opts.get(k)]
    if len(selactions) > 1:
        raise error.Abort(_('cannot use --%s with --%s')
                          % tuple(selactions[:2]))
    action = selactions[0] if selactions else None
    if dryrun and action:
        raise error.Abort(_('cannot specify both --dry-run and --%s') % action)
    if confirm and action:
        raise error.Abort(_('cannot specify both --confirm and --%s') % action)
    if dryrun and confirm:
        raise error.Abort(_('cannot specify both --confirm and --dry-run'))

    if action or repo.currenttransaction() is not None:
        # in-memory rebase is not compatible with resuming rebases.
        # (Or if it is run within a transaction, since the restart logic can
        # fail the entire transaction.)
        inmemory = False

    if opts.get('auto_orphans'):
        for key in opts:
            if key != 'auto_orphans' and opts.get(key):
                raise error.Abort(_('--auto-orphans is incompatible with %s') %
                                  ('--' + key))
        userrevs = list(repo.revs(opts.get('auto_orphans')))
        opts['rev'] = [revsetlang.formatspec('%ld and orphan()', userrevs)]
        opts['dest'] = '_destautoorphanrebase(SRC)'

    if dryrun or confirm:
        return _dryrunrebase(ui, repo, action, opts)
    elif action == 'stop':
        rbsrt = rebaseruntime(repo, ui)
        with repo.wlock(), repo.lock():
            rbsrt.restorestatus()
            if rbsrt.collapsef:
                raise error.Abort(_("cannot stop in --collapse session"))
            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if not (rbsrt.keepf or allowunstable):
                raise error.Abort(_("cannot remove original changesets with"
                                    " unrebased descendants"),
                                  hint=_('either enable obsmarkers to allow unstable '
                                         'revisions or use --keep to keep original '
                                         'changesets'))
            if needupdate(repo, rbsrt.state):
                # update to the current working revision
                # to clear interrupted merge
                hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
            rbsrt._finishrebase()
            return 0
    elif inmemory:
        try:
            # in-memory merge doesn't support conflicts, so if we hit any, abort
            # and re-run as an on-disk merge.
            overrides = {('rebase', 'singletransaction'): True}
            with ui.configoverride(overrides, 'rebase'):
                return _dorebase(ui, repo, action, opts, inmemory=inmemory)
        except error.InMemoryMergeConflictsError:
            ui.warn(_('hit merge conflicts; re-running rebase without in-memory'
                      ' merge\n'))
            # TODO: Make in-memory merge not use the on-disk merge state, so
            # we don't have to clean it here
            mergemod.mergestate.clean(repo)
            clearstatus(repo)
            clearcollapsemsg(repo)
            return _dorebase(ui, repo, action, opts, inmemory=False)
    else:
        return _dorebase(ui, repo, action, opts)
|
933 | |||
934 | def _dryrunrebase(ui, repo, action, opts): |
|
934 | def _dryrunrebase(ui, repo, action, opts): | |
935 | rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts) |
|
935 | rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts) | |
936 | confirm = opts.get('confirm') |
|
936 | confirm = opts.get('confirm') | |
937 | if confirm: |
|
937 | if confirm: | |
938 | ui.status(_('starting in-memory rebase\n')) |
|
938 | ui.status(_('starting in-memory rebase\n')) | |
939 | else: |
|
939 | else: | |
940 | ui.status(_('starting dry-run rebase; repository will not be ' |
|
940 | ui.status(_('starting dry-run rebase; repository will not be ' | |
941 | 'changed\n')) |
|
941 | 'changed\n')) | |
942 | with repo.wlock(), repo.lock(): |
|
942 | with repo.wlock(), repo.lock(): | |
943 | needsabort = True |
|
943 | needsabort = True | |
944 | try: |
|
944 | try: | |
945 | overrides = {('rebase', 'singletransaction'): True} |
|
945 | overrides = {('rebase', 'singletransaction'): True} | |
946 | with ui.configoverride(overrides, 'rebase'): |
|
946 | with ui.configoverride(overrides, 'rebase'): | |
947 | _origrebase(ui, repo, action, opts, rbsrt, inmemory=True, |
|
947 | _origrebase(ui, repo, action, opts, rbsrt, inmemory=True, | |
948 | leaveunfinished=True) |
|
948 | leaveunfinished=True) | |
949 | except error.InMemoryMergeConflictsError: |
|
949 | except error.InMemoryMergeConflictsError: | |
950 | ui.status(_('hit a merge conflict\n')) |
|
950 | ui.status(_('hit a merge conflict\n')) | |
951 | return 1 |
|
951 | return 1 | |
952 | else: |
|
952 | else: | |
953 | if confirm: |
|
953 | if confirm: | |
954 | ui.status(_('rebase completed successfully\n')) |
|
954 | ui.status(_('rebase completed successfully\n')) | |
955 | if not ui.promptchoice(_(b'apply changes (yn)?' |
|
955 | if not ui.promptchoice(_(b'apply changes (yn)?' | |
956 | b'$$ &Yes $$ &No')): |
|
956 | b'$$ &Yes $$ &No')): | |
957 | # finish unfinished rebase |
|
957 | # finish unfinished rebase | |
958 | rbsrt._finishrebase() |
|
958 | rbsrt._finishrebase() | |
959 | else: |
|
959 | else: | |
960 | rbsrt._prepareabortorcontinue(isabort=True, backup=False, |
|
960 | rbsrt._prepareabortorcontinue(isabort=True, backup=False, | |
961 | suppwarns=True) |
|
961 | suppwarns=True) | |
962 | needsabort = False |
|
962 | needsabort = False | |
963 | else: |
|
963 | else: | |
964 | ui.status(_('dry-run rebase completed successfully; run without' |
|
964 | ui.status(_('dry-run rebase completed successfully; run without' | |
965 | ' -n/--dry-run to perform this rebase\n')) |
|
965 | ' -n/--dry-run to perform this rebase\n')) | |
966 | return 0 |
|
966 | return 0 | |
967 | finally: |
|
967 | finally: | |
968 | if needsabort: |
|
968 | if needsabort: | |
969 | # no need to store backup in case of dryrun |
|
969 | # no need to store backup in case of dryrun | |
970 | rbsrt._prepareabortorcontinue(isabort=True, backup=False, |
|
970 | rbsrt._prepareabortorcontinue(isabort=True, backup=False, | |
971 | suppwarns=True) |
|
971 | suppwarns=True) | |
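
# Editor's note: ui.promptchoice() takes a prompt of the form
# 'question $$ &Choice1 $$ &Choice2' and returns the 0-based index of the
# selected choice (the '&' marks the response key), so the
# "not ui.promptchoice(...)" test above is true exactly when the user picks
# the first choice, &Yes.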

def _dorebase(ui, repo, action, opts, inmemory=False):
    rbsrt = rebaseruntime(repo, ui, inmemory, opts)
    return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)

def _origrebase(ui, repo, action, opts, rbsrt, inmemory=False,
                leaveunfinished=False):
    assert action != 'stop'
    with repo.wlock(), repo.lock():
        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        # search default destination in this space
        # used in the 'hg pull --rebase' case, see issue 5214.
        destspace = opts.get('_destspace')
        if opts.get('interactive'):
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if action:
            if rbsrt.collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if action == 'abort' and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))
            if action == 'continue':
                ms = mergemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(isabort=(action == 'abort'))
            if retcode is not None:
                return retcode
        else:
            destmap = _definedestmap(ui, repo, inmemory, destf, srcf, basef,
                                     revf, destspace=destspace)
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool('rebase', 'singletransaction')
        if singletr:
            tr = repo.transaction('rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not inmemory:
                dsguard = dirstateguard.dirstateguard(repo, 'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not leaveunfinished:
                    rbsrt._finishrebase()
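
# Editor's note (my reading of the helper, stated as an assumption):
# util.acceptintervention(tr) behaves like an ordinary transaction context,
# except that error.InterventionRequired -- raised e.g. when a conflict
# pauses the rebase -- closes the transaction instead of aborting it, so
# nodes committed before the interruption survive. Passing None makes it a
# no-op context, which is why dsguard can simply stay None above.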

def _definedestmap(ui, repo, inmemory, destf=None, srcf=None, basef=None,
                   revf=None, destspace=None):
    """use revisions argument to define destmap {srcrev: destrev}"""
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details
    if srcf and basef:
        raise error.Abort(_('cannot specify both a source and a base'))
    if revf and basef:
        raise error.Abort(_('cannot specify both a revision and a base'))
    if revf and srcf:
        raise error.Abort(_('cannot specify both a revision and a source'))

    if not inmemory:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if ui.configbool('commands', 'rebase.requiredest') and not destf:
        raise error.Abort(_('you must specify a destination'),
                          hint=_('use: hg rebase -d REV'))

    dest = None

    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_('empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, [srcf])
        if not src:
            ui.status(_('empty "source" revision set - nothing to rebase\n'))
            return None
        rebaseset = repo.revs('(%ld)::', src)
        assert rebaseset
    else:
        base = scmutil.revrange(repo, [basef or '.'])
        if not base:
            ui.status(_('empty "base" revision set - '
                        "can't compute rebase set\n"))
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = []  # selected children of branching points
        bpbase = {}  # {branchingpoint: [origbase]}
        for b in base:  # group bases by branching points
            bp = repo.revs('ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be to abort with a "cannot find branching point"
            # error)
            bpbase.clear()
        for bp, bs in bpbase.iteritems():  # calculate roots
            roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs('%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(_('nothing to rebase - %s is both "base"'
                                ' and destination\n') % dest)
                else:
                    ui.status(_('nothing to rebase - working directory '
                                'parent is also destination\n'))
            elif not repo.revs('%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(_('nothing to rebase - "base" %s is '
                                'already an ancestor of destination '
                                '%s\n') %
                              ('+'.join(bytes(repo[r]) for r in base),
                               dest))
                else:
                    ui.status(_('nothing to rebase - working '
                                'directory parent is already an '
                                'ancestor of destination %s\n') % dest)
            else:  # can it happen?
                ui.status(_('nothing to rebase from %s to %s\n') %
                          ('+'.join(bytes(repo[r]) for r in base), dest))
            return None

    rebasingwcp = repo['.'].rev() in rebaseset
    ui.log("rebase", "rebasing working copy parent: %r\n", rebasingwcp,
           rebase_rebasing_wcp=rebasingwcp)
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    allsrc = revsetlang.formatspec('%ld', rebaseset)
    alias = {'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias['SRC'] = revsetlang.formatspec('%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_('skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(_('rebase destination for %s is not '
                                        'unique') % repo[r])

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}

    if not destmap:
        ui.status(_('nothing to rebase - empty destination\n'))
        return None

    return destmap
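
# A minimal sketch (editor's illustration; the helper name is hypothetical)
# of the shape _definedestmap() returns -- a plain {srcrev: destrev} dict.
# In the single-destination case every source maps to the same revision:
def _example_destmap_shape():
    rebaseset = [2, 3]      # source revisions, e.g. from `-r 2+3`
    destrev = 7             # resolved destination, e.g. from `-d 7`
    destmap = {r: destrev for r in rebaseset}
    return destmap          # {2: 7, 3: 7}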

def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state are collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        for p in repo[rev].parents():
            if (p.rev() not in state
                and p.rev() not in destancestors):
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise error.Abort(_('unable to collapse on top of %d, there is more '
                        'than one external parent: %s') %
                      (max(destancestors),
                       ', '.join("%d" % p for p in sorted(parents))))
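
# A toy model (editor's illustration with hypothetical data) of the rule
# above: parents of the collapsed revisions that are neither inside the
# collapsed set nor ancestors of the destination are "external", and at
# most one such parent is allowed.
def _example_externalparent():
    state = {10, 11, 12}                          # revisions being collapsed
    destancestors = {0, 1, 2}                     # ::dest
    parentsof = {10: [2], 11: [10], 12: [11, 5]}  # rev 5 is outside both sets
    external = set()
    for rev in state:
        if rev == min(state):                     # the source rev is skipped
            continue
        for p in parentsof[rev]:
            if p not in state and p not in destancestors:
                external.add(p)
    return external  # {5}; two or more entries would abort the rebase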

def commitmemorynode(repo, p1, p2, wctx, editor, extra, user, date, commitmsg):
    '''Commit the memory changes with parents p1 and p2.
    Return node of committed revision.'''
    # Replicates the empty check in ``repo.commit``.
    if wctx.isempty() and not repo.ui.configbool('ui', 'allowemptycommit'):
        return None

    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``).
    branch = repo[p1].branch()
    if 'branch' in extra:
        branch = extra['branch']

    memctx = wctx.tomemctx(commitmsg, parents=(p1, p2), date=date,
                           extra=extra, user=user, branch=branch, editor=editor)
    commitres = repo.commitctx(memctx)
    wctx.clean()  # Might be reused
    return commitres

def commitnode(repo, p1, p2, editor, extra, user, date, commitmsg):
    '''Commit the wd changes with parents p1 and p2.
    Return node of committed revision.'''
    dsguard = util.nullcontextmanager()
    if not repo.ui.configbool('rebase', 'singletransaction'):
        dsguard = dirstateguard.dirstateguard(repo, 'rebase')
    with dsguard:
        repo.setparents(repo[p1].node(), repo[p2].node())

        # Commit might fail if unresolved files exist
        newnode = repo.commit(text=commitmsg, user=user, date=date,
                              extra=extra, editor=editor)

        repo.dirstate.setbranch(repo[newnode].branch())
        return newnode
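
# The guard-or-no-op pattern used above, sketched with stdlib equivalents
# (editor's illustration; `needs_guard`, `make_guard`, and `work` are
# hypothetical stand-ins): choose a real context manager or an inert one up
# front so a single `with` block covers both cases.
def _example_conditional_guard(needs_guard, make_guard, work):
    import contextlib
    guard = contextlib.nullcontext()  # analogue of util.nullcontextmanager()
    if needs_guard:
        guard = make_guard()
    with guard:
        return work()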

def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
    'Rebase a single revision rev on top of p1 using base as merge ancestor'
    # Merge phase
    # Update to destination and merge it with local
    if wctx.isinmemory():
        wctx.setbase(repo[p1])
    else:
        if repo['.'].rev() != p1:
            repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
            mergemod.update(repo, p1, branchmerge=False, force=True)
        else:
            repo.ui.debug(" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
    if base is not None:
        repo.ui.debug("   detach base %d:%s\n" % (base, repo[base]))
    # When collapsing in-place, the parent is the common ancestor, we
    # have to allow merging with it.
    stats = mergemod.update(repo, rev, branchmerge=True, force=True,
                            ancestor=base, mergeancestor=collapse,
                            labels=['dest', 'source'], wc=wctx)
    if collapse:
        copies.duplicatecopies(repo, wctx, rev, dest)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent, but *not*
        # duplicate any copies that have already been
        # performed in the destination.
        p1rev = repo[rev].p1().rev()
        copies.duplicatecopies(repo, wctx, rev, p1rev, skiprev=dest)
    return stats
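
# Editor's note: rebasing one revision is a three-way merge in which the
# working copy (real or in-memory) sits at the new parent p1, the other
# side is ``rev``, and ``base`` -- normally rev's old parent -- is forced
# as the merge ancestor via ``ancestor=base``. Only the changes that rev
# introduced relative to base are therefore replayed onto p1.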

def adjustdest(repo, rev, destmap, state, skipped):
    r"""adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when doing rebasing B+E to F, C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1               <- written during rebasing B
        |
        F                <- original destination of B, E
        |
        | E              <- rev, which is being rebased
        | |
        | D              <- prev, one parent of rev being checked
        | |
        | x              <- skipped, ex. no successor or successor in (::dest)
        | |
        | C              <- rebased as C', different destination
        | |
        | B              <- rebased as B1 C'
        |/                                 |
        A                G                 <- destination of C, different

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

         H       C1 G1
        /|       | /
       F G       |/
     K | |  ->   K
     | C D       |
     | |/        |
     | B         | ...
     |/          |/
     A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.

      C'       After rebasing C, when considering B's destination, use C'
      |        instead of the original C.
      B D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [s for s, d in state.items()
              if d > 0 and destmap[s] == dest and s not in skipped]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            candidate = repo.revs('max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    'rev %d should be rebased already at this time' % dest)
        result.append(adjusted)
    return result
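
# A worked toy example (editor's illustration, hypothetical revision
# numbers) of the adjustment rule: when a parent's branch already contains
# a rebased revision with the same destination, the rebased copy replaces
# the destination.
def _example_adjustdest():
    state = {1: 5}          # B (rev 1) was already rebased to B1 (rev 5)
    destmap = {1: 4, 2: 4}  # B and its descendant E (rev 2) both target F (4)
    # While E is processed, rev 1 is the highest already-rebased ancestor of
    # E's parent with the same destination, so E's adjusted p1 destination
    # becomes state[1] == 5 (B1) instead of the original destmap[2] == 4 (F).
    adjusted = state.get(1, destmap[2])
    return adjusted  # 5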

def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is noop because of markers

    `rebaseobsrevs`: set of obsolete revisions in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination or no non-obsolete successor.
    """
    # Obsolete node with successors not in dest leads to divergence
    divergenceok = ui.configbool('experimental',
                                 'evolution.allowdivergence')
    divergencebasecandidates = rebaseobsrevs - rebaseobsskipped

    if divergencebasecandidates and not divergenceok:
        divhashes = (bytes(repo[r])
                     for r in divergencebasecandidates)
        msg = _("this rebase will cause "
                "divergences from: %s")
        h = _("to force the rebase please set "
              "experimental.evolution.allowdivergence=True")
        raise error.Abort(msg % (",".join(divhashes),), hint=h)
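
# Editor's note: the check above is plain set arithmetic. Obsolete sources
# that are *not* skipped would be rebased alongside their existing
# successors, making those successors divergent; e.g. with
# rebaseobsrevs = {10, 11} and rebaseobsskipped = {11}, rev 10 remains a
# divergence base and the rebase aborts unless
# experimental.evolution.allowdivergence is set.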

def successorrevs(unfi, rev):
    """yield revision numbers for successors of rev"""
    assert unfi.filtername is None
    nodemap = unfi.changelog.nodemap
    for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
        if s in nodemap:
            yield nodemap[s]
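
# Editor's sketch of the intended call pattern (the assert documents the
# contract that callers pass an unfiltered repository):
#
#     unfi = repo.unfiltered()
#     succs = list(successorrevs(unfi, rev))
#
# The nodemap membership test drops successors that are not known locally.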

def defineparents(repo, rev, destmap, state, skipped, obsskipped):
    """Return new parents and optionally a merge base for rev being rebased

    The destination specified by "dest" cannot always be used directly because
    a previous rebase result could affect the destination. For example,

          D E    rebase -r C+D+E -d B
          |/     C will be rebased to C'
        B C      D's new destination will be C' instead of B
        |/       E's new destination will be C' instead of B
        A

    The new parents of a merge are slightly more complicated. See the comment
    block below.
    """
    # use unfiltered changelog since successorrevs may return filtered nodes
    assert repo.filtername is None
    cl = repo.changelog
    isancestor = cl.isancestorrev

    dest = destmap[rev]
    oldps = repo.changelog.parentrevs(rev) # old parents
    newps = [nullrev, nullrev] # new parents
    dests = adjustdest(repo, rev, destmap, state, skipped)
    bases = list(oldps) # merge base candidates, initially just old parents

    if all(r == nullrev for r in oldps[1:]):
        # For non-merge changeset, just move p to adjusted dest as requested.
        newps[0] = dests[0]
    else:
        # For merge changeset, if we move p to dests[i] unconditionally, both
        # parents may change and the end result looks like "the merge loses a
        # parent", which is a surprise. This is a limitation because "--dest"
        # only accepts one dest per src.
        #
        # Therefore, only move p with reasonable conditions (in this order):
        #   1. use dest, if dest is a descendant of (p or one of p's
        #      successors)
        #   2. use p's rebased result, if p is rebased (state[p] > 0)
        #
        # Comparing with adjustdest, the logic here does some additional work:
        #   1. decide which parents will not be moved towards dest
        #   2. if the above decision is "no", should a parent still be moved
        #      because it was rebased?
        #
        # For example:
        #
        #     C    # "rebase -r C -d D" is an error since none of the parents
        #    /|    # can be moved. "rebase -r B+C -d D" will move C's parent
        #   A B D  # B (using rule "2."), since B will be rebased.
        #
        # The loop tries not to rely on the fact that a Mercurial node has
        # at most 2 parents.
        for i, p in enumerate(oldps):
            np = p # new parent
            if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                np = dests[i]
            elif p in state and state[p] > 0:
                np = state[p]

            # "bases" only records "special" merge bases that cannot be
            # calculated from changelog DAG (i.e. isancestor(p, np) is False).
            # For example:
            #
            #   B'   # rebase -s B -d D, when B was rebased to B'. dest for C
            #   | C  # is B', but merge base for C is B, instead of
            #   D |  # changelog.ancestor(C, B') == A. If changelog DAG and
            #   | B  # "state" edges are merged (so there will be an edge from
            #   |/   # B to B'), the merge base is still ancestor(C, B') in
            #   A    # the merged graph.
            #
            # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
            # which uses "virtual null merge" to explain this situation.
            if isancestor(p, np):
                bases[i] = nullrev

            # If one parent becomes an ancestor of the other, drop the
            # ancestor.
            for j, x in enumerate(newps[:i]):
                if x == nullrev:
                    continue
                if isancestor(np, x): # CASE-1
                    np = nullrev
                elif isancestor(x, np): # CASE-2
                    newps[j] = np
                    np = nullrev
                    # New parents forming an ancestor relationship does not
                    # mean the old parents have a similar relationship. Do not
                    # set bases[x] to nullrev.
                    bases[j], bases[i] = bases[i], bases[j]

            newps[i] = np

        # "rebasenode" updates to new p1, and the old p1 will be used as merge
        # base. If only p2 changes, merging using unchanged p1 as merge base is
        # suboptimal. Therefore swap parents to make the merge sane.
        if newps[1] != nullrev and oldps[0] == newps[0]:
            assert len(newps) == 2 and len(oldps) == 2
            newps.reverse()
            bases.reverse()

        # No parent change might be an error because we fail to make rev a
        # descendant of the requested dest. This can happen, for example:
        #
        #     C    # rebase -r C -d D
        #    /|    # None of A and B will be changed to D and rebase fails.
        #   A B D
        if set(newps) == set(oldps) and dest not in newps:
            raise error.Abort(_('cannot rebase %d:%s without '
                                'moving at least one of its parents')
                              % (rev, repo[rev]))

    # Source should not be ancestor of dest. The check here guarantees it's
    # impossible. With multi-dest, the initial check does not cover complex
    # cases since we don't have abstractions to dry-run rebase cheaply.
    if any(p != nullrev and isancestor(rev, p) for p in newps):
        raise error.Abort(_('source is ancestor of destination'))

    # "rebasenode" updates to new p1, use the corresponding merge base.
    if bases[0] != nullrev:
        base = bases[0]
    else:
        base = None

    # Check if the merge will contain unwanted changes. That may happen if
    # there are multiple special (non-changelog ancestor) merge bases, which
    # cannot be handled well by the 3-way merge algorithm. For example:
    #
    #     F
    #    /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   # chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in bases if b != nullrev) > 1:
        unwanted = [None, None] # unwanted[i]: unwanted revs if bases[i] chosen
        for i, base in enumerate(bases):
            if base == nullrev:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            siderevs = list(repo.revs('((%ld-%d) %% (%d+%d))',
                                      bases, base, base, dest))

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [r for r, d in state.items()
                             if d > 0 and r not in obsskipped]
                merges = [r for r in rebaseset
                          if cl.parentrevs(r)[1] != nullrev]
                unwanted[i] = list(repo.revs('%ld - (::%ld) - %ld',
                                             siderevs, merges, rebaseset))

        # Choose a merge base that has a minimal number of unwanted revs.
        l, i = min((len(revs), i)
                   for i, revs in enumerate(unwanted) if revs is not None)
        base = bases[i]

        # newps[0] should match merge base if possible. Currently, if newps[i]
        # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
        # the other's ancestor. In that case, it's fine not to swap newps here.
        # (see CASE-1 and CASE-2 above)
        if i != 0 and newps[i] != nullrev:
            newps[0], newps[i] = newps[i], newps[0]

        # The merge will include unwanted revisions. Abort now. Revisit this if
        # we have a more advanced merge algorithm that handles multiple bases.
        if l > 0:
            unwanteddesc = _(' or ').join(
                (', '.join('%d:%s' % (r, repo[r]) for r in revs)
                 for revs in unwanted if revs is not None))
            raise error.Abort(
                _('rebasing %d:%s will include unwanted changes from %s')
                % (rev, repo[rev], unwanteddesc))

    repo.ui.debug(" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base
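
# Worked examples of the non-merge path above, with hypothetical revisions:
# rebasing C (parent B) onto a descendant D of B moves p1 to D, and since B
# is still a changelog ancestor of D the merge base stays implicit: the
# function returns (rev(D), nullrev, None). Rebasing D (parent C) after C was
# already rebased to C' moves p1 to C', but C is not a changelog ancestor of
# C', so C is kept as the explicit merge base: (rev(C'), nullrev, rev(C)).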

def isagitpatch(repo, patchname):
    'Return true if the given patch is in git format'
    mqpatch = os.path.join(repo.mq.path, patchname)
    for line in patch.linereader(open(mqpatch, 'rb')):
        if line.startswith('diff --git'):
            return True
    return False
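
# For example, a patch created with "hg export --git" (or mq's git mode)
# begins with a header line such as "diff --git a/foo.c b/foo.c", which is
# what the line scan above keys on; plain patches start with "diff -r" and
# fall through to False.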

def updatemq(repo, state, skipped, **opts):
    'Update rebased mq patches - finalize and then import them'
    mqrebase = {}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
                          (rev, p.name))
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(_('updating mq patch %s to %d:%s\n') %
                             (name, state[rev], repo[state[rev]]))
                mq.qimport(repo, (), patchname=name, git=isgit,
                           rev=["%d" % state[rev]])
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [s for s in original_series
                     if mq.guard_re.split(s, 1)[0] not in skippedpatches]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()

def storecollapsemsg(repo, collapsemsg):
    'Store the collapse message to allow recovery'
    collapsemsg = collapsemsg or ''
    f = repo.vfs("last-message.txt", "w")
    f.write("%s\n" % collapsemsg)
    f.close()

def clearcollapsemsg(repo):
    'Remove collapse message file'
    repo.vfs.unlinkpath("last-message.txt", ignoremissing=True)

def restorecollapsemsg(repo, isabort):
    'Restore previously stored collapse message'
    try:
        f = repo.vfs("last-message.txt")
        collapsemsg = f.readline().strip()
        f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if isabort:
            # Oh well, just abort like normal
            collapsemsg = ''
        else:
            raise error.Abort(_('missing .hg/last-message.txt for rebase'))
    return collapsemsg

def clearstatus(repo):
    'Remove the status files'
    # Make sure the active transaction won't write the state file
    tr = repo.currenttransaction()
    if tr:
        tr.removefilegenerator('rebasestate')
    repo.vfs.unlinkpath("rebasestate", ignoremissing=True)

def needupdate(repo, state):
    '''check whether we should `update --clean` away from a merge, or if
    somehow the working dir got forcibly updated, e.g. by older hg'''
    parents = [p.rev() for p in repo[None].parents()]

    # Are we in a merge state at all?
    if len(parents) < 2:
        return False

    # We should be standing on the first as-of-yet unrebased commit.
    firstunrebased = min([old for old, new in state.iteritems()
                          if new == nullrev])
    if firstunrebased in parents:
        return True

    return False

def sortsource(destmap):
    """yield source revisions in an order such that each is only rebased once

    If source and destination overlap, we should filter out revisions
    depending on other revisions which haven't been rebased yet.

    Yield a sorted list of revisions each time.

    For example, when rebasing A onto B and B onto C, this function yields
    [B], then [A], indicating B needs to be rebased first.

    Raise if there is a cycle so the rebase is impossible.
    """
    srcset = set(destmap)
    while srcset:
        srclist = sorted(srcset)
        result = []
        for r in srclist:
            if destmap[r] not in srcset:
                result.append(r)
        if not result:
            raise error.Abort(_('source and destination form a cycle'))
        srcset -= set(result)
        yield result
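
# Worked example with hypothetical revision numbers: for destmap {1: 2, 2: 3}
# (rebase 1 onto 2 and 2 onto 3), the first pass yields [2] because its
# destination 3 is not itself a source, and the second pass yields [1] once 2
# has left srcset. For destmap {1: 2, 2: 1} neither revision can ever be
# emitted, so the cycle abort above fires on the first pass.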

def buildstate(repo, destmap, collapse):
    '''Define which revisions are going to be rebased and where

    repo: repo
    destmap: {srcrev: destrev}
    '''
    rebaseset = destmap.keys()
    originalwd = repo['.'].rev()

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if 'qtip' in repo.tags():
        mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
        if set(destmap.values()) & mqapplied:
            raise error.Abort(_('cannot rebase onto an applied mq patch'))

    # Get "cycle" error early by exhausting the generator.
    sortedsrc = list(sortsource(destmap)) # a list of sorted revs
    if not sortedsrc:
        raise error.Abort(_('no matching revisions'))

    # Only check the first batch of revisions to rebase, i.e. those not
    # depending on other revisions in the rebase set. This means the "source
    # is ancestor of destination" check for the second (and following) batches
    # is not done here. We rely on "defineparents" to do that check.
    roots = list(repo.set('roots(%ld)', sortedsrc[0]))
    if not roots:
        raise error.Abort(_('no matching revisions'))
    def revof(r):
        return r.rev()
    roots = sorted(roots, key=revof)
    state = dict.fromkeys(rebaseset, revtodo)
    emptyrebase = (len(sortedsrc) == 1)
    for root in roots:
        dest = repo[destmap[root.rev()]]
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.Abort(_('source is ancestor of destination'))
        if commonbase == dest:
            wctx = repo[None]
            if dest == wctx.p1():
                # when rebasing to '.', it will use the current wd branch name
                samebranch = root.branch() == wctx.branch()
            else:
                samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and dest in root.parents():
                # mark the revision as done by setting its new revision
                # equal to its old (current) revision
                state[root.rev()] = root.rev()
                repo.ui.debug('source is a child of destination\n')
                continue

        emptyrebase = False
        repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
    if emptyrebase:
        return None
    for rev in sorted(state):
        parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
        # if all parents of this revision are done, then so is this revision
        if parents and all((state.get(p) == p for p in parents)):
            state[rev] = rev
    return originalwd, destmap, state
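
# The returned state dict maps every source revision to revtodo (-1), except
# that a root which is already a child of its destination on the same branch
# is marked done as state[root] == root, and any revision whose parents are
# all done is marked done the same way. For example (hypothetical revisions),
# rebasing 5 and 6 onto a new destination starts as {5: -1, 6: -1}.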

def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
                 keepf=False, fm=None, backup=True):
    """dispose of rebased revisions at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is the
    `collapsedas` node.

    If `keepf` is True, the rebase has --keep set and no nodes should be
    removed (but bookmarks still need to be moved).

    If `backup` is False, no backup will be stored when stripping rebased
    revisions.
    """
    tonode = repo.changelog.node
    replacements = {}
    moves = {}
    stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)

    collapsednodes = []
    for rev, newrev in sorted(state.items()):
        if newrev >= 0 and newrev != rev:
            oldnode = tonode(rev)
            newnode = collapsedas or tonode(newrev)
            moves[oldnode] = newnode
            if not keepf:
                succs = None
                if rev in skipped:
                    if stripcleanup or not repo[rev].obsolete():
                        succs = ()
                elif collapsedas:
                    collapsednodes.append(oldnode)
                else:
                    succs = (newnode,)
                if succs is not None:
                    replacements[(oldnode,)] = succs
    if collapsednodes:
        replacements[tuple(collapsednodes)] = (collapsedas,)
    scmutil.cleanupnodes(repo, replacements, 'rebase', moves, backup=backup)
    if fm:
        hf = fm.hexfunc
        fl = fm.formatlist
        fd = fm.formatdict
        changes = {}
        for oldns, newn in replacements.iteritems():
            for oldn in oldns:
                changes[hf(oldn)] = fl([hf(n) for n in newn], name='node')
        nodechanges = fd(changes, key="oldnode", value="newnodes")
        fm.data(nodechanges=nodechanges)
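
# Shape of the `replacements` mapping built above, with hypothetical nodes:
# an ordinary rebase records {(oldnode,): (newnode,)} per moved revision; a
# skipped revision records {(oldnode,): ()} so it is stripped or pruned with
# no successor; a collapse records all folded nodes at once, as
# {(n1, n2, n3): (collapsedas,)}.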

def pullrebase(orig, ui, repo, *args, **opts):
    'Call rebase after pull if the latter has been invoked with --rebase'
    if opts.get(r'rebase'):
        if ui.configbool('commands', 'rebase.requiredest'):
            msg = _('rebase destination required by configuration')
            hint = _('use hg pull followed by hg rebase -d DEST')
            raise error.Abort(msg, hint=hint)

        with repo.wlock(), repo.lock():
            if opts.get(r'update'):
                del opts[r'update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
                'please commit or shelve your changes first'))

            revsprepull = len(repo)
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            try:
                ret = orig(ui, repo, *args, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # The --rev option from pull conflicts with rebase's own
                # --rev; drop it.
                if r'rev' in opts:
                    del opts[r'rev']
                # positional argument from pull conflicts with rebase's own
                # --source.
                if r'source' in opts:
                    del opts[r'source']
                # revsprepull is the len of the repo, not revnum of tip.
                destspace = list(repo.changelog.revs(start=revsprepull))
                opts[r'_destspace'] = destspace
                try:
                    rebase(ui, repo, **opts)
                except error.NoMergeDestAbort:
                    # we can maybe update instead
                    rev, _a, _b = destutil.destupdate(repo)
                    if rev == repo['.'].rev():
                        ui.status(_('nothing to rebase\n'))
                    else:
                        ui.status(_('nothing to rebase - updating instead\n'))
                        # not passing argument to get the bare update behavior
                        # with warning and trumpets
                        commands.update(ui, repo)
    else:
        if opts.get(r'tool'):
            raise error.Abort(_('--tool can only be used with --rebase'))
        ret = orig(ui, repo, *args, **opts)

    return ret
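
# Usage sketch: with this extension enabled, "hg pull --rebase" runs the
# wrapped pull above and, if new revisions arrived, rebases local work onto
# them; when no valid rebase destination exists it falls back to a bare
# "hg update" instead.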

def _filterobsoleterevs(repo, revs):
    """returns a set of the obsolete revisions in revs"""
    return set(r for r in revs if repo[r].obsolete())

def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
    """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination,
    obsoleteextinctsuccessors).

    `obsoletenotrebased` is a mapping of obsolete => successor for all
    obsolete nodes to be rebased given in `rebaseobsrevs`.

    `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
    without a successor in destination.

    `obsoleteextinctsuccessors` is a set of obsolete revisions with only
    obsolete successors.
    """
    obsoletenotrebased = {}
    obsoletewithoutsuccessorindestination = set([])
    obsoleteextinctsuccessors = set([])

    assert repo.filtername is None
    cl = repo.changelog
    nodemap = cl.nodemap
    extinctrevs = set(repo.revs('extinct()'))
    for srcrev in rebaseobsrevs:
        srcnode = cl.node(srcrev)
        # XXX: more advanced APIs are required to handle split correctly
        successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
        # obsutil.allsuccessors includes node itself
        successors.remove(srcnode)
        succrevs = {nodemap[s] for s in successors if s in nodemap}
        if succrevs.issubset(extinctrevs):
            # all successors are extinct
            obsoleteextinctsuccessors.add(srcrev)
        if not successors:
            # no successor
            obsoletenotrebased[srcrev] = None
        else:
            dstrev = destmap[srcrev]
            for succrev in succrevs:
                if cl.isancestorrev(succrev, dstrev):
                    obsoletenotrebased[srcrev] = succrev
                    break
            else:
                # If 'srcrev' has a successor in the rebase set but none in
                # the destination (a successor in destination would have been
                # caught above), we shall skip it and its descendants to
                # avoid divergence.
                if srcrev in extinctrevs or any(s in destmap for s in succrevs):
                    obsoletewithoutsuccessorindestination.add(srcrev)

    return (
        obsoletenotrebased,
        obsoletewithoutsuccessorindestination,
        obsoleteextinctsuccessors,
    )
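
# For example (hypothetical revisions): if obsolete revision 4 already has a
# successor, revision 9, that is an ancestor of 4's destination, the first
# mapping records 4 -> 9 and rebase can simply skip 4; if 4 was pruned with
# no successor at all, it records 4 -> None instead.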

def summaryhook(ui, repo):
    if not repo.vfs.exists('rebasestate'):
        return
    try:
        rbsrt = rebaseruntime(repo, ui, {})
        rbsrt.restorestatus()
        state = rbsrt.state
    except error.RepoLookupError:
        # i18n: column positioning for "hg summary"
        msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
        ui.write(msg)
        return
    numrebased = len([i for i in state.itervalues() if i >= 0])
    # i18n: column positioning for "hg summary"
    ui.write(_('rebase: %s, %s (rebase --continue)\n') %
             (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
              ui.label(_('%d remaining'), 'rebase.remaining') %
              (len(state) - numrebased)))

def uisetup(ui):
    # Replace pull with a decorator to provide --rebase option
    entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
    entry[1].append(('', 'rebase', None,
                     _("rebase working directory to branch head")))
    entry[1].append(('t', 'tool', '',
                     _("specify merge tool for rebase")))
    cmdutil.summaryhooks.add('rebase', summaryhook)
    cmdutil.unfinishedstates.append(
        ['rebasestate', False, False, _('rebase in progress'),
         _("use 'hg rebase --continue' or 'hg rebase --abort'")])
    cmdutil.afterresolvedstates.append(
        ['rebasestate', _('hg rebase --continue')])
@@ -1,964 +1,964 @@
# Mercurial bookmark support code
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import struct

from .i18n import _
from .node import (
    bin,
    hex,
    short,
    wdirid,
)
from . import (
    encoding,
    error,
    obsutil,
    pycompat,
    scmutil,
    txnutil,
    util,
)

# label constants
# until 3.5, bookmarks.current was the advertised name, not
# bookmarks.active, so we must use both to avoid breaking old
# custom styles
activebookmarklabel = 'bookmarks.active bookmarks.current'

def _getbkfile(repo):
    """Hook so that extensions that mess with the store can hook bm storage.

    For core, this just handles whether we should see pending
    bookmarks or the committed ones. Other extensions (like share)
    may need to tweak this behavior further.
    """
    fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
    return fp

class bmstore(object):
    r"""Storage for bookmarks.

    This object should do all bookmark-related reads and writes, so
    that it's fairly simple to replace the storage underlying
    bookmarks without having to clone the logic surrounding
    bookmarks. This type also should manage the active bookmark, if
    any.

    This particular bmstore implementation stores bookmarks as
    {hash}\s{name}\n (the same format as localtags) in
    .hg/bookmarks. The mapping is stored as {name: nodeid}.
    """
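
    # For illustration, a .hg/bookmarks file holding two marks on hypothetical
    # nodes looks like:
    #
    #   0f42b87f0a5b5e1ec849a5a6cf71cf98cd9dd4c0 feature-x
    #   dd117ad612d9b2b8e34f9d06c53cbbc94a81a5ff @
    #
    # i.e. one "<40-hex-node> <name>" pair per line, as parsed by __init__
    # below.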
59 |
|
59 | |||
60 | def __init__(self, repo): |
|
60 | def __init__(self, repo): | |
61 | self._repo = repo |
|
61 | self._repo = repo | |
62 | self._refmap = refmap = {} # refspec: node |
|
62 | self._refmap = refmap = {} # refspec: node | |
63 | self._nodemap = nodemap = {} # node: sorted([refspec, ...]) |
|
63 | self._nodemap = nodemap = {} # node: sorted([refspec, ...]) | |
64 | self._clean = True |
|
64 | self._clean = True | |
65 | self._aclean = True |
|
65 | self._aclean = True | |
66 | nm = repo.changelog.nodemap |
|
66 | nm = repo.changelog.nodemap | |
67 | tonode = bin # force local lookup |
|
67 | tonode = bin # force local lookup | |
68 | try: |
|
68 | try: | |
69 | with _getbkfile(repo) as bkfile: |
|
69 | with _getbkfile(repo) as bkfile: | |
70 | for line in bkfile: |
|
70 | for line in bkfile: | |
71 | line = line.strip() |
|
71 | line = line.strip() | |
72 | if not line: |
|
72 | if not line: | |
73 | continue |
|
73 | continue | |
74 | try: |
|
74 | try: | |
75 | sha, refspec = line.split(' ', 1) |
|
75 | sha, refspec = line.split(' ', 1) | |
76 | node = tonode(sha) |
|
76 | node = tonode(sha) | |
77 | if node in nm: |
|
77 | if node in nm: | |
78 | refspec = encoding.tolocal(refspec) |
|
78 | refspec = encoding.tolocal(refspec) | |
79 | refmap[refspec] = node |
|
79 | refmap[refspec] = node | |
80 | nrefs = nodemap.get(node) |
|
80 | nrefs = nodemap.get(node) | |
81 | if nrefs is None: |
|
81 | if nrefs is None: | |
82 | nodemap[node] = [refspec] |
|
82 | nodemap[node] = [refspec] | |
83 | else: |
|
83 | else: | |
84 | nrefs.append(refspec) |
|
84 | nrefs.append(refspec) | |
85 | if nrefs[-2] > refspec: |
|
85 | if nrefs[-2] > refspec: | |
86 | # bookmarks weren't sorted before 4.5 |
|
86 | # bookmarks weren't sorted before 4.5 | |
87 | nrefs.sort() |
|
87 | nrefs.sort() | |
88 | except (TypeError, ValueError): |
|
88 | except (TypeError, ValueError): | |
89 | # TypeError: |
|
89 | # TypeError: | |
90 | # - bin(...) |
|
90 | # - bin(...) | |
91 | # ValueError: |
|
91 | # ValueError: | |
92 | # - node in nm, for non-20-bytes entry |
|
92 | # - node in nm, for non-20-bytes entry | |
93 | # - split(...), for string without ' ' |
|
93 | # - split(...), for string without ' ' | |
94 | repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') |
|
94 | repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') | |
95 | % pycompat.bytestr(line)) |
|
95 | % pycompat.bytestr(line)) | |
96 | except IOError as inst: |
|
96 | except IOError as inst: | |
97 | if inst.errno != errno.ENOENT: |
|
97 | if inst.errno != errno.ENOENT: | |
98 | raise |
|
98 | raise | |
99 | self._active = _readactive(repo, self) |
|
99 | self._active = _readactive(repo, self) | |

    @property
    def active(self):
        return self._active

    @active.setter
    def active(self, mark):
        if mark is not None and mark not in self._refmap:
            raise AssertionError('bookmark %s does not exist!' % mark)

        self._active = mark
        self._aclean = False

    def __len__(self):
        return len(self._refmap)

    def __iter__(self):
        return iter(self._refmap)

    def iteritems(self):
        return self._refmap.iteritems()

    def items(self):
        return self._refmap.items()

    # TODO: maybe rename to allnames()?
    def keys(self):
        return self._refmap.keys()

    # TODO: maybe rename to allnodes()? but nodes would have to be deduplicated
    # could be self._nodemap.keys()
    def values(self):
        return self._refmap.values()

    def __contains__(self, mark):
        return mark in self._refmap

    def __getitem__(self, mark):
        return self._refmap[mark]

    def get(self, mark, default=None):
        return self._refmap.get(mark, default)

    def _set(self, mark, node):
        self._clean = False
        if mark in self._refmap:
            self._del(mark)
        self._refmap[mark] = node
        nrefs = self._nodemap.get(node)
        if nrefs is None:
            self._nodemap[node] = [mark]
        else:
            nrefs.append(mark)
            nrefs.sort()

    def _del(self, mark):
        self._clean = False
        node = self._refmap.pop(mark)
        nrefs = self._nodemap[node]
        if len(nrefs) == 1:
            assert nrefs[0] == mark
            del self._nodemap[node]
        else:
            nrefs.remove(mark)
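
    # _set()/_del() keep _refmap and _nodemap as mirror images of each other;
    # for illustration, after _set('a', n) and _set('b', n), _nodemap[n] is
    # the sorted list ['a', 'b'] while _refmap maps both names back to n.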

    def names(self, node):
        """Return a sorted list of bookmarks pointing to the specified node"""
        return self._nodemap.get(node, [])

    def changectx(self, mark):
        node = self._refmap[mark]
        return self._repo[node]

    def applychanges(self, repo, tr, changes):
        """Apply a list of changes to bookmarks"""
        bmchanges = tr.changes.get('bookmarks')
        for name, node in changes:
            old = self._refmap.get(name)
            if node is None:
                self._del(name)
            else:
                self._set(name, node)
            if bmchanges is not None:
                # if a previous value exists, preserve the "initial" value
                previous = bmchanges.get(name)
                if previous is not None:
                    old = previous[0]
                bmchanges[name] = (old, node)
        self._recordchange(tr)
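
    # applychanges() usage, for illustration: a changes list such as
    #   [('feature', newnode), ('stale', None)]
    # moves bookmark 'feature' to newnode and deletes bookmark 'stale',
    # recording both moves in the transaction's 'bookmarks' change log
    # when one is present.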

    def _recordchange(self, tr):
        """record that bookmarks have been changed in a transaction

        The transaction is then responsible for updating the file content."""
        tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
                            location='plain')
        tr.hookargs['bookmark_moved'] = '1'

    def _writerepo(self, repo):
        """Factored out for extensibility"""
        rbm = repo._bookmarks
        if rbm.active not in self._refmap:
            rbm.active = None
            rbm._writeactive()

        with repo.wlock():
            file_ = repo.vfs('bookmarks', 'w', atomictemp=True,
                             checkambig=True)
            try:
                self._write(file_)
            except: # re-raises
                file_.discard()
                raise
            finally:
                file_.close()

    def _writeactive(self):
        if self._aclean:
            return
        with self._repo.wlock():
            if self._active is not None:
                f = self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
                                   checkambig=True)
                try:
                    f.write(encoding.fromlocal(self._active))
                finally:
                    f.close()
            else:
                self._repo.vfs.tryunlink('bookmarks.current')
        self._aclean = True

    def _write(self, fp):
        for name, node in sorted(self._refmap.iteritems()):
            fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
        self._clean = True
        self._repo.invalidatevolatilesets()

    def expandname(self, bname):
        if bname == '.':
            if self.active:
                return self.active
            else:
                raise error.RepoLookupError(_("no active bookmark"))
        return bname
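
    # expandname('.') resolves to the active bookmark name when one is set;
    # any other name is returned unchanged, e.g. expandname('feature') ==
    # 'feature'.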

    def checkconflict(self, mark, force=False, target=None):
        """check repo for a potential clash of mark with an existing bookmark,
        branch, or hash

        If target is supplied, then check that we are moving the bookmark
        forward.

        If force is supplied, then forcibly move the bookmark to a new commit
        regardless of whether it is a move forward.

        If divergent bookmarks are to be deleted, they will be returned as a
        list.
        """
        cur = self._repo['.'].node()
        if mark in self._refmap and not force:
            if target:
                if self._refmap[mark] == target and target == cur:
                    # re-activating a bookmark
                    return []
                rev = self._repo[target].rev()
                anc = self._repo.changelog.ancestors([rev])
                bmctx = self.changectx(mark)
                divs = [self._refmap[b] for b in self._refmap
                        if b.split('@', 1)[0] == mark.split('@', 1)[0]]

                # allow resolving a single divergent bookmark even if moving
                # the bookmark across branches when a revision is specified
                # that contains a divergent bookmark
                if bmctx.rev() not in anc and target in divs:
                    return divergent2delete(self._repo, [target], mark)

                deletefrom = [b for b in divs
                              if self._repo[b].rev() in anc or b == target]
                delbms = divergent2delete(self._repo, deletefrom, mark)
                if validdest(self._repo, bmctx, self._repo[target]):
                    self._repo.ui.status(
                        _("moving bookmark '%s' forward from %s\n") %
                        (mark, short(bmctx.node())))
                    return delbms
            raise error.Abort(_("bookmark '%s' already exists "
                                "(use -f to force)") % mark)
        if ((mark in self._repo.branchmap() or
             mark == self._repo.dirstate.branch()) and not force):
            raise error.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        if len(mark) > 3 and not force:
            try:
                shadowhash = scmutil.isrevsymbol(self._repo, mark)
            except error.LookupError: # ambiguous identifier
                shadowhash = False
            if shadowhash:
                self._repo.ui.warn(
                    _("bookmark %s matches a changeset hash\n"
                      "(did you leave a -r out of an 'hg bookmark' "
                      "command?)\n")
                    % mark)
        return []

def _readactive(repo, marks):
    """
    Get the active bookmark. We can have an active bookmark that updates
    itself as we commit. This function returns the name of that bookmark.
    It is stored in .hg/bookmarks.current
    """
    try:
        file = repo.vfs('bookmarks.current')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return None
    try:
        # No readline() in osutil.posixfile, reading everything is
        # cheap.
        # Note that it's possible for readlines() here to raise
        # IOError, since we might be reading the active mark over
        # static-http which only tries to load the file when we try
        # to read from it.
        mark = encoding.tolocal((file.readlines() or [''])[0])
        if mark == '' or mark not in marks:
            mark = None
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return None
    finally:
        file.close()
    return mark

def activate(repo, mark):
    """
    Set the given bookmark to be 'active', meaning that this bookmark will
    follow new commits that are made.
    The name is recorded in .hg/bookmarks.current
    """
    repo._bookmarks.active = mark
    repo._bookmarks._writeactive()

def deactivate(repo):
    """
    Unset the active bookmark in this repository.
    """
    repo._bookmarks.active = None
    repo._bookmarks._writeactive()

def isactivewdirparent(repo):
    """
    Tell whether the 'active' bookmark (the one that follows new commits)
    points to one of the parents of the current working directory (wdir).

    While this is normally the case, it can on occasion be false; for example,
    immediately after a pull, the active bookmark can be moved to point
    to a place different than the wdir. This is solved by running `hg update`.
    """
    mark = repo._activebookmark
    marks = repo._bookmarks
    parents = [p.node() for p in repo[None].parents()]
    return (mark in marks and marks[mark] in parents)

def divergent2delete(repo, deletefrom, bm):
    """find divergent versions of bm on nodes in deletefrom.

    Return the list of bookmarks to delete."""
    todelete = []
    marks = repo._bookmarks
    divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
    for mark in divergent:
        if mark == '@' or '@' not in mark:
            # can't be divergent by definition
            continue
        if mark and marks[mark] in deletefrom:
            if mark != bm:
                todelete.append(mark)
    return todelete
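
# Divergence naming, for illustration: pulling 'feature' from a path aliased
# 'remote' can leave a divergent copy named 'feature@remote'; the base name
# before '@' is what divergent2delete() compares against.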

def headsforactive(repo):
    """Given a repo with an active bookmark, return divergent bookmark nodes.

    Args:
      repo: A repository with an active bookmark.

    Returns:
      A list of binary node ids that is the full list of other
      revisions with bookmarks divergent from the active bookmark. If
      there were no divergent bookmarks, then this list will contain
      only one entry.
    """
    if not repo._activebookmark:
        raise ValueError(
            'headsforactive() only makes sense with an active bookmark')
    name = repo._activebookmark.split('@', 1)[0]
    heads = []
    for mark, n in repo._bookmarks.iteritems():
        if mark.split('@', 1)[0] == name:
            heads.append(n)
    return heads

def calculateupdate(ui, repo):
    '''Return a tuple (activemark, movemarkfrom) indicating the active bookmark
    and where to move the active bookmark from, if needed.'''
    checkout, movemarkfrom = None, None
    activemark = repo._activebookmark
    if isactivewdirparent(repo):
        movemarkfrom = repo['.'].node()
    elif activemark:
        ui.status(_("updating to active bookmark %s\n") % activemark)
        checkout = activemark
    return (checkout, movemarkfrom)

def update(repo, parents, node):
    deletefrom = parents
    marks = repo._bookmarks
    active = marks.active
    if not active:
        return False

    bmchanges = []
    if marks[active] in parents:
        new = repo[node]
        divs = [marks.changectx(b) for b in marks
                if b.split('@', 1)[0] == active.split('@', 1)[0]]
        anc = repo.changelog.ancestors([new.rev()])
        deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
        if validdest(repo, marks.changectx(active), new):
            bmchanges.append((active, new.node()))

    for bm in divergent2delete(repo, deletefrom, active):
        bmchanges.append((bm, None))

    if bmchanges:
        with repo.lock(), repo.transaction('bookmark') as tr:
            marks.applychanges(repo, tr, bmchanges)
    return bool(bmchanges)
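
# update(), for illustration: with bookmark 'feature' active on the old
# working-directory parent, update(repo, [oldnode], newnode) advances
# 'feature' to newnode when that is a valid destination, and drops any
# divergent 'feature@...' copies that now fall inside its ancestry.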

def listbinbookmarks(repo):
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})

    hasnode = repo.changelog.hasnode
    for k, v in marks.iteritems():
        # don't expose local divergent bookmarks
        if hasnode(v) and ('@' not in k or k.endswith('@')):
            yield k, v

def listbookmarks(repo):
    d = {}
    for book, node in listbinbookmarks(repo):
        d[book] = hex(node)
    return d

def pushbookmark(repo, key, old, new):
    with repo.wlock(), repo.lock(), repo.transaction('bookmarks') as tr:
        marks = repo._bookmarks
        existing = hex(marks.get(key, ''))
        if existing != old and existing != new:
            return False
        if new == '':
            changes = [(key, None)]
        else:
            if new not in repo:
                return False
            changes = [(key, repo[new].node())]
        marks.applychanges(repo, tr, changes)
    return True
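
# pushbookmark() is effectively a compare-and-swap: the change is applied only
# when the stored value still matches `old` (or already equals `new`), so a
# racing writer makes it return False instead of clobbering the bookmark.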

def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
    '''Compare bookmarks between srcmarks and dstmarks

    This returns the tuple "(addsrc, adddst, advsrc, advdst, diverge,
    differ, invalid, same)", where each element is a list of bookmarks
    as described below:

    :addsrc: added on src side (removed on dst side, perhaps)
    :adddst: added on dst side (removed on src side, perhaps)
    :advsrc: advanced on src side
    :advdst: advanced on dst side
    :diverge: diverged
    :differ: changed, but the changeset referred to on src is unknown on dst
    :invalid: unknown on both sides
    :same: same on both sides

    Each element of the lists in the result tuple is a tuple "(bookmark
    name, changeset ID on source side, changeset ID on destination
    side)". Each changeset ID is a 40-digit hexadecimal string or
    None.

    Changeset IDs of tuples in the "addsrc", "adddst", "differ" or
    "invalid" lists may be unknown to repo.

    If "targets" is specified, only bookmarks listed in it are
    examined.
    '''

    if targets:
        bset = set(targets)
    else:
        srcmarkset = set(srcmarks)
        dstmarkset = set(dstmarks)
        bset = srcmarkset | dstmarkset

    results = ([], [], [], [], [], [], [], [])
    addsrc = results[0].append
    adddst = results[1].append
    advsrc = results[2].append
    advdst = results[3].append
    diverge = results[4].append
    differ = results[5].append
    invalid = results[6].append
    same = results[7].append

    for b in sorted(bset):
        if b not in srcmarks:
            if b in dstmarks:
                adddst((b, None, dstmarks[b]))
            else:
                invalid((b, None, None))
        elif b not in dstmarks:
            addsrc((b, srcmarks[b], None))
        else:
            scid = srcmarks[b]
            dcid = dstmarks[b]
            if scid == dcid:
                same((b, scid, dcid))
            elif scid in repo and dcid in repo:
                sctx = repo[scid]
                dctx = repo[dcid]
                if sctx.rev() < dctx.rev():
                    if validdest(repo, sctx, dctx):
                        advdst((b, scid, dcid))
                    else:
                        diverge((b, scid, dcid))
                else:
                    if validdest(repo, dctx, sctx):
                        advsrc((b, scid, dcid))
                    else:
                        diverge((b, scid, dcid))
            else:
                # it is too expensive to examine in detail, in this case
                differ((b, scid, dcid))

    return results
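
# For illustration: with srcmarks == {'a': n1, 'b': n2} and dstmarks ==
# {'b': n2, 'c': n3}, 'a' lands in addsrc, 'c' in adddst, and 'b' in same;
# had 'b' pointed at different changesets known on both sides, validdest()
# would decide between advsrc/advdst and diverge.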

def _diverge(ui, b, path, localmarks, remotenode):
    '''Return appropriate diverged bookmark for specified ``path``

    This returns None if it fails to assign any divergent
    bookmark name.

    This reuses an already existing one with the "@number" suffix, if it
    refers to ``remotenode``.
    '''
    if b == '@':
        b = ''
    # try to use an @pathalias suffix
    # if an @pathalias already exists, we overwrite (update) it
    if path.startswith("file:"):
        path = util.url(path).path
    for p, u in ui.configitems("paths"):
        if u.startswith("file:"):
            u = util.url(u).path
        if path == u:
            return '%s@%s' % (b, p)

    # assign a unique "@number" suffix newly
    for x in range(1, 100):
        n = '%s@%d' % (b, x)
        if n not in localmarks or localmarks[n] == remotenode:
            return n

    return None
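
# _diverge() naming, for illustration: when pulling from a configured path
# alias 'remote', bookmark 'foo' is stored as 'foo@remote'; if no path alias
# matches, the first free numbered name such as 'foo@1' is used instead.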

def unhexlifybookmarks(marks):
    binremotemarks = {}
    for name, node in marks.items():
        binremotemarks[name] = bin(node)
    return binremotemarks

_binaryentry = struct.Struct('>20sH')

def binaryencode(bookmarks):
    """encode a '(bookmark, node)' iterable into a binary stream

    the binary format is:

        <node><bookmark-length><bookmark-name>

    :node: is a 20-byte binary node,
    :bookmark-length: an unsigned short,
    :bookmark-name: the name of the bookmark (of length <bookmark-length>)

    wdirid (all bits set) will be used as a special value for "missing"
    """
    binarydata = []
    for book, node in bookmarks:
        if not node: # None or ''
            node = wdirid
        binarydata.append(_binaryentry.pack(node, len(book)))
        binarydata.append(book)
    return ''.join(binarydata)
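
# Worked example (illustrative): for a single entry ('book', node) with a
# 20-byte node, binaryencode() emits _binaryentry.pack(node, 4) -- a 22-byte
# '>20sH' header (the node plus a big-endian unsigned short length of 4) --
# followed by the 4 name bytes, 26 bytes in total.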

def binarydecode(stream):
    """decode a binary stream into a '(bookmark, node)' iterable

    the binary format is:

        <node><bookmark-length><bookmark-name>

    :node: is a 20-byte binary node,
    :bookmark-length: an unsigned short,
    :bookmark-name: the name of the bookmark (of length <bookmark-length>)

    wdirid (all bits set) will be used as a special value for "missing"
    """
    entrysize = _binaryentry.size
    books = []
    while True:
        entry = stream.read(entrysize)
        if len(entry) < entrysize:
            if entry:
                raise error.Abort(_('bad bookmark stream'))
            break
        node, length = _binaryentry.unpack(entry)
        bookmark = stream.read(length)
        if len(bookmark) < length:
            if entry:
                raise error.Abort(_('bad bookmark stream'))
        if node == wdirid:
            node = None
        books.append((bookmark, node))
    return books
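
# binarydecode() is the inverse of binaryencode(): feeding it a stream
# produced above yields the original (bookmark, node) pairs, with wdirid
# entries mapped back to None.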

def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
    ui.debug("checking for updated bookmarks\n")
    localmarks = repo._bookmarks
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
     ) = comparebookmarks(repo, remotemarks, localmarks)

    status = ui.status
    warn = ui.warn
    if ui.configbool('ui', 'quietbookmarkmove'):
        status = warn = ui.debug

    explicit = set(explicit)
    changed = []
    for b, scid, dcid in addsrc:
        if scid in repo: # add remote bookmarks for changes we already have
            changed.append((b, scid, status,
                            _("adding remote bookmark %s\n") % (b)))
        elif b in explicit:
            explicit.remove(b)
            ui.warn(_("remote bookmark %s points to locally missing %s\n")
                    % (b, hex(scid)[:12]))

    for b, scid, dcid in advsrc:
        changed.append((b, scid, status,
                        _("updating bookmark %s\n") % (b)))
    # remove normal movement from explicit set
    explicit.difference_update(d[0] for d in changed)

    for b, scid, dcid in diverge:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, scid, status,
                            _("importing bookmark %s\n") % (b)))
        else:
            db = _diverge(ui, b, path, localmarks, scid)
            if db:
                changed.append((db, scid, warn,
                                _("divergent bookmark %s stored as %s\n") %
                                (b, db)))
            else:
                warn(_("warning: failed to assign numbered name "
                       "to divergent bookmark %s\n") % (b))
    for b, scid, dcid in adddst + advdst:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, scid, status,
                            _("importing bookmark %s\n") % (b)))
    for b, scid, dcid in differ:
        if b in explicit:
            explicit.remove(b)
            ui.warn(_("remote bookmark %s points to locally missing %s\n")
                    % (b, hex(scid)[:12]))

    if changed:
        tr = trfunc()
        changes = []
        for b, node, writer, msg in sorted(changed):
            changes.append((b, node))
            writer(msg)
        localmarks.applychanges(repo, tr, changes)

def incoming(ui, repo, peer):
    '''Show bookmarks incoming from other to repo'''
    ui.status(_("searching for changed bookmarks\n"))

    with peer.commandexecutor() as e:
        remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
            'namespace': 'bookmarks',
        }).result())

    r = comparebookmarks(repo, remotemarks, repo._bookmarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r

    incomings = []
    if ui.debugflag:
        getid = lambda id: id
    else:
        getid = lambda id: id[:12]
    if ui.verbose:
        def add(b, id, st):
            incomings.append(" %-25s %s %s\n" % (b, getid(id), st))
    else:
        def add(b, id, st):
            incomings.append(" %-25s %s\n" % (b, getid(id)))
    for b, scid, dcid in addsrc:
        # i18n: "added" refers to a bookmark
        add(b, hex(scid), _('added'))
    for b, scid, dcid in advsrc:
        # i18n: "advanced" refers to a bookmark
        add(b, hex(scid), _('advanced'))
    for b, scid, dcid in diverge:
        # i18n: "diverged" refers to a bookmark
        add(b, hex(scid), _('diverged'))
    for b, scid, dcid in differ:
        # i18n: "changed" refers to a bookmark
        add(b, hex(scid), _('changed'))

    if not incomings:
        ui.status(_("no changed bookmarks found\n"))
        return 1

    for s in sorted(incomings):
        ui.write(s)

    return 0

def outgoing(ui, repo, other):
    '''Show bookmarks outgoing from repo to other'''
    ui.status(_("searching for changed bookmarks\n"))

    remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
    r = comparebookmarks(repo, repo._bookmarks, remotemarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r

    outgoings = []
    if ui.debugflag:
        getid = lambda id: id
    else:
        getid = lambda id: id[:12]
    if ui.verbose:
        def add(b, id, st):
            outgoings.append(" %-25s %s %s\n" % (b, getid(id), st))
    else:
        def add(b, id, st):
            outgoings.append(" %-25s %s\n" % (b, getid(id)))
    for b, scid, dcid in addsrc:
        # i18n: "added" refers to a bookmark
        add(b, hex(scid), _('added'))
    for b, scid, dcid in adddst:
        # i18n: "deleted" refers to a bookmark
        add(b, ' ' * 40, _('deleted'))
    for b, scid, dcid in advsrc:
        # i18n: "advanced" refers to a bookmark
        add(b, hex(scid), _('advanced'))
    for b, scid, dcid in diverge:
        # i18n: "diverged" refers to a bookmark
        add(b, hex(scid), _('diverged'))
    for b, scid, dcid in differ:
        # i18n: "changed" refers to a bookmark
        add(b, hex(scid), _('changed'))

    if not outgoings:
        ui.status(_("no changed bookmarks found\n"))
        return 1

    for s in sorted(outgoings):
        ui.write(s)

    return 0

def summary(repo, peer):
    '''Compare bookmarks between repo and other for "hg summary" output

    This returns a "(# of incoming, # of outgoing)" tuple.
    '''
    with peer.commandexecutor() as e:
        remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
            'namespace': 'bookmarks',
        }).result())

    r = comparebookmarks(repo, remotemarks, repo._bookmarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
    return (len(addsrc), len(adddst))

def validdest(repo, old, new):
    """Is the new bookmark destination a valid update from the old one"""
    repo = repo.unfiltered()
    if old == new:
        # Old == new -> nothing to update.
        return False
    elif not old:
        # old is nullrev, anything is valid.
        # (new != nullrev has been excluded by the previous check)
        return True
    elif repo.obsstore:
        return new.node() in obsutil.foreground(repo, [old.node()])
    else:
        # still an independent clause as it is lazier (and therefore faster)
        return old.isancestorof(new)
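
# validdest(), for illustration: moving a bookmark from an obsolete changeset
# to its successor is valid because the successor is in the old node's
# obsolescence foreground; without an obsstore the test degrades to plain
# ancestry.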

def checkformat(repo, mark):
    """return a valid version of a potential bookmark name

    Raises an abort error if the bookmark name is not valid.
    """
    mark = mark.strip()
    if not mark:
        raise error.Abort(_("bookmark names cannot consist entirely of "
                            "whitespace"))
    scmutil.checknewlabel(repo, mark, 'bookmark')
    return mark

def delete(repo, tr, names):
    """remove a list of marks from the bookmark store

    Raises an abort error if any mark does not exist.
    """
    marks = repo._bookmarks
    changes = []
    for mark in names:
        if mark not in marks:
            raise error.Abort(_("bookmark '%s' does not exist") % mark)
        if mark == repo._activebookmark:
            deactivate(repo)
        changes.append((mark, None))
    marks.applychanges(repo, tr, changes)

def rename(repo, tr, old, new, force=False, inactive=False):
    """rename a bookmark from old to new

    If force is specified, then the new name can overwrite an existing
    bookmark.

    If inactive is specified, then do not activate the new bookmark.

    Raises an abort error if old is not in the bookmark store.
    """
    marks = repo._bookmarks
    mark = checkformat(repo, new)
    if old not in marks:
        raise error.Abort(_("bookmark '%s' does not exist") % old)
    changes = []
    for bm in marks.checkconflict(mark, force):
        changes.append((bm, None))
    changes.extend([(mark, marks[old]), (old, None)])
    marks.applychanges(repo, tr, changes)
    if repo._activebookmark == old and not inactive:
        activate(repo, mark)
866 | def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False): |
|
866 | def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False): | |
867 | """add a list of bookmarks |
|
867 | """add a list of bookmarks | |
868 |
|
868 | |||
869 | If force is specified, then the new name can overwrite an existing |
|
869 | If force is specified, then the new name can overwrite an existing | |
870 | bookmark. |
|
870 | bookmark. | |
871 |
|
871 | |||
872 | If inactive is specified, then do not activate any bookmark. Otherwise, the |
|
872 | If inactive is specified, then do not activate any bookmark. Otherwise, the | |
873 | first bookmark is activated. |
|
873 | first bookmark is activated. | |
874 |
|
874 | |||
875 | Raises an abort error if old is not in the bookmark store. |
|
875 | Raises an abort error if old is not in the bookmark store. | |
876 | """ |
|
876 | """ | |
877 | marks = repo._bookmarks |
|
877 | marks = repo._bookmarks | |
878 | cur = repo['.'].node() |
|
878 | cur = repo['.'].node() | |
879 | newact = None |
|
879 | newact = None | |
880 | changes = [] |
|
880 | changes = [] | |
881 | hiddenrev = None |
|
881 | hiddenrev = None | |
882 |
|
882 | |||
883 | # unhide revs if any |
|
883 | # unhide revs if any | |
884 | if rev: |
|
884 | if rev: | |
885 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') |
|
885 | repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') | |
886 |
|
886 | |||
887 | for mark in names: |
|
887 | for mark in names: | |
888 | mark = checkformat(repo, mark) |
|
888 | mark = checkformat(repo, mark) | |
889 | if newact is None: |
|
889 | if newact is None: | |
890 | newact = mark |
|
890 | newact = mark | |
891 | if inactive and mark == repo._activebookmark: |
|
891 | if inactive and mark == repo._activebookmark: | |
892 | deactivate(repo) |
|
892 | deactivate(repo) | |
893 | return |
|
893 | return | |
894 | tgt = cur |
|
894 | tgt = cur | |
895 | if rev: |
|
895 | if rev: | |
896 | ctx = scmutil.revsingle(repo, rev) |
|
896 | ctx = scmutil.revsingle(repo, rev) | |
897 | if ctx.hidden(): |
|
897 | if ctx.hidden(): | |
898 | hiddenrev = ctx.hex()[:12] |
|
898 | hiddenrev = ctx.hex()[:12] | |
899 | tgt = ctx.node() |
|
899 | tgt = ctx.node() | |
900 | for bm in marks.checkconflict(mark, force, tgt): |
|
900 | for bm in marks.checkconflict(mark, force, tgt): | |
901 | changes.append((bm, None)) |
|
901 | changes.append((bm, None)) | |
902 | changes.append((mark, tgt)) |
|
902 | changes.append((mark, tgt)) | |
903 |
|
903 | |||
904 | if hiddenrev: |
|
904 | if hiddenrev: | |
905 | repo.ui.warn(_("bookmarking hidden changeset %s\n") % hiddenrev) |
|
905 | repo.ui.warn(_("bookmarking hidden changeset %s\n") % hiddenrev) | |
906 |
|
906 | |||
907 | if ctx.obsolete(): |
|
907 | if ctx.obsolete(): | |
908 | msg = obsutil._getfilteredreason(repo, "%s" % hiddenrev, ctx) |
|
908 | msg = obsutil._getfilteredreason(repo, "%s" % hiddenrev, ctx) | |
909 | repo.ui.warn("(%s)\n" % msg) |
|
909 | repo.ui.warn("(%s)\n" % msg) | |
910 |
|
910 | |||
911 | marks.applychanges(repo, tr, changes) |
|
911 | marks.applychanges(repo, tr, changes) | |
912 | if not inactive and cur == marks[newact] and not rev: |
|
912 | if not inactive and cur == marks[newact] and not rev: | |
913 | activate(repo, newact) |
|
913 | activate(repo, newact) | |
914 | elif cur != tgt and newact == repo._activebookmark: |
|
914 | elif cur != tgt and newact == repo._activebookmark: | |
915 | deactivate(repo) |
|
915 | deactivate(repo) | |

def _printbookmarks(ui, repo, fm, bmarks):
    """private method to print bookmarks

    Provides a way for extensions to control how bookmarks are printed (e.g.
    prepend or append names)
    """
    hexfn = fm.hexfunc
    if len(bmarks) == 0 and fm.isplain():
        ui.status(_("no bookmarks set\n"))
    for bmark, (n, prefix, label) in sorted(bmarks.iteritems()):
        fm.startitem()
        fm.context(repo=repo)
        if not ui.quiet:
            fm.plain(' %s ' % prefix, label=label)
        fm.write('bookmark', '%s', bmark, label=label)
        pad = " " * (25 - encoding.colwidth(bmark))
        fm.condwrite(not ui.quiet, 'rev node', pad + ' %d:%s',
                     repo.changelog.rev(n), hexfn(n), label=label)
        fm.data(active=(activebookmarklabel in label))
        fm.plain('\n')

def printbookmarks(ui, repo, fm, names=None):
    """print bookmarks by the given formatter

    Provides a way for extensions to control how bookmarks are printed.
    """
    marks = repo._bookmarks
    bmarks = {}
    for bmark in (names or marks):
        if bmark not in marks:
            raise error.Abort(_("bookmark '%s' does not exist") % bmark)
        active = repo._activebookmark
        if bmark == active:
            prefix, label = '*', activebookmarklabel
        else:
            prefix, label = ' ', ''

        bmarks[bmark] = (marks[bmark], prefix, label)
    _printbookmarks(ui, repo, fm, bmarks)
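
# Illustrative plain output (hypothetical values, added commentary): with an
# active bookmark 'feature' on rev 3, `hg bookmarks` would print roughly
#    * feature                   3:9f3e4b1c2d5a
# where '*' marks the active bookmark and the 25-column pad above keeps the
# rev:node column aligned.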

def preparehookargs(name, old, new):
    if new is None:
        new = ''
    if old is None:
        old = ''
    return {'bookmark': name,
            'node': hex(new),
            'oldnode': hex(old)}
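
# Added commentary (not part of the original module): a bookmark deletion
# reaches this helper as preparehookargs(name, oldnode, None), so 'node'
# becomes hex('') == '' while 'oldnode' still carries the previous node.
# Shell hooks can therefore distinguish create, move and delete by checking
# which of $HG_NODE / $HG_OLDNODE is empty.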
@@ -1,846 +1,846 @@
# dagop.py - graph ancestry and topology algorithm for revset
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq

from .node import (
    nullrev,
)
from .thirdparty import (
    attr,
)
from . import (
    error,
    mdiff,
    node,
    patch,
    pycompat,
    smartset,
)

baseset = smartset.baseset
generatorset = smartset.generatorset

# possible maximum depth between null and wdir()
maxlogdepth = 0x80000000

def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
    """Walk DAG using 'pfunc' from the given 'revs' nodes

    'pfunc(rev)' should return the parent/child revisions of the given 'rev'
    if 'reverse' is True/False respectively.

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    if startdepth is None:
        startdepth = 0
    if stopdepth is None:
        stopdepth = maxlogdepth
    if stopdepth == 0:
        return
    if stopdepth < 0:
        raise error.ProgrammingError('negative stopdepth')
    if reverse:
        heapsign = -1  # max heap
    else:
        heapsign = +1  # min heap

    # load input revs lazily to heap so earlier revisions can be yielded
    # without fully computing the input revs
    revs.sort(reverse)
    irevs = iter(revs)
    pendingheap = []  # [(heapsign * rev, depth), ...] (i.e. lower depth first)

    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(pendingheap, (heapsign * inputrev, 0))

    lastrev = None
    while pendingheap:
        currev, curdepth = heapq.heappop(pendingheap)
        currev = heapsign * currev
        if currev == inputrev:
            inputrev = next(irevs, None)
            if inputrev is not None:
                heapq.heappush(pendingheap, (heapsign * inputrev, 0))
        # rescan parents until curdepth >= startdepth because queued entries
        # of the same revision are iterated from the lowest depth
        foundnew = (currev != lastrev)
        if foundnew and curdepth >= startdepth:
            lastrev = currev
            yield currev
        pdepth = curdepth + 1
        if foundnew and pdepth < stopdepth:
            for prev in pfunc(currev):
                if prev != node.nullrev:
                    heapq.heappush(pendingheap, (heapsign * prev, pdepth))

def filectxancestors(fctxs, followfirst=False):
    """Like filectx.ancestors(), but can walk from multiple files/revisions,
    and includes the given fctxs themselves

    Yields (rev, {fctx, ...}) pairs in descending order.
    """
    visit = {}
    visitheap = []
    def addvisit(fctx):
        rev = fctx.rev()
        if rev not in visit:
            visit[rev] = set()
            heapq.heappush(visitheap, -rev)  # max heap
        visit[rev].add(fctx)

    if followfirst:
        cut = 1
    else:
        cut = None

    for c in fctxs:
        addvisit(c)
    while visit:
        currev = -heapq.heappop(visitheap)
        curfctxs = visit.pop(currev)
        yield currev, curfctxs
        for c in curfctxs:
            for parent in c.parents()[:cut]:
                addvisit(parent)
    assert not visitheap

def filerevancestors(fctxs, followfirst=False):
    """Like filectx.ancestors(), but can walk from multiple files/revisions,
    and includes the given fctxs themselves

    Returns a smartset.
    """
    gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst))
    return generatorset(gen, iterasc=False)

def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog
    def plainpfunc(rev):
        try:
            return cl.parentrevs(rev)[:cut]
        except error.WdirUnsupported:
            return (pctx.rev() for pctx in repo[rev].parents()[:cut])
    if cutfunc is None:
        pfunc = plainpfunc
    else:
        pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
        revs = revs.filter(lambda rev: not cutfunc(rev))
    return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)

def revancestors(repo, revs, followfirst=False, startdepth=None,
                 stopdepth=None, cutfunc=None):
    r"""Like revlog.ancestors(), but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.

    If cutfunc is provided, it will be used to cut the traversal of the DAG.
    When cutfunc(X) returns True, the DAG traversal stops - revision X and
    X's ancestors in the traversal path will be skipped. This could be an
    optimization sometimes.

    Note: if Y is an ancestor of X, cutfunc(X) returning True does not
    necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
    return True in this case. For example,

        D     # revancestors(repo, D, cutfunc=lambda rev: rev == B)
        |\    # will include "A", because the path D -> C -> A was not cut.
        B C   # If "B" gets cut, "A" might want to be cut too.
        |/
        A
    """
    gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
                           cutfunc)
    return generatorset(gen, iterasc=False)

def _genrevdescendants(repo, revs, followfirst):
    if followfirst:
        cut = 1
    else:
        cut = None

    cl = repo.changelog
    first = revs.min()
    nullrev = node.nullrev
    if first == nullrev:
        # Are there nodes with a null first parent and a non-null
        # second one? Maybe. Do we care? Probably not.
        yield first
        for i in cl:
            yield i
    else:
        seen = set(revs)
        for i in cl.revs(first):
            if i in seen:
                yield i
                continue
            for x in cl.parentrevs(i)[:cut]:
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

def _builddescendantsmap(repo, startrev, followfirst):
    """Build map of 'rev -> child revs', offset from startrev"""
    cl = repo.changelog
    nullrev = node.nullrev
    descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))]
    for currev in cl.revs(startrev + 1):
        p1rev, p2rev = cl.parentrevs(currev)
        if p1rev >= startrev:
            descmap[p1rev - startrev].append(currev)
        if not followfirst and p2rev != nullrev and p2rev >= startrev:
            descmap[p2rev - startrev].append(currev)
    return descmap

def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
    startrev = revs.min()
    descmap = _builddescendantsmap(repo, startrev, followfirst)
    def pfunc(rev):
        return descmap[rev - startrev]
    return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)

def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
    """Like revlog.descendants() but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    if startdepth is None and (stopdepth is None or stopdepth >= maxlogdepth):
        gen = _genrevdescendants(repo, revs, followfirst)
    else:
        gen = _genrevdescendantsofdepth(repo, revs, followfirst,
                                        startdepth, stopdepth)
    return generatorset(gen, iterasc=True)

def descendantrevs(revs, revsfn, parentrevsfn):
    """Generate revision number descendants in revision order.

    Yields revision numbers starting with a child of some rev in
    ``revs``. Results are ordered by revision number and are
    therefore topological. Each revision is not considered a descendant
    of itself.

    ``revsfn`` is a callable that, with no argument, iterates over all
    revision numbers, and that, given a ``start`` argument, iterates over
    revision numbers beginning with that value.

    ``parentrevsfn`` is a callable that receives a revision number and
    returns an iterable of parent revision numbers, whose values may include
    nullrev.
    """
    first = min(revs)

    if first == nullrev:
        for rev in revsfn():
            yield rev
        return

    seen = set(revs)
    for rev in revsfn(start=first + 1):
        for prev in parentrevsfn(rev):
            if prev != nullrev and prev in seen:
                seen.add(rev)
                yield rev
                break

def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable

def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs

def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
    diffinrange = any(stype == '!' for _, stype in filteredblocks)
    return diffinrange, linerange1

def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    fctx = fctx.introfilectx()
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead of the possibly-topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2

def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge) and the line range
            # previously computed differs from the one we just got, we take the
            # surrounding interval. This is conservative but avoids losing
            # information.
            if i in seen and seen[i][1] != linerange1:
                lbs, ubs = zip(linerange1, seen[i][1])
                linerange1 = min(lbs), max(ubs)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1

@attr.s(slots=True, frozen=True)
class annotateline(object):
    fctx = attr.ib()
    lineno = attr.ib()
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
    text = attr.ib(default=None)

@attr.s(slots=True, frozen=True)
class _annotatedfile(object):
    # list indexed by lineno - 1
    fctxs = attr.ib()
    linenos = attr.ib()
    skips = attr.ib()
    # full file content
    text = attr.ib()

def _countlines(text):
    if text.endswith("\n"):
        return text.count("\n")
    return text.count("\n") + int(bool(text))

def _decoratelines(text, fctx):
    n = _countlines(text)
    linenos = pycompat.rangelist(1, n + 1)
    return _annotatedfile([fctx] * n, linenos, [False] * n, text)

def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent.text, child.text, opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child.fctxs[b1:b2] = parent.fctxs[a1:a2]
                child.linenos[b1:b2] = parent.linenos[a1:a2]
                child.skips[b1:b2] = parent.skips[a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched.
        # Reversing pblocks maintains the bias towards p2, matching the
        # behavior above.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in pycompat.xrange(b1, b2):
                        if child.fctxs[bk] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child.fctxs[bk] = parent.fctxs[ak]
                            child.linenos[bk] = parent.linenos[ak]
                            child.skips[bk] = True
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in pycompat.xrange(b1, b2):
                    if child.fctxs[bk] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child.fctxs[bk] = parent.fctxs[ak]
                        child.linenos[bk] = parent.linenos[ak]
                        child.skips[bk] = True
    return child

def annotate(base, parents, skiprevs=None, diffopts=None):
    """Core algorithm for filectx.annotate()

    `parents(fctx)` is a function returning a list of parent filectxs.
    """

    # This algorithm would prefer to be recursive, but Python is a
    # bit recursion-hostile. Instead we do an iterative
    # depth-first search.

    # 1st DFS pre-calculates pcache and needed
    visit = [base]
    pcache = {}
    needed = {base: 1}
    while visit:
        f = visit.pop()
        if f in pcache:
            continue
        pl = parents(f)
        pcache[f] = pl
        for p in pl:
            needed[p] = needed.get(p, 0) + 1
            if p not in pcache:
                visit.append(p)

    # 2nd DFS does the actual annotate
    visit[:] = [base]
    hist = {}
    while visit:
        f = visit[-1]
        if f in hist:
            visit.pop()
            continue

        ready = True
        pl = pcache[f]
        for p in pl:
            if p not in hist:
                ready = False
                visit.append(p)
        if ready:
            visit.pop()
            curr = _decoratelines(f.data(), f)
            skipchild = False
            if skiprevs is not None:
                skipchild = f._changeid in skiprevs
            curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                 diffopts)
            for p in pl:
                if needed[p] == 1:
                    del hist[p]
                    del needed[p]
                else:
                    needed[p] -= 1

            hist[f] = curr
            del pcache[f]

    a = hist[base]
    return [annotateline(*r) for r in zip(a.fctxs, a.linenos, a.skips,
                                          mdiff.splitnewlines(a.text))]

def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revisions already emitted
    # B) revisions in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) waiting for REV to be
    #   available; if such a subgroup exists, we add REV to it and the
    #   subgroup now waits for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revisions that have been emitted. They can be
    # considered unblocked as the graph generator is already aware of them so
    # there is no need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling
    # this set will force all other branches to wait until this branch is
    # ready to be emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    list of revs waiting to be displayed,
    #    blocked: set of revs that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') corresponds to parents of any revision in
    # the group ('revs') that is not itself contained in the group. The main
    # idea of this algorithm is to delay as much as possible the emission of
    # any revision. This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this means we
    # properly retain new branches but give up on any special ordering for
    # ancestors of merges. The implementation can be improved to handle this
    # better.
    #
    # The first subgroup is special. It corresponds to all the revisions that
    # were already emitted. The 'revs' list is expected to be empty and the
    # 'blocked' set contains the parents of already emitted revisions.
    #
    # You could pre-seed the <parents> set of groups[0] with specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # The heap pops the smallest element; we want the highest, so we
        # invert.
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterate over pending revs until after the current rev has been
        # processed
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Look for a subgroup that is blocked, waiting for the current
            # revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are
                # blocked on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines in their drawing. This is currently not
                # done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
|
695 | targetidx = len(groups) | |
696 | groups.append(([], {rev})) |
|
696 | groups.append(([], {rev})) | |
697 |
|
697 | |||
698 | gr = groups[targetidx] |
|
698 | gr = groups[targetidx] | |
699 |
|
699 | |||
700 | # We now add the current nodes to this subgroups. This is done |
|
700 | # We now add the current nodes to this subgroups. This is done | |
701 | # after the subgroup merging because all elements from a subgroup |
|
701 | # after the subgroup merging because all elements from a subgroup | |
702 | # that relied on this rev must precede it. |
|
702 | # that relied on this rev must precede it. | |
703 | # |
|
703 | # | |
704 | # we also update the <parents> set to include the parents of the |
|
704 | # we also update the <parents> set to include the parents of the | |
705 | # new nodes. |
|
705 | # new nodes. | |
706 | if rev == currentrev: # only display stuff in rev |
|
706 | if rev == currentrev: # only display stuff in rev | |
707 | gr[0].append(rev) |
|
707 | gr[0].append(rev) | |
708 | gr[1].remove(rev) |
|
708 | gr[1].remove(rev) | |
709 | parents = [p for p in parentsfunc(rev) if p > node.nullrev] |
|
709 | parents = [p for p in parentsfunc(rev) if p > node.nullrev] | |
710 | gr[1].update(parents) |
|
710 | gr[1].update(parents) | |
711 | for p in parents: |
|
711 | for p in parents: | |
712 | if p not in pendingset: |
|
712 | if p not in pendingset: | |
713 | pendingset.add(p) |
|
713 | pendingset.add(p) | |
714 | heappush(pendingheap, -p) |
|
714 | heappush(pendingheap, -p) | |
715 |
|
715 | |||
716 | # Look for a subgroup to display |
|
716 | # Look for a subgroup to display | |
717 | # |
|
717 | # | |
718 | # When unblocked is empty (if clause), we were not waiting for any |
|
718 | # When unblocked is empty (if clause), we were not waiting for any | |
719 | # revisions during the first iteration (if no priority was given) or |
|
719 | # revisions during the first iteration (if no priority was given) or | |
720 | # if we emitted a whole disconnected set of the graph (reached a |
|
720 | # if we emitted a whole disconnected set of the graph (reached a | |
721 | # root). In that case we arbitrarily take the oldest known |
|
721 | # root). In that case we arbitrarily take the oldest known | |
722 | # subgroup. The heuristic could probably be better. |
|
722 | # subgroup. The heuristic could probably be better. | |
723 | # |
|
723 | # | |
724 | # Otherwise (elif clause) if the subgroup is blocked on |
|
724 | # Otherwise (elif clause) if the subgroup is blocked on | |
725 | # a revision we just emitted, we can safely emit it as |
|
725 | # a revision we just emitted, we can safely emit it as | |
726 | # well. |
|
726 | # well. | |
727 | if not unblocked: |
|
727 | if not unblocked: | |
728 | if len(groups) > 1: # display other subset |
|
728 | if len(groups) > 1: # display other subset | |
729 | targetidx = 1 |
|
729 | targetidx = 1 | |
730 | gr = groups[1] |
|
730 | gr = groups[1] | |
731 | elif not gr[1] & unblocked: |
|
731 | elif not gr[1] & unblocked: | |
732 | gr = None |
|
732 | gr = None | |
733 |
|
733 | |||
734 | if gr is not None: |
|
734 | if gr is not None: | |
735 | # update the set of awaited revisions with the one from the |
|
735 | # update the set of awaited revisions with the one from the | |
736 | # subgroup |
|
736 | # subgroup | |
737 | unblocked |= gr[1] |
|
737 | unblocked |= gr[1] | |
738 | # output all revisions in the subgroup |
|
738 | # output all revisions in the subgroup | |
739 | for r in gr[0]: |
|
739 | for r in gr[0]: | |
740 | yield r |
|
740 | yield r | |
741 | # delete the subgroup that you just output |
|
741 | # delete the subgroup that you just output | |
742 | # unless it is groups[0] in which case you just empty it. |
|
742 | # unless it is groups[0] in which case you just empty it. | |
743 | if targetidx: |
|
743 | if targetidx: | |
744 | del groups[targetidx] |
|
744 | del groups[targetidx] | |
745 | else: |
|
745 | else: | |
746 | gr[0][:] = [] |
|
746 | gr[0][:] = [] | |
747 | # Check if we have some subgroup waiting for revisions we are not going to |
|
747 | # Check if we have some subgroup waiting for revisions we are not going to | |
748 | # iterate over |
|
748 | # iterate over | |
749 | for g in groups: |
|
749 | for g in groups: | |
750 | for r in g[0]: |
|
750 | for r in g[0]: | |
751 | yield r |
|
751 | yield r | |
752 |
|
752 | |||
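
# Illustrative sketch (added for exposition; '_demo_mergegroups' is a
# hypothetical name, not part of the original module): the subgroup-merge
# step from the loop above, isolated on plain data. Two groups blocked on
# the same revision (1) collapse into a single group, exactly as the
# 'matching' branch does.
def _demo_mergegroups():
    groups = [([5, 4], {1}), ([3, 2], {1})]
    matching = [i for i, g in enumerate(groups) if 1 in g[1]]
    targetidx = matching.pop(0)
    trevs, tparents = groups[targetidx]
    for i in matching:
        gr = groups[i]
        trevs.extend(gr[0])
        tparents |= gr[1]
    for i in reversed(matching):
        del groups[i]
    assert groups == [([5, 4, 3, 2], {1})]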

def headrevs(revs, parentsfn):
    """Resolve the set of heads from a set of revisions.

    Receives an iterable of revision numbers and a callable that receives a
    revision number and returns an iterable of parent revision numbers,
    possibly including nullrev.

    Returns a set of revision numbers that are DAG heads within the passed
    subset.

    ``nullrev`` is never included in the returned set, even if it is
    provided in the input set.
    """
    headrevs = set(revs)
    parents = set([node.nullrev])
    up = parents.update

    for rev in revs:
        up(parentsfn(rev))
    headrevs.difference_update(parents)
    return headrevs
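
# Minimal sanity check (added for exposition; '_demo_headrevs' is a
# hypothetical name): a four-revision DAG where revision 1 has two
# children, so revisions 2 and 3 are the heads of the subset.
def _demo_headrevs():
    parentmap = {0: [node.nullrev], 1: [0], 2: [1], 3: [1]}
    assert headrevs([0, 1, 2, 3], parentmap.__getitem__) == {2, 3}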

def headrevssubset(revsfn, parentrevsfn, startrev=None, stoprevs=None):
    """Returns the set of all revs that have no children, with some control
    over the traversal.

    ``revsfn`` is a callable that with no arguments returns an iterator over
    all revision numbers in topological order. With a ``start`` argument, it
    returns revision numbers starting at that number.

    ``parentrevsfn`` is a callable receiving a revision number and returning
    an iterable of parent revision numbers, where values can include nullrev.

    ``startrev`` is a revision number at which to start the search.

    ``stoprevs`` is an iterable of revision numbers that, when encountered,
    will stop DAG traversal beyond them. Parents of revisions in this
    collection will be heads.
    """
    if startrev is None:
        startrev = nullrev

    stoprevs = set(stoprevs or [])

    reachable = {startrev}
    heads = {startrev}

    for rev in revsfn(start=startrev + 1):
        for prev in parentrevsfn(rev):
            if prev in reachable:
                if rev not in stoprevs:
                    reachable.add(rev)
                    heads.add(rev)

                if prev in heads and prev not in stoprevs:
                    heads.remove(prev)

    return heads
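
# Sketch (added for exposition; '_demo_headrevssubset' is a hypothetical
# name): the same branching DAG, fed through the incremental variant with
# the default start of nullrev. Revisions 2 and 3 are again the heads.
def _demo_headrevssubset():
    parentmap = {0: [nullrev], 1: [0], 2: [1], 3: [1]}
    def revsfn(start=0):
        return iter(range(start, 4))
    assert headrevssubset(revsfn, parentmap.__getitem__) == {2, 3}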

def linearize(revs, parentsfn):
    """Linearize and topologically sort a list of revisions.

    The linearization process tries to create long runs of revs where a
    child rev comes immediately after its first parent. This is done by
    visiting the heads of the revs in inverse topological order, and for
    each visited rev, visiting its second parent, then its first parent,
    then adding the rev itself to the output list.

    Returns a list of revision numbers.
    """
    visit = list(sorted(headrevs(revs, parentsfn), reverse=True))
    finished = set()
    result = []

    while visit:
        rev = visit.pop()
        if rev < 0:
            rev = -rev - 1

            if rev not in finished:
                result.append(rev)
                finished.add(rev)

        else:
            visit.append(-rev - 1)

            for prev in parentsfn(rev):
                if prev == node.nullrev or prev not in revs or prev in finished:
                    continue

                visit.append(prev)

    assert len(result) == len(revs)

    return result
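
# Worked example (added for exposition; '_demo_linearize' is a
# hypothetical name): on the branching DAG used above, revisions 0, 1, 2
# come out as one run (each child directly after its first parent), with
# the sibling head 3 emitted last.
def _demo_linearize():
    parentmap = {0: [node.nullrev], 1: [0], 2: [1], 3: [1]}
    assert linearize({0, 1, 2, 3}, parentmap.__getitem__) == [0, 1, 2, 3]
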
@@ -1,1241 +1,1241 b''
#
# This is the mercurial setup script.
#
# 'python setup.py install', or
# 'python setup.py --help' for more options

import os

supportedpy = '~= 2.7'
if os.environ.get('HGALLOWPYTHON3', ''):
    # Mercurial will never work on Python 3 before 3.5 due to a lack
    # of % formatting on bytestrings, and can't work on 3.6.0 or 3.6.1
    # due to a bug in % formatting in bytestrings.
    # We cannot support Python 3.5.0, 3.5.1, 3.5.2 because of a bug in
    # codecs.escape_encode() where it raises SystemError on an empty
    # bytestring. Bug link: https://bugs.python.org/issue25270
    #
    # TODO: when we actually work on Python 3, use this string as the
    # actual supportedpy string.
    supportedpy = ','.join([
        '>=2.7',
        '!=3.0.*',
        '!=3.1.*',
        '!=3.2.*',
        '!=3.3.*',
        '!=3.4.*',
        '!=3.5.0',
        '!=3.5.1',
        '!=3.5.2',
        '!=3.6.0',
        '!=3.6.1',
    ])
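
# For reference (illustrative note, not in the original file): the joined
# string is a PEP 440 specifier such as '>=2.7,!=3.0.*,...,!=3.6.1',
# i.e. any Python from 2.7 on except the releases known to be broken.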

import sys, platform
if sys.version_info[0] >= 3:
    printf = eval('print')
    libdir_escape = 'unicode_escape'
    def sysstr(s):
        return s.decode('latin-1')
else:
    libdir_escape = 'string_escape'
    def printf(*args, **kwargs):
        f = kwargs.get('file', sys.stdout)
        end = kwargs.get('end', '\n')
        f.write(b' '.join(args) + end)
    def sysstr(s):
        return s

# Attempt to guide users to a modern pip - this means that 2.6 users
# should have a chance of getting a 4.2 release, and when we ratchet
# the version requirement forward again hopefully everyone will get
# something that works for them.
if sys.version_info < (2, 7, 0, 'final'):
    pip_message = ('This may be due to an out of date pip. '
                   'Make sure you have pip >= 9.0.1.')
    try:
        import pip
        pip_version = tuple([int(x) for x in pip.__version__.split('.')[:3]])
        if pip_version < (9, 0, 1):
            pip_message = (
                'Your pip version is out of date, please install '
                'pip >= 9.0.1. pip {} detected.'.format(pip.__version__))
        else:
            # pip is new enough - it must be something else
            pip_message = ''
    except Exception:
        pass
    error = """
Mercurial does not support Python older than 2.7.
Python {py} detected.
{pip}
""".format(py=sys.version_info, pip=pip_message)
    printf(error, file=sys.stderr)
    sys.exit(1)

# We don't yet officially support Python 3. But we want to allow developers
# to hack on it. Detect and disallow running on Python 3 by default. But
# provide a backdoor to enable working on Python 3.
if sys.version_info[0] != 2:
    badpython = True

    # Allow Python 3 from source checkouts.
    if os.path.isdir('.hg') or 'HGPYTHON3' in os.environ:
        badpython = False

    if badpython:
        error = """
Mercurial only supports Python 2.7.
Python {py} detected.
Please re-run with Python 2.7.
""".format(py=sys.version_info)

        printf(error, file=sys.stderr)
        sys.exit(1)

# Solaris Python packaging brain damage
try:
    import hashlib
    sha = hashlib.sha1()
except ImportError:
    try:
        import sha
        sha.sha # silence unused import warning
    except ImportError:
        raise SystemExit(
            "Couldn't import standard hashlib (incomplete Python install).")

try:
    import zlib
    zlib.compressobj # silence unused import warning
except ImportError:
    raise SystemExit(
        "Couldn't import standard zlib (incomplete Python install).")

# The base IronPython distribution (as of 2.7.1) doesn't support bz2
isironpython = False
try:
    isironpython = (platform.python_implementation()
                    .lower().find("ironpython") != -1)
except AttributeError:
    pass

if isironpython:
    sys.stderr.write("warning: IronPython detected (no bz2 support)\n")
else:
    try:
        import bz2
        bz2.BZ2Compressor # silence unused import warning
    except ImportError:
        raise SystemExit(
            "Couldn't import standard bz2 (incomplete Python install).")

ispypy = "PyPy" in sys.version

hgrustext = os.environ.get('HGWITHRUSTEXT')
# TODO record it for proper rebuild upon changes
# (see mercurial/__modulepolicy__.py)
if hgrustext != 'cpython' and hgrustext is not None:
    hgrustext = 'direct-ffi'

import ctypes
import errno
import stat, subprocess, time
import re
import shutil
import tempfile
from distutils import log
# We have issues with setuptools on some platforms and builders. Until
# those are resolved, setuptools is opt-in except for platforms where
# we don't have issues.
issetuptools = (os.name == 'nt' or 'FORCE_SETUPTOOLS' in os.environ)
if issetuptools:
    from setuptools import setup
else:
    from distutils.core import setup
from distutils.ccompiler import new_compiler
from distutils.core import Command, Extension
from distutils.dist import Distribution
from distutils.command.build import build
from distutils.command.build_ext import build_ext
from distutils.command.build_py import build_py
from distutils.command.build_scripts import build_scripts
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.command.install_scripts import install_scripts
from distutils.spawn import spawn, find_executable
from distutils import file_util
from distutils.errors import (
    CCompilerError,
    DistutilsError,
    DistutilsExecError,
)
from distutils.sysconfig import get_python_inc, get_config_var
from distutils.version import StrictVersion

# Explain to distutils.StrictVersion how our release candidates are
# versioned
StrictVersion.version_re = re.compile(r'^(\d+)\.(\d+)(\.(\d+))?-?(rc(\d+))?$')

def write_if_changed(path, content):
    """Write content to a file iff the content hasn't changed."""
    if os.path.exists(path):
        with open(path, 'rb') as fh:
            current = fh.read()
    else:
        current = b''

    if current != content:
        with open(path, 'wb') as fh:
            fh.write(content)
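
# Usage sketch (added for exposition; '_demo_write_if_changed' is a
# hypothetical name and writes only to a temporary path): the second call
# is a no-op because the content is unchanged, so the file's mtime is
# preserved and make-style rebuilds are not retriggered.
def _demo_write_if_changed():
    path = os.path.join(tempfile.mkdtemp(prefix='hg-demo-'), 'demo.txt')
    write_if_changed(path, b'payload')
    before = os.stat(path).st_mtime
    write_if_changed(path, b'payload')  # content identical -> no write
    assert os.stat(path).st_mtime == before
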
scripts = ['hg']
if os.name == 'nt':
    # We remove hg.bat if we are able to build hg.exe.
    scripts.append('contrib/win32/hg.bat')

def cancompile(cc, code):
    tmpdir = tempfile.mkdtemp(prefix='hg-install-')
    devnull = oldstderr = None
    try:
        fname = os.path.join(tmpdir, 'testcomp.c')
        f = open(fname, 'w')
        f.write(code)
        f.close()
        # Redirect stderr to /dev/null to hide any error messages
        # from the compiler.
        # This will have to be changed if we ever have to check
        # for a function on Windows.
        devnull = open('/dev/null', 'w')
        oldstderr = os.dup(sys.stderr.fileno())
        os.dup2(devnull.fileno(), sys.stderr.fileno())
        objects = cc.compile([fname], output_dir=tmpdir)
        cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
        return True
    except Exception:
        return False
    finally:
        if oldstderr is not None:
            os.dup2(oldstderr, sys.stderr.fileno())
        if devnull is not None:
            devnull.close()
        shutil.rmtree(tmpdir)

# simplified version of distutils.ccompiler.CCompiler.has_function
# that actually removes its temporary files.
def hasfunction(cc, funcname):
    code = 'int main(void) { %s(); }\n' % funcname
    return cancompile(cc, code)

def hasheader(cc, headername):
    code = '#include <%s>\nint main(void) { return 0; }\n' % headername
    return cancompile(cc, code)
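
# Sketch (added for exposition; '_demo_compilerprobes' is a hypothetical
# name): how these probes are typically used to toggle optional features.
# Like cancompile() itself, this assumes a POSIX toolchain.
def _demo_compilerprobes():
    cc = new_compiler()
    has_stdio = hasheader(cc, 'stdio.h')  # True on any working toolchain
    has_bogus = hasfunction(cc, 'hg_no_such_function_xyz')  # expect False
    return has_stdio, has_bogus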

# py2exe needs to be installed to work
try:
    import py2exe
    py2exe.Distribution # silence unused import warning
    py2exeloaded = True
    # import py2exe's patched Distribution class
    from distutils.core import Distribution
except ImportError:
    py2exeloaded = False

def runcmd(cmd, env):
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env)
    out, err = p.communicate()
    return p.returncode, out, err
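
# Usage sketch (added for exposition; '_demo_runcmd' is a hypothetical
# name): runcmd() is how this script shells out to hg further down; any
# command works, and both output streams come back as bytes.
def _demo_runcmd():
    rc, out, err = runcmd([sys.executable, '-c', 'print("hi")'],
                          os.environ.copy())
    assert rc == 0 and out.strip() == b'hi'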

class hgcommand(object):
    def __init__(self, cmd, env):
        self.cmd = cmd
        self.env = env

    def run(self, args):
        cmd = self.cmd + args
        returncode, out, err = runcmd(cmd, self.env)
        err = filterhgerr(err)
        if err or returncode != 0:
            printf("stderr from '%s':" % (' '.join(cmd)), file=sys.stderr)
            printf(err, file=sys.stderr)
            return ''
        return out

def filterhgerr(err):
    # If root is executing setup.py, but the repository is owned by
    # another user (as in "sudo python setup.py install") we will get
    # trust warnings since the .hg/hgrc file is untrusted. That is
    # fine, we don't want to load it anyway. Python may warn about
    # a missing __init__.py in mercurial/locale, we also ignore that.
    err = [e for e in err.splitlines()
           if (not e.startswith(b'not trusting file')
               and not e.startswith(b'warning: Not importing')
               and not e.startswith(b'obsolete feature not enabled')
               and not e.startswith(b'*** failed to import extension')
               and not e.startswith(b'devel-warn:')
               and not (e.startswith(b'(third party extension')
                        and e.endswith(b'or newer of Mercurial; disabling)')))]
    return b'\n'.join(b' ' + e for e in err)
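
# Sketch (added for exposition; '_demo_filterhgerr' is a hypothetical
# name): benign trust warnings are dropped while real errors survive.
def _demo_filterhgerr():
    err = (b'not trusting file /repo/.hg/hgrc from untrusted user root\n'
           b'abort: something actually went wrong')
    filtered = filterhgerr(err)
    assert b'not trusting' not in filtered
    assert b'abort: something actually went wrong' in filtered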

def findhg():
    """Try to figure out how we should invoke hg for examining the local
    repository contents.

    Returns an hgcommand object."""
    # By default, prefer the "hg" command in the user's path. This was
    # presumably the hg command that the user used to create this repository.
    #
    # This repository may require extensions or other settings that would not
    # be enabled by running the hg script directly from this local repository.
    hgenv = os.environ.copy()
    # Use HGPLAIN to disable hgrc settings that would change output
    # formatting, and disable localization for the same reasons.
    hgenv['HGPLAIN'] = '1'
    hgenv['LANGUAGE'] = 'C'
    hgcmd = ['hg']
    # Run a simple "hg log" command just to see if using hg from the user's
    # path works and can successfully interact with this repository. Windows
    # gives precedence to hg.exe in the current directory, so fall back to
    # the python invocation of local hg, where pythonXY.dll can always be
    # found.
    check_cmd = ['log', '-r.', '-Ttest']
    if os.name != 'nt':
        try:
            retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
        except EnvironmentError:
            retcode = -1
        if retcode == 0 and not filterhgerr(err):
            return hgcommand(hgcmd, hgenv)

    # Fall back to trying the local hg installation.
    hgenv = localhgenv()
    hgcmd = [sys.executable, 'hg']
    try:
        retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
    except EnvironmentError:
        retcode = -1
    if retcode == 0 and not filterhgerr(err):
        return hgcommand(hgcmd, hgenv)

    raise SystemExit('Unable to find a working hg binary to extract the '
                     'version from the repository tags')

def localhgenv():
    """Get an environment dictionary to use for invoking or importing
    mercurial from the local repository."""
    # Execute hg out of this directory with a custom environment which takes
    # care to not use any hgrc files and do no localization.
    env = {'HGMODULEPOLICY': 'py',
           'HGRCPATH': '',
           'LANGUAGE': 'C',
           'PATH': ''} # make pypi modules that use os.environ['PATH'] happy
    if 'LD_LIBRARY_PATH' in os.environ:
        env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
    if 'SystemRoot' in os.environ:
        # SystemRoot is required by Windows to load various DLLs. See:
        # https://bugs.python.org/issue13524#msg148850
        env['SystemRoot'] = os.environ['SystemRoot']
    return env
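
# Usage sketch (illustrative, not in the original file): findhg() above
# pairs this stripped-down environment with a 'python hg' invocation from
# the source checkout, e.g.:
#
#   retcode, out, err = runcmd([sys.executable, 'hg', 'id', '-i'],
#                              localhgenv())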

version = ''

if os.path.isdir('.hg'):
    hg = findhg()
    cmd = ['log', '-r', '.', '--template', '{tags}\n']
    numerictags = [t for t in sysstr(hg.run(cmd)).split() if t[0:1].isdigit()]
    hgid = sysstr(hg.run(['id', '-i'])).strip()
    if not hgid:
        # Bail out if hg is having problems interacting with this repository,
        # rather than falling through and producing a bogus version number.
        # Continuing with an invalid version number will break extensions
        # that define minimumhgversion.
        raise SystemExit('Unable to determine hg version from local repository')
    if numerictags: # tag(s) found
        version = numerictags[-1]
        if hgid.endswith('+'): # propagate the dirty status to the tag
            version += '+'
    else: # no tag found
        ltagcmd = ['parents', '--template', '{latesttag}']
        ltag = sysstr(hg.run(ltagcmd))
        changessincecmd = ['log', '-T', 'x\n', '-r', "only(.,'%s')" % ltag]
        changessince = len(hg.run(changessincecmd).splitlines())
        version = '%s+%s-%s' % (ltag, changessince, hgid)
    if version.endswith('+'):
        version += time.strftime('%Y%m%d')
elif os.path.exists('.hg_archival.txt'):
    kw = dict([[t.strip() for t in l.split(':', 1)]
               for l in open('.hg_archival.txt')])
    if 'tag' in kw:
        version = kw['tag']
    elif 'latesttag' in kw:
        if 'changessincelatesttag' in kw:
            version = '%(latesttag)s+%(changessincelatesttag)s-%(node).12s' % kw
        else:
            version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
    else:
        version = kw.get('node', '')[:12]

if version:
    versionb = version
    if not isinstance(versionb, bytes):
        versionb = versionb.encode('ascii')

    write_if_changed('mercurial/__version__.py', b''.join([
        b'# this file is autogenerated by setup.py\n'
        b'version = b"%s"\n' % versionb,
    ]))

try:
    oldpolicy = os.environ.get('HGMODULEPOLICY', None)
    os.environ['HGMODULEPOLICY'] = 'py'
    from mercurial import __version__
    version = __version__.version
except ImportError:
    version = b'unknown'
finally:
    if oldpolicy is None:
        del os.environ['HGMODULEPOLICY']
    else:
        os.environ['HGMODULEPOLICY'] = oldpolicy
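
# Illustrative note (not in the original file): the version strings built
# above look like, for example:
#
#   '4.9'                           exactly on a numeric tag
#   '4.9+20190101'                  on a tag, with local modifications
#   '4.9+12-0123abcd4567'           12 commits past tag 4.9 at that node
#   '4.9+12-0123abcd4567+20190101'  same, with local modifications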

class hgbuild(build):
    # Insert hgbuildmo first so that files in mercurial/locale/ are found
    # when build_py is run next.
    sub_commands = [('build_mo', None)] + build.sub_commands

class hgbuildmo(build):

    description = "build translations (.mo files)"

    def run(self):
        if not find_executable('msgfmt'):
            self.warn("could not find msgfmt executable, no translations "
                      "will be built")
            return

        podir = 'i18n'
        if not os.path.isdir(podir):
            self.warn("could not find %s/ directory" % podir)
            return

        join = os.path.join
        for po in os.listdir(podir):
            if not po.endswith('.po'):
                continue
            pofile = join(podir, po)
            modir = join('locale', po[:-3], 'LC_MESSAGES')
            mofile = join(modir, 'hg.mo')
            mobuildfile = join('mercurial', mofile)
            cmd = ['msgfmt', '-v', '-o', mobuildfile, pofile]
            if sys.platform != 'sunos5':
                # msgfmt on Solaris does not know about -c
                cmd.append('-c')
            self.mkpath(join('mercurial', modir))
            self.make_file([pofile], mobuildfile, spawn, (cmd,))


class hgdist(Distribution):
    pure = False
    cffi = ispypy

    global_options = Distribution.global_options + \
        [('pure', None, "use pure (slow) Python "
          "code instead of C extensions"),
        ]

    def has_ext_modules(self):
        # self.ext_modules is emptied in hgbuildpy.finalize_options which is
        # too late for some cases
        return not self.pure and Distribution.has_ext_modules(self)

# This is ugly as a one-liner. So use a variable.
buildextnegops = dict(getattr(build_ext, 'negative_options', {}))
buildextnegops['no-zstd'] = 'zstd'

class hgbuildext(build_ext):
    user_options = build_ext.user_options + [
        ('zstd', None, 'compile zstd bindings [default]'),
        ('no-zstd', None, 'do not compile zstd bindings'),
    ]

    boolean_options = build_ext.boolean_options + ['zstd']
    negative_opt = buildextnegops

    def initialize_options(self):
        self.zstd = True
        return build_ext.initialize_options(self)

    def build_extensions(self):
        ruststandalones = [e for e in self.extensions
                           if isinstance(e, RustStandaloneExtension)]
        self.extensions = [e for e in self.extensions
                           if e not in ruststandalones]
        # Filter out zstd if disabled via argument.
        if not self.zstd:
            self.extensions = [e for e in self.extensions
                               if e.name != 'mercurial.zstd']

        for rustext in ruststandalones:
            rustext.build('' if self.inplace else self.build_lib)

        return build_ext.build_extensions(self)

    def build_extension(self, ext):
        if isinstance(ext, RustExtension):
            ext.rustbuild()
        try:
            build_ext.build_extension(self, ext)
        except CCompilerError:
            if not getattr(ext, 'optional', False):
                raise
            log.warn("Failed to build optional extension '%s' (skipping)",
                     ext.name)

class hgbuildscripts(build_scripts):
    def run(self):
        if os.name != 'nt' or self.distribution.pure:
            return build_scripts.run(self)

        exebuilt = False
        try:
            self.run_command('build_hgexe')
            exebuilt = True
        except (DistutilsError, CCompilerError):
            log.warn('failed to build optional hg.exe')

        if exebuilt:
            # Copying hg.exe to the scripts build directory ensures it is
            # installed by the install_scripts command.
            hgexecommand = self.get_finalized_command('build_hgexe')
            dest = os.path.join(self.build_dir, 'hg.exe')
            self.mkpath(self.build_dir)
            self.copy_file(hgexecommand.hgexepath, dest)

            # Remove hg.bat because it is redundant with hg.exe.
            self.scripts.remove('contrib/win32/hg.bat')

        return build_scripts.run(self)

class hgbuildpy(build_py):
    def finalize_options(self):
        build_py.finalize_options(self)

        if self.distribution.pure:
            self.distribution.ext_modules = []
        elif self.distribution.cffi:
            from mercurial.cffi import (
                bdiffbuild,
                mpatchbuild,
            )
            exts = [mpatchbuild.ffi.distutils_extension(),
                    bdiffbuild.ffi.distutils_extension()]
            # cffi modules go here
            if sys.platform == 'darwin':
                from mercurial.cffi import osutilbuild
                exts.append(osutilbuild.ffi.distutils_extension())
            self.distribution.ext_modules = exts
        else:
            h = os.path.join(get_python_inc(), 'Python.h')
            if not os.path.exists(h):
                raise SystemExit('Python headers are required to build '
                                 'Mercurial but weren\'t found in %s' % h)

    def run(self):
        basepath = os.path.join(self.build_lib, 'mercurial')
        self.mkpath(basepath)

        if self.distribution.pure:
            modulepolicy = 'py'
        elif self.build_lib == '.':
            # in-place build should run without rebuilding C extensions
            modulepolicy = 'allow'
        else:
            modulepolicy = 'c'

        content = b''.join([
            b'# this file is autogenerated by setup.py\n',
            b'modulepolicy = b"%s"\n' % modulepolicy.encode('ascii'),
        ])
        write_if_changed(os.path.join(basepath, '__modulepolicy__.py'),
                         content)

        build_py.run(self)
562 |
|
562 | |||
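# For illustration (assumed output, derived from the template above): a
# default C build drops a mercurial/__modulepolicy__.py into build_lib that
# reads
#
#   # this file is autogenerated by setup.py
#   modulepolicy = b"c"
#
# while a --pure build writes b"py" and an in-place build (build_lib == '.')
# writes b"allow", which the module policy machinery reads at import time.
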
class buildhgextindex(Command):
    description = 'generate prebuilt index of hgext (for frozen package)'
    user_options = []
    _indexfilename = 'hgext/__index__.py'

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        if os.path.exists(self._indexfilename):
            with open(self._indexfilename, 'w') as f:
                f.write('# empty\n')

        # with no extensions enabled here, disabled() lists everything
        code = ('import pprint; from mercurial import extensions; '
                'pprint.pprint(extensions.disabled())')
        returncode, out, err = runcmd([sys.executable, '-c', code],
                                      localhgenv())
        if err or returncode != 0:
            raise DistutilsExecError(err)

        with open(self._indexfilename, 'w') as f:
            f.write('# this file is autogenerated by setup.py\n')
            f.write('docs = ')
            f.write(out)

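# Sketch of the generated index (assumed shape; the actual entries depend on
# the hgext modules present at build time). extensions.disabled() returns a
# name-to-summary mapping, so hgext/__index__.py ends up looking like:
#
#   # this file is autogenerated by setup.py
#   docs = {'acl': 'hooks for controlling repository access',
#           'bugzilla': 'hooks for integrating with the Bugzilla bug tracker',
#           ...}
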
class buildhgexe(build_ext):
    description = 'compile hg.exe from mercurial/exewrapper.c'
    user_options = build_ext.user_options + [
        ('long-paths-support', None, 'enable support for long paths on '
                                     'Windows (off by default and '
                                     'experimental)'),
    ]

    LONG_PATHS_MANIFEST = """
    <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
        <application>
            <windowsSettings
            xmlns:ws2="http://schemas.microsoft.com/SMI/2016/WindowsSettings">
                <ws2:longPathAware>true</ws2:longPathAware>
            </windowsSettings>
        </application>
    </assembly>"""

    def initialize_options(self):
        build_ext.initialize_options(self)
        self.long_paths_support = False

    def build_extensions(self):
        if os.name != 'nt':
            return
        if isinstance(self.compiler, HackedMingw32CCompiler):
            self.compiler.compiler_so = self.compiler.compiler  # no -mdll
            self.compiler.dll_libraries = []  # no -lmsrvc90

        # Different Python installs can have different Python library
        # names. e.g. the official CPython distribution uses pythonXY.dll
        # and MinGW uses libpythonX.Y.dll.
        _kernel32 = ctypes.windll.kernel32
        _kernel32.GetModuleFileNameA.argtypes = [ctypes.c_void_p,
                                                 ctypes.c_void_p,
                                                 ctypes.c_ulong]
        _kernel32.GetModuleFileNameA.restype = ctypes.c_ulong
        size = 1000
        buf = ctypes.create_string_buffer(size + 1)
        filelen = _kernel32.GetModuleFileNameA(sys.dllhandle, ctypes.byref(buf),
                                               size)

        if filelen > 0 and filelen != size:
            dllbasename = os.path.basename(buf.value)
            if not dllbasename.lower().endswith(b'.dll'):
                raise SystemExit('Python DLL does not end with .dll: %s' %
                                 dllbasename)
            pythonlib = dllbasename[:-4]
        else:
            log.warn('could not determine Python DLL filename; '
                     'assuming pythonXY')

            hv = sys.hexversion
            pythonlib = 'python%d%d' % (hv >> 24, (hv >> 16) & 0xff)

        log.info('using %s as Python library name' % pythonlib)
        with open('mercurial/hgpythonlib.h', 'wb') as f:
            f.write(b'/* this file is autogenerated by setup.py */\n')
            f.write(b'#define HGPYTHONLIB "%s"\n' % pythonlib)

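        # Worked example (illustrative): for CPython 2.7.16,
        # sys.hexversion == 0x020710f0, so hv >> 24 == 2 and
        # (hv >> 16) & 0xff == 7, yielding the fallback name 'python27'.
        # The generated header would then contain:
        #
        #   /* this file is autogenerated by setup.py */
        #   #define HGPYTHONLIB "python27"
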
        macros = None
        if sys.version_info[0] >= 3:
            macros = [('_UNICODE', None), ('UNICODE', None)]

        objects = self.compiler.compile(['mercurial/exewrapper.c'],
                                        output_dir=self.build_temp,
                                        macros=macros)
        dir = os.path.dirname(self.get_ext_fullpath('dummy'))
        self.hgtarget = os.path.join(dir, 'hg')
        self.compiler.link_executable(objects, self.hgtarget,
                                      libraries=[],
                                      output_dir=self.build_temp)
        if self.long_paths_support:
            self.addlongpathsmanifest()

    def addlongpathsmanifest(self):
        r"""Add manifest pieces so that hg.exe understands long paths

        This is an EXPERIMENTAL feature, use with care.
        To enable long paths support, one needs to do two things:
        - build Mercurial with the --long-paths-support option
        - change HKLM\SYSTEM\CurrentControlSet\Control\FileSystem\
                 LongPathsEnabled to have value 1.

        Please ignore 'warning 81010002: Unrecognized Element "longPathAware"';
        it happens because Mercurial uses mt.exe circa 2008, which is not
        yet aware of long paths support in the manifest (I think so at least).
        This does not stop mt.exe from embedding/merging the XML properly.

        Why resource #1 should be used for .exe manifests, I don't know, and
        I wasn't able to find an explanation for mortals. But it seems to work.
        """
        exefname = self.compiler.executable_filename(self.hgtarget)
        fdauto, manfname = tempfile.mkstemp(suffix='.hg.exe.manifest')
        os.close(fdauto)
        with open(manfname, 'w') as f:
            f.write(self.LONG_PATHS_MANIFEST)
        log.info("long paths manifest is written to '%s'" % manfname)
        inputresource = '-inputresource:%s;#1' % exefname
        outputresource = '-outputresource:%s;#1' % exefname
        log.info("running mt.exe to update hg.exe's manifest in-place")
        # supplying both -manifest and -inputresource to mt.exe makes
        # it merge the embedded and supplied manifests in the -outputresource
        self.spawn(['mt.exe', '-nologo', '-manifest', manfname,
                    inputresource, outputresource])
        log.info("done updating hg.exe's manifest")
        os.remove(manfname)

    @property
    def hgexepath(self):
        dir = os.path.dirname(self.get_ext_fullpath('dummy'))
        return os.path.join(self.build_temp, dir, 'hg.exe')

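# Usage sketch (assumed invocation; needs a Windows build environment with
# mt.exe on PATH):
#
#   python setup.py build_hgexe --long-paths-support
#
# This compiles mercurial/exewrapper.c into hg.exe and then merges the
# longPathAware manifest fragment above into resource #1 of the executable.
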
class hginstall(install):

    user_options = install.user_options + [
        ('old-and-unmanageable', None,
         'noop, present for eggless setuptools compat'),
        ('single-version-externally-managed', None,
         'noop, present for eggless setuptools compat'),
    ]

    # Also helps setuptools not be sad while we refuse to create eggs.
    single_version_externally_managed = True

    def get_sub_commands(self):
        # Screen out egg related commands to prevent egg generation. But allow
        # mercurial.egg-info generation, since that is part of modern
        # packaging.
        excl = set(['bdist_egg'])
        return filter(lambda x: x not in excl, install.get_sub_commands(self))

class hginstalllib(install_lib):
    '''
    This is a specialization of install_lib that replaces the copy_file used
    there so that it supports setting the mode of files after copying them,
    instead of just preserving the mode that the files originally had. If your
    system has a umask of something like 027, preserving the permissions when
    copying will lead to a broken install.

    Note that just passing keep_permissions=False to copy_file would be
    insufficient, as it might still be applying a umask.
    '''

    def run(self):
        realcopyfile = file_util.copy_file
        def copyfileandsetmode(*args, **kwargs):
            src, dst = args[0], args[1]
            dst, copied = realcopyfile(*args, **kwargs)
            if copied:
                st = os.stat(src)
                # Persist executable bit (apply it to group and other if user
                # has it)
                if st[stat.ST_MODE] & stat.S_IXUSR:
                    setmode = int('0755', 8)
                else:
                    setmode = int('0644', 8)
                m = stat.S_IMODE(st[stat.ST_MODE])
                m = (m & ~int('0777', 8)) | setmode
                os.chmod(dst, m)
        file_util.copy_file = copyfileandsetmode
        try:
            install_lib.run(self)
        finally:
            file_util.copy_file = realcopyfile

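# Worked example (illustrative): copying a source file whose mode is 0o2754
# (setgid plus user-executable), the S_IXUSR test selects setmode = 0o755,
# and the final mode is (0o2754 & ~0o777) | 0o755 == 0o2755: the setgid bit
# survives while the permission bits are normalized regardless of the
# installing user's umask.
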
class hginstallscripts(install_scripts):
    '''
    This is a specialization of install_scripts that replaces @LIBDIR@ with
    the configured directory for modules. If possible, the path is made
    relative to the directory for scripts.
    '''

    def initialize_options(self):
        install_scripts.initialize_options(self)

        self.install_lib = None

    def finalize_options(self):
        install_scripts.finalize_options(self)
        self.set_undefined_options('install',
                                   ('install_lib', 'install_lib'))

    def run(self):
        install_scripts.run(self)

        # It only makes sense to replace @LIBDIR@ with the install path if
        # the install path is known. For wheels, the logic below calculates
        # the libdir to be "../..". This is because the internal layout of a
        # wheel archive looks like:
        #
        #   mercurial-3.6.1.data/scripts/hg
        #   mercurial/__init__.py
        #
        # When installing wheels, the subdirectories of the "<pkg>.data"
        # directory are translated to system local paths and files therein
        # are copied in place. The mercurial/* files are installed into the
        # site-packages directory. However, the site-packages directory
        # isn't known until wheel install time. This means we have no clue
        # at wheel generation time what the installed site-packages directory
        # will be. And, wheels don't appear to provide the ability to register
        # custom code to run during wheel installation. This all means that
        # we can't reliably set the libdir in wheels: the default behavior
        # of looking in sys.path must do.

        if (os.path.splitdrive(self.install_dir)[0] !=
            os.path.splitdrive(self.install_lib)[0]):
            # can't make relative paths from one drive to another, so use an
            # absolute path instead
            libdir = self.install_lib
        else:
            common = os.path.commonprefix((self.install_dir, self.install_lib))
            rest = self.install_dir[len(common):]
            uplevel = len([n for n in os.path.split(rest) if n])

            libdir = uplevel * ('..' + os.sep) + self.install_lib[len(common):]

        for outfile in self.outfiles:
            with open(outfile, 'rb') as fp:
                data = fp.read()

            # skip binary files
            if b'\0' in data:
                continue

            # During local installs, the shebang will be rewritten to the final
            # install path. During wheel packaging, the shebang has a special
            # value.
            if data.startswith(b'#!python'):
                log.info('not rewriting @LIBDIR@ in %s because install path '
                         'not known' % outfile)
                continue

            data = data.replace(b'@LIBDIR@', libdir.encode(libdir_escape))
            with open(outfile, 'wb') as fp:
                fp.write(data)

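# Worked example (illustrative paths): with install_dir '/usr/local/bin/'
# and install_lib '/usr/local/lib/python2.7/site-packages/', commonprefix
# yields '/usr/local/', rest is 'bin/', uplevel is 1, and therefore
#
#   libdir == '../lib/python2.7/site-packages/'
#
# which replaces @LIBDIR@ in every installed text script.
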
cmdclass = {'build': hgbuild,
            'build_mo': hgbuildmo,
            'build_ext': hgbuildext,
            'build_py': hgbuildpy,
            'build_scripts': hgbuildscripts,
            'build_hgextindex': buildhgextindex,
            'install': hginstall,
            'install_lib': hginstalllib,
            'install_scripts': hginstallscripts,
            'build_hgexe': buildhgexe,
            }

packages = ['mercurial',
            'mercurial.cext',
            'mercurial.cffi',
            'mercurial.hgweb',
            'mercurial.pure',
            'mercurial.thirdparty',
            'mercurial.thirdparty.attr',
            'mercurial.thirdparty.zope',
            'mercurial.thirdparty.zope.interface',
            'mercurial.utils',
            'mercurial.revlogutils',
            'mercurial.testing',
            'hgext', 'hgext.convert', 'hgext.fsmonitor',
            'hgext.fastannotate',
            'hgext.fsmonitor.pywatchman',
            'hgext.infinitepush',
            'hgext.highlight',
            'hgext.largefiles', 'hgext.lfs', 'hgext.narrow',
            'hgext.remotefilelog',
            'hgext.zeroconf', 'hgext3rd',
            'hgdemandimport']
if sys.version_info[0] == 2:
    packages.extend(['mercurial.thirdparty.concurrent',
                     'mercurial.thirdparty.concurrent.futures'])

common_depends = ['mercurial/bitmanipulation.h',
                  'mercurial/compat.h',
                  'mercurial/cext/util.h']
common_include_dirs = ['mercurial']

osutil_cflags = []
osutil_ldflags = []

# platform specific macros
for plat, func in [('bsd', 'setproctitle')]:
    if re.search(plat, sys.platform) and hasfunction(new_compiler(), func):
        osutil_cflags.append('-DHAVE_%s' % func.upper())

for plat, macro, code in [
    ('bsd|darwin', 'BSD_STATFS', '''
     #include <sys/param.h>
     #include <sys/mount.h>
     int main() { struct statfs s; return sizeof(s.f_fstypename); }
     '''),
    ('linux', 'LINUX_STATFS', '''
     #include <linux/magic.h>
     #include <sys/vfs.h>
     int main() { struct statfs s; return sizeof(s.f_type); }
     '''),
]:
    if re.search(plat, sys.platform) and cancompile(new_compiler(), code):
        osutil_cflags.append('-DHAVE_%s' % macro)

if sys.platform == 'darwin':
    osutil_ldflags += ['-framework', 'ApplicationServices']

xdiff_srcs = [
    'mercurial/thirdparty/xdiff/xdiffi.c',
    'mercurial/thirdparty/xdiff/xprepare.c',
    'mercurial/thirdparty/xdiff/xutils.c',
]

xdiff_headers = [
    'mercurial/thirdparty/xdiff/xdiff.h',
    'mercurial/thirdparty/xdiff/xdiffi.h',
    'mercurial/thirdparty/xdiff/xinclude.h',
    'mercurial/thirdparty/xdiff/xmacros.h',
    'mercurial/thirdparty/xdiff/xprepare.h',
    'mercurial/thirdparty/xdiff/xtypes.h',
    'mercurial/thirdparty/xdiff/xutils.h',
]

class RustCompilationError(CCompilerError):
    """Exception class for Rust compilation errors."""

class RustExtension(Extension):
    """Base class for concrete Rust Extension classes.
    """

    rusttargetdir = os.path.join('rust', 'target', 'release')

    def __init__(self, mpath, sources, rustlibname, subcrate,
                 py3_features=None, **kw):
        Extension.__init__(self, mpath, sources, **kw)
        if hgrustext is None:
            return
        srcdir = self.rustsrcdir = os.path.join('rust', subcrate)
        self.py3_features = py3_features

        # adding Rust source and control files to depends so that the extension
        # gets rebuilt if they've changed
        self.depends.append(os.path.join(srcdir, 'Cargo.toml'))
        cargo_lock = os.path.join(srcdir, 'Cargo.lock')
        if os.path.exists(cargo_lock):
            self.depends.append(cargo_lock)
        for dirpath, subdir, fnames in os.walk(os.path.join(srcdir, 'src')):
            self.depends.extend(os.path.join(dirpath, fname)
                                for fname in fnames
                                if os.path.splitext(fname)[1] == '.rs')

    def rustbuild(self):
        if hgrustext is None:
            return
        env = os.environ.copy()
        if 'HGTEST_RESTOREENV' in env:
            # Mercurial tests change HOME to a temporary directory,
            # but, if installed with rustup, the Rust toolchain needs
            # HOME to be correct (otherwise the 'no default toolchain'
            # error message is issued and the build fails).
            # This happens currently with test-hghave.t, which does
            # invoke this build.

            # Unix-only fix (os.path.expanduser is not really reliable if
            # HOME is shadowed like this)
            import pwd
            env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir

        cargocmd = ['cargo', 'build', '-vv', '--release']
        if sys.version_info[0] == 3 and self.py3_features is not None:
            cargocmd.extend(('--features', self.py3_features,
                             '--no-default-features'))
        try:
            subprocess.check_call(cargocmd, env=env, cwd=self.rustsrcdir)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise RustCompilationError("Cargo not found")
            elif exc.errno == errno.EACCES:
                raise RustCompilationError(
                    "Cargo found, but permission to execute it is denied")
            else:
                raise
        except subprocess.CalledProcessError:
            raise RustCompilationError(
                "Cargo failed. Working directory: %r, "
                "command: %r, environment: %r"
                % (self.rustsrcdir, cargocmd, env))

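# Example command line (illustrative): for a Python 3 build of the
# hg-cpython crate, rustbuild() above ends up invoking
#
#   cargo build -vv --release --features python3 --no-default-features
#
# with rust/hg-cpython as the working directory.
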
class RustEnhancedExtension(RustExtension):
    """A C Extension, conditionally enhanced with Rust code.

    If the HGRUSTEXT environment variable is set to something other
    than 'cpython', the Rust sources get compiled and linked within
    the C target shared library object.
    """

    def __init__(self, mpath, sources, rustlibname, subcrate, **kw):
        RustExtension.__init__(self, mpath, sources, rustlibname, subcrate,
                               **kw)
        if hgrustext != 'direct-ffi':
            return
        self.extra_compile_args.append('-DWITH_RUST')
        self.libraries.append(rustlibname)
        self.library_dirs.append(self.rusttargetdir)

class RustStandaloneExtension(RustExtension):

    def __init__(self, pydottedname, rustcrate, dylibname, **kw):
        RustExtension.__init__(self, pydottedname, [], dylibname, rustcrate,
                               **kw)
        self.dylibname = dylibname

    def build(self, target_dir):
        self.rustbuild()
        target = [target_dir]
        target.extend(self.name.split('.'))
        ext = '.so'  # TODO Unix only
        target[-1] += ext
        shutil.copy2(os.path.join(self.rusttargetdir, self.dylibname + ext),
                     os.path.join(*target))

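# For illustration (assumed paths; Unix-only, as the TODO above notes): with
# HGRUSTEXT=cpython, building the standalone extension copies
#
#   rust/target/release/librusthg.so  ->  <build_lib>/mercurial/rustext.so
#
# since the destination is derived from the dotted module name
# 'mercurial.rustext'.
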
extmodules = [
    Extension('mercurial.cext.base85', ['mercurial/cext/base85.c'],
              include_dirs=common_include_dirs,
              depends=common_depends),
    Extension('mercurial.cext.bdiff', ['mercurial/bdiff.c',
                                       'mercurial/cext/bdiff.c'] + xdiff_srcs,
              include_dirs=common_include_dirs,
              depends=common_depends + ['mercurial/bdiff.h'] + xdiff_headers),
    Extension('mercurial.cext.mpatch', ['mercurial/mpatch.c',
                                        'mercurial/cext/mpatch.c'],
              include_dirs=common_include_dirs,
              depends=common_depends),
    RustEnhancedExtension(
        'mercurial.cext.parsers', ['mercurial/cext/charencode.c',
                                   'mercurial/cext/dirs.c',
                                   'mercurial/cext/manifest.c',
                                   'mercurial/cext/parsers.c',
                                   'mercurial/cext/pathencode.c',
                                   'mercurial/cext/revlog.c'],
        'hgdirectffi',
        'hg-direct-ffi',
        include_dirs=common_include_dirs,
        depends=common_depends + ['mercurial/cext/charencode.h',
                                  'mercurial/cext/revlog.h',
                                  'rust/hg-core/src/ancestors.rs',
                                  'rust/hg-core/src/lib.rs']),
    Extension('mercurial.cext.osutil', ['mercurial/cext/osutil.c'],
              include_dirs=common_include_dirs,
              extra_compile_args=osutil_cflags,
              extra_link_args=osutil_ldflags,
              depends=common_depends),
    Extension(
        'mercurial.thirdparty.zope.interface._zope_interface_coptimizations', [
        'mercurial/thirdparty/zope/interface/_zope_interface_coptimizations.c',
        ]),
    Extension('hgext.fsmonitor.pywatchman.bser',
              ['hgext/fsmonitor/pywatchman/bser.c']),
    ]

if hgrustext == 'cpython':
    extmodules.append(
        RustStandaloneExtension('mercurial.rustext', 'hg-cpython', 'librusthg',
                                py3_features='python3')
    )


sys.path.insert(0, 'contrib/python-zstandard')
import setup_zstd
extmodules.append(setup_zstd.get_c_extension(
    name='mercurial.zstd',
    root=os.path.abspath(os.path.dirname(__file__))))

try:
    from distutils import cygwinccompiler

    # the -mno-cygwin option has been deprecated for years
    mingw32compilerclass = cygwinccompiler.Mingw32CCompiler

    class HackedMingw32CCompiler(cygwinccompiler.Mingw32CCompiler):
        def __init__(self, *args, **kwargs):
            mingw32compilerclass.__init__(self, *args, **kwargs)
            for i in 'compiler compiler_so linker_exe linker_so'.split():
                try:
                    getattr(self, i).remove('-mno-cygwin')
                except ValueError:
                    pass

    cygwinccompiler.Mingw32CCompiler = HackedMingw32CCompiler
except ImportError:
    # the cygwinccompiler package is not available on some Python
    # distributions like the ones from the optware project for Synology
    # DiskStation boxes
    class HackedMingw32CCompiler(object):
        pass

if os.name == 'nt':
    # Allow compiler/linker flags to be added to Visual Studio builds. Passing
    # extra_link_args to distutils.extensions.Extension() doesn't have any
    # effect.
    from distutils import msvccompiler

    msvccompilerclass = msvccompiler.MSVCCompiler

    class HackedMSVCCompiler(msvccompiler.MSVCCompiler):
        def initialize(self):
            msvccompilerclass.initialize(self)
            # "warning LNK4197: export 'func' specified multiple times"
            self.ldflags_shared.append('/ignore:4197')
            self.ldflags_shared_debug.append('/ignore:4197')

    msvccompiler.MSVCCompiler = HackedMSVCCompiler

packagedata = {'mercurial': ['locale/*/LC_MESSAGES/hg.mo',
                             'help/*.txt',
                             'help/internals/*.txt',
                             'default.d/*.rc',
                             'dummycert.pem']}

def ordinarypath(p):
    return p and p[0] != '.' and p[-1] != '~'

for root in ('templates',):
    for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
        curdir = curdir.split(os.sep, 1)[1]
        dirs[:] = filter(ordinarypath, dirs)
        for f in filter(ordinarypath, files):
            f = os.path.join(curdir, f)
            packagedata['mercurial'].append(f)

datafiles = []

# distutils expects version to be str/unicode. Converting it to
# unicode on Python 2 still works because it won't contain any
# non-ascii bytes and will be implicitly converted back to bytes
# when operated on.
assert isinstance(version, bytes)
setupversion = version.decode('ascii')

extra = {}

if issetuptools:
    extra['python_requires'] = supportedpy
if py2exeloaded:
    extra['console'] = [
        {'script':'hg',
         'copyright':'Copyright (C) 2005-2019 Matt Mackall and others',
         'product_version':version}]
    # sub command of 'build' because 'py2exe' does not handle sub_commands
    build.sub_commands.insert(0, ('build_hgextindex', None))
    # put dlls in sub directory so that they won't pollute PATH
    extra['zipfile'] = 'lib/library.zip'

if os.name == 'nt':
    # Windows binary file versions for exe/dll files must have the
    # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
    setupversion = setupversion.split(r'+', 1)[0]

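# Example (illustrative version string): a development version such as
# '5.0.1+53-0123abcd' is trimmed to '5.0.1' here, since the Windows version
# resource format accepts only the numeric W.X.Y.Z form.
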
if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
    version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[1].splitlines()
    if version:
        version = version[0]
        if sys.version_info[0] == 3:
            version = version.decode('utf-8')
        xcode4 = (version.startswith('Xcode') and
                  StrictVersion(version.split()[1]) >= StrictVersion('4.0'))
        xcode51 = re.match(r'^Xcode\s+5\.1', version) is not None
    else:
        # xcodebuild returns empty on OS X Lion with XCode 4.3 not
        # installed, but instead with only command-line tools. Assume
        # that only happens on >= Lion, thus no PPC support.
        xcode4 = True
        xcode51 = False

    # XCode 4.0 dropped support for ppc architecture, which is hardcoded in
    # distutils.sysconfig
    if xcode4:
        os.environ['ARCHFLAGS'] = ''

    # XCode 5.1 changes clang such that it now fails to compile if the
    # -mno-fused-madd flag is passed, but the version of Python shipped with
    # OS X 10.9 Mavericks includes this flag. This causes problems in all
    # C extension modules, and a bug has been filed upstream at
    # http://bugs.python.org/issue21244. We also need to patch this here
    # so Mercurial can continue to compile in the meantime.
    if xcode51:
        cflags = get_config_var('CFLAGS')
        if cflags and re.search(r'-mno-fused-madd\b', cflags) is not None:
            os.environ['CFLAGS'] = (
                os.environ.get('CFLAGS', '') + ' -Qunused-arguments')

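# For illustration: if sysconfig reports CFLAGS containing '-mno-fused-madd',
# the override above appends '-Qunused-arguments', so clang ignores the
# obsolete flag instead of failing every C extension compile.
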
setup(name='mercurial',
      version=setupversion,
      author='Matt Mackall and many others',
      author_email='mercurial@mercurial-scm.org',
      url='https://mercurial-scm.org/',
      download_url='https://mercurial-scm.org/release/',
      description=('Fast scalable distributed SCM (revision control, version '
                   'control) system'),
      long_description=('Mercurial is a distributed SCM tool written in Python.'
                        ' It is used by a number of large projects that require'
                        ' fast, reliable distributed revision control, such as '
                        'Mozilla.'),
      license='GNU GPLv2 or any later version',
      classifiers=[
          'Development Status :: 6 - Mature',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: GNU General Public License (GPL)',
          'Natural Language :: Danish',
          'Natural Language :: English',
          'Natural Language :: German',
          'Natural Language :: Italian',
          'Natural Language :: Japanese',
          'Natural Language :: Portuguese (Brazilian)',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: OS Independent',
          'Operating System :: POSIX',
          'Programming Language :: C',
          'Programming Language :: Python',
          'Topic :: Software Development :: Version Control',
      ],
      scripts=scripts,
      packages=packages,
      ext_modules=extmodules,
      data_files=datafiles,
      package_data=packagedata,
      cmdclass=cmdclass,
      distclass=hgdist,
      options={
          'py2exe': {
              'packages': [
                  'hgdemandimport',
                  'hgext',
                  'email',
                  # implicitly imported per module policy
                  # (cffi wouldn't be used as a frozen exe)
                  'mercurial.cext',
                  #'mercurial.cffi',
                  'mercurial.pure',
              ],
          },
          'bdist_mpkg': {
              'zipdist': False,
              'license': 'COPYING',
              'readme': 'contrib/packaging/macosx/Readme.html',
              'welcome': 'contrib/packaging/macosx/Welcome.html',
          },
      },
      **extra)