# rebase.py - rebasing feature for mercurial
#
# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''command to move sets of revisions to a different ancestor

This extension lets you rebase changesets in an existing Mercurial
repository.

For more information:
http://mercurial.selenic.com/wiki/RebaseExtension
'''
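
# To enable this extension, add it to your configuration (illustrative hgrc
# snippet; enabling bundled extensions this way is standard Mercurial usage,
# and the snippet is not part of the original file):
#
#   [extensions]
#   rebase =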

from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks
from mercurial import extensions, patch, scmutil, phases, obsolete, error
from mercurial.commands import templateopts
from mercurial.node import nullrev
from mercurial.lock import release
from mercurial.i18n import _
import os, errno

nullmerge = -2
revignored = -3
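
# Note (added for clarity, not in the original file): throughout this module a
# rebased revision in the 'state' mapping points to its newly committed
# revision once done; the checks below suggest that -1 (nullrev) marks a
# revision still waiting to be rebased, while nullmerge and revignored above
# mark revisions that are dropped or deliberately ignored.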

cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'

def _savegraft(ctx, extra):
    s = ctx.extra().get('source', None)
    if s is not None:
        extra['source'] = s

def _savebranch(ctx, extra):
    extra['branch'] = ctx.branch()

def _makeextrafn(copiers):
    """make an extrafn out of the given copy-functions.

    A copy function takes a context and an extra dict, and mutates the
    extra dict as needed based on the given context.
    """
    def extrafn(ctx, extra):
        for c in copiers:
            c(ctx, extra)
    return extrafn

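# Example (illustrative only, not part of the original file): calling
# _makeextrafn([_savegraft, _savebranch]) yields a function that, for each
# rebased changeset, copies both the graft 'source' marker and the original
# branch name into the new changeset's extra dict.
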
@command('rebase',
    [('s', 'source', '',
      _('rebase from the specified changeset'), _('REV')),
     ('b', 'base', '',
      _('rebase from the base of the specified changeset '
        '(up to greatest common ancestor of base and dest)'),
      _('REV')),
     ('r', 'rev', [],
      _('rebase these revisions'),
      _('REV')),
     ('d', 'dest', '',
      _('rebase onto the specified changeset'), _('REV')),
     ('', 'collapse', False, _('collapse the rebased changesets')),
     ('m', 'message', '',
      _('use text as collapse commit message'), _('TEXT')),
     ('e', 'edit', False, _('invoke editor on commit messages')),
     ('l', 'logfile', '',
      _('read collapse commit message from file'), _('FILE')),
     ('', 'keep', False, _('keep original changesets')),
     ('', 'keepbranches', False, _('keep original branch names')),
     ('D', 'detach', False, _('(DEPRECATED)')),
     ('t', 'tool', '', _('specify merge tool')),
     ('c', 'continue', False, _('continue an interrupted rebase')),
     ('a', 'abort', False, _('abort an interrupted rebase'))] +
    templateopts,
    _('[-s REV | -b REV] [-d REV] [OPTION]'))
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    In its default configuration, Mercurial will prevent you from
    rebasing published changes. See :hg:`help phases` for details.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the current branch tip as the destination. (The
    destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    For advanced usage, a third way is available through the ``--rev``
    option. It allows you to specify an arbitrary set of changesets to
    rebase. Descendants of revs you specify with this option are not
    automatically included in the rebase.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets. Some
    changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the branch tip of a named branch
    with two heads. You need to explicitly specify source and/or
    destination (or ``update`` to the other head, if it's the head of
    the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.
    """
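    # Illustrative invocations (hypothetical revision and branch names, added
    # as a usage sketch; not part of the original file):
    #   hg rebase -s 42 -d default    # move rev 42 and its descendants
    #   hg rebase -b foo -d default   # rebase the whole branch containing foo
    #   hg rebase --continue          # resume after resolving conflicts
    #   hg rebase --abort             # give up and restore the original state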
    originalwd = target = None
    activebookmark = None
    external = nullrev
    state = {}
    skipped = set()
    targetancestors = set()

    editor = None
    if opts.get('edit'):
        editor = cmdutil.commitforceeditor

    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        extrafns = [_savegraft]
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external, activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    hint = _('use "hg rebase --abort" to clear broken state')
                    raise util.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'source and a base'))
            if revf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if revf and srcf:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if not destf:
                # Destination defaults to the latest revision in the
                # current branch
                branch = repo[None].branch()
                dest = repo[branch]
            else:
                dest = scmutil.revsingle(repo, destf)

            if revf:
                rebaseset = scmutil.revrange(repo, revf)
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                rebaseset = repo.revs('(%ld)::', src)
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                rebaseset = repo.revs(
                    '(children(ancestor(%ld, %d)) and ::(%ld))::',
                    base, dest, base)
            if rebaseset:
                root = min(rebaseset)
            else:
                root = None

            if not rebaseset:
                repo.ui.debug('base is ancestor of destination\n')
                result = None
            elif (not (keepf or obsolete._enabled)
                  and repo.revs('first(children(%ld) - %ld)',
                                rebaseset, rebaseset)):
                raise util.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))
            else:
                result = buildstate(repo, dest, rebaseset, collapsef)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            elif not keepf and not repo[root].mutable():
                raise util.Abort(_("can't rebase immutable changeset %s")
                                 % repo[root],
                                 hint=_('see hg help phases for details'))
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = repo.changelog.ancestors([target],
                                                               inclusive=True)
                    external = externalparent(repo, state, targetancestors)

        if keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            extrafns.insert(0, _savebranch)
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise util.Abort(_('cannot collapse multiple named '
                        'branches'))


        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target], inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._bookmarkcurrent
        if activebookmark:
            bookmarks.unsetcurrent(repo)

        extrafn = _makeextrafn(extrafns)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external, activebookmark)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                        stats = rebasenode(repo, rev, p1, state, collapsef)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '')
                cmdutil.duplicatecopies(repo, rev, target)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
                                          editor=editor)
                else:
                    # Skip commit if we are collapsing
                    repo.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                  extrafn=extrafn, editor=editor)

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            collapsedas = None
            if collapsef:
                collapsedas = newrev
            clearrebased(ui, repo, state, skipped, collapsedas)

        if currentbookmarks:
            updatebookmarks(repo, targetnode, nstate, currentbookmarks)

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
            repo['.'].node() == repo._bookmarks[activebookmark]):
                bookmarks.setcurrent(repo, activebookmark)

    finally:
        release(lock, wlock)

def externalparent(repo, state, targetancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state are collapsed on top of targetancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        for p in repo[rev].parents():
            if (p.rev() not in state
                and p.rev() not in targetancestors):
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise util.Abort(_('unable to collapse on top of %s, there is more '
                       'than one external parent: %s') %
                     (max(targetancestors),
                      ', '.join(str(p) for p in sorted(parents))))

def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None):
    'Commit the changes and store useful information in extra'
    try:
        repo.setparents(repo[p1].node(), repo[p2].node())
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        extra = {'rebase_source': ctx.hex()}
        if extrafn:
            extrafn(ctx, extra)
        # Commit might fail if unresolved files exist
        newrev = repo.commit(text=commitmsg, user=ctx.user(),
                             date=ctx.date(), extra=extra, editor=editor)
        repo.dirstate.setbranch(repo[newrev].branch())
        targetphase = max(ctx.phase(), phases.draft)
        # retractboundary doesn't overwrite upper phase inherited from parent
        newnode = repo[newrev].node()
        if newnode:
            phases.retractboundary(repo, targetphase, [newnode])
        return newrev
    except util.Abort:
        # Invalidate the previous setparents
        repo.dirstate.invalidate()
        raise

def rebasenode(repo, rev, p1, state, collapse):
    'Rebase a single revision'
    # Merge phase
    # Update to target and merge it with local
    if repo['.'].rev() != repo[p1].rev():
        repo.ui.debug(" update to %d:%s\n" % (repo[p1].rev(), repo[p1]))
        merge.update(repo, p1, False, True, False)
    else:
        repo.ui.debug(" already in target\n")
    repo.dirstate.write()
    repo.ui.debug(" merge against %d:%s\n" % (repo[rev].rev(), repo[rev]))
    if repo[rev].rev() == repo[min(state)].rev():
        # Case (1) initial changeset of a non-detaching rebase.
        # Let the merge mechanism find the base itself.
        base = None
    elif not repo[rev].p2():
        # Case (2) detaching the node with a single parent, use this parent
        base = repo[rev].p1().node()
    else:
        # In case of merge, we need to pick the right parent as merge base.
        #
        # Imagine we have:
        # - M: the revision currently being rebased in this step
        # - A: one parent of M
        # - B: second parent of M
        # - D: destination of this merge step (p1 var)
        #
        # If we are rebasing on D, D is the successor of A or B. The right
        # merge base is the one D succeeds. We pretend it is B for the rest
        # of this comment.
        #
        # If we pick B as the base, the merge involves:
        # - changes from B to M (actual changeset payload)
        # - changes from B to D (induced by rebase), as D is a rebased
        #   version of B
        # Which exactly represents the rebase operation.
        #
        # If we pick A as the base, the merge involves:
        # - changes from A to M (actual changeset payload)
        # - changes from A to D (which include changes between unrelated A and
        #   B, plus changes induced by rebase)
        # Which does not represent anything sensible and creates a lot of
        # conflicts.
        for p in repo[rev].parents():
            if state.get(p.rev()) == repo[p1].rev():
                base = p.node()
                break
    if base is not None:
        repo.ui.debug(" detach base %d:%s\n" % (repo[base].rev(), repo[base]))
    # When collapsing in-place, the parent is the common ancestor, we
    # have to allow merging with it.
    return merge.update(repo, rev, True, True, False, base, collapse)

def nearestrebased(repo, rev, state):
    """return the nearest ancestors of rev in the rebase result"""
    rebased = [r for r in state if state[r] > nullmerge]
    candidates = repo.revs('max(%ld and (::%d))', rebased, rev)
    if candidates:
        return state[candidates[0]]
    else:
        return None

def defineparents(repo, rev, target, state, targetancestors):
    'Return the new parent relationship of the revision that will be rebased'
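    # Sketch of the common case (hypothetical revisions, added for clarity):
    # for a non-merge revision whose parent is an ancestor of the target, the
    # logic below yields (target, nullrev); once its parent has itself been
    # rebased, it yields (state[parent], nullrev) instead.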
    parents = repo[rev].parents()
    p1 = p2 = nullrev

    P1n = parents[0].rev()
    if P1n in targetancestors:
        p1 = target
    elif P1n in state:
        if state[P1n] == nullmerge:
            p1 = target
        elif state[P1n] == revignored:
            p1 = nearestrebased(repo, P1n, state)
            if p1 is None:
                p1 = target
        else:
            p1 = state[P1n]
    else: # P1n external
        p1 = target
        p2 = P1n

    if len(parents) == 2 and parents[1].rev() not in targetancestors:
        P2n = parents[1].rev()
        # interesting second parent
        if P2n in state:
            if p1 == target: # P1n in targetancestors or external
                p1 = state[P2n]
            elif state[P2n] == revignored:
                p2 = nearestrebased(repo, P2n, state)
                if p2 is None:
                    # no ancestors rebased yet, detach
                    p2 = target
            else:
                p2 = state[P2n]
        else: # P2n external
            if p2 != nullrev: # P1n external too => rev is a merged revision
                raise util.Abort(_('cannot use revision %d as base, result '
                                   'would have 3 parents') % rev)
            p2 = P2n
    repo.ui.debug(" future parents are %d and %d\n" %
                  (repo[p1].rev(), repo[p2].rev()))
    return p1, p2

def isagitpatch(repo, patchname):
    'Return true if the given patch is in git format'
    mqpatch = os.path.join(repo.mq.path, patchname)
    for line in patch.linereader(file(mqpatch, 'rb')):
        if line.startswith('diff --git'):
            return True
    return False

def updatemq(repo, state, skipped, **opts):
    'Update rebased mq patches - finalize and then import them'
    mqrebase = {}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
                          (rev, p.name))
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.debug('import mq patch %d (%s)\n' % (state[rev], name))
                mq.qimport(repo, (), patchname=name, git=isgit,
                           rev=[str(state[rev])])
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [s for s in original_series
                     if mq.guard_re.split(s, 1)[0] not in skippedpatches]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()

def updatebookmarks(repo, targetnode, nstate, originalbookmarks):
    'Move bookmarks to their correct changesets, and delete divergent ones'
    marks = repo._bookmarks
    for k, v in originalbookmarks.iteritems():
        if v in nstate:
            # update the bookmarks for revs that have moved
            marks[k] = nstate[v]
            bookmarks.deletedivergent(repo, [targetnode], k)

    marks.write()

def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
                external, activebookmark):
    'Store the current status to allow recovery'
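    # Layout of .hg/rebasestate as written below (summary added for clarity):
    #   three hex nodes (original working directory, target, external parent),
    #   three integer flags (collapse, keep, keepbranches),
    #   the active bookmark name (possibly empty),
    #   then one 'oldrev:newrev' entry per rebased changeset.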
607 | f = repo.opener("rebasestate", "w") |
|
607 | f = repo.opener("rebasestate", "w") | |
608 | f.write(repo[originalwd].hex() + '\n') |
|
608 | f.write(repo[originalwd].hex() + '\n') | |
609 | f.write(repo[target].hex() + '\n') |
|
609 | f.write(repo[target].hex() + '\n') | |
610 | f.write(repo[external].hex() + '\n') |
|
610 | f.write(repo[external].hex() + '\n') | |
611 | f.write('%d\n' % int(collapse)) |
|
611 | f.write('%d\n' % int(collapse)) | |
612 | f.write('%d\n' % int(keep)) |
|
612 | f.write('%d\n' % int(keep)) | |
613 | f.write('%d\n' % int(keepbranches)) |
|
613 | f.write('%d\n' % int(keepbranches)) | |
614 | f.write('%s\n' % (activebookmark or '')) |
|
614 | f.write('%s\n' % (activebookmark or '')) | |
615 | for d, v in state.iteritems(): |
|
615 | for d, v in state.iteritems(): | |
616 | oldrev = repo[d].hex() |
|
616 | oldrev = repo[d].hex() | |
617 | if v > nullmerge: |
|
617 | if v > nullmerge: | |
618 | newrev = repo[v].hex() |
|
618 | newrev = repo[v].hex() | |
619 | else: |
|
619 | else: | |
620 | newrev = v |
|
620 | newrev = v | |
621 | f.write("%s:%s\n" % (oldrev, newrev)) |
|
621 | f.write("%s:%s\n" % (oldrev, newrev)) | |
622 | f.close() |
|
622 | f.close() | |
623 | repo.ui.debug('rebase status stored\n') |
|
623 | repo.ui.debug('rebase status stored\n') | |
624 |
|
624 | |||
625 | def clearstatus(repo): |
|
625 | def clearstatus(repo): | |
626 | 'Remove the status files' |
|
626 | 'Remove the status files' | |
627 | util.unlinkpath(repo.join("rebasestate"), ignoremissing=True) |
|
627 | util.unlinkpath(repo.join("rebasestate"), ignoremissing=True) | |
628 |
|
628 | |||
629 | def restorestatus(repo): |
|
629 | def restorestatus(repo): | |
630 | 'Restore a previously stored status' |
|
630 | 'Restore a previously stored status' | |
631 | try: |
|
631 | try: | |
632 | target = None |
|
632 | target = None | |
633 | collapse = False |
|
633 | collapse = False | |
634 | external = nullrev |
|
634 | external = nullrev | |
635 | activebookmark = None |
|
635 | activebookmark = None | |
636 | state = {} |
|
636 | state = {} | |
637 | f = repo.opener("rebasestate") |
|
637 | f = repo.opener("rebasestate") | |
638 | for i, l in enumerate(f.read().splitlines()): |
|
638 | for i, l in enumerate(f.read().splitlines()): | |
639 | if i == 0: |
|
639 | if i == 0: | |
640 | originalwd = repo[l].rev() |
|
640 | originalwd = repo[l].rev() | |
641 | elif i == 1: |
|
641 | elif i == 1: | |
642 | target = repo[l].rev() |
|
642 | target = repo[l].rev() | |
643 | elif i == 2: |
|
643 | elif i == 2: | |
644 | external = repo[l].rev() |
|
644 | external = repo[l].rev() | |
645 | elif i == 3: |
|
645 | elif i == 3: | |
646 | collapse = bool(int(l)) |
|
646 | collapse = bool(int(l)) | |
647 | elif i == 4: |
|
647 | elif i == 4: | |
648 | keep = bool(int(l)) |
|
648 | keep = bool(int(l)) | |
649 | elif i == 5: |
|
649 | elif i == 5: | |
650 | keepbranches = bool(int(l)) |
|
650 | keepbranches = bool(int(l)) | |
651 | elif i == 6 and not (len(l) == 81 and ':' in l): |
|
651 | elif i == 6 and not (len(l) == 81 and ':' in l): | |
652 | # line 6 is a recent addition, so for backwards compatibility |
|
652 | # line 6 is a recent addition, so for backwards compatibility | |
653 | # check that the line doesn't look like the oldrev:newrev lines |
|
653 | # check that the line doesn't look like the oldrev:newrev lines | |
654 | activebookmark = l |
|
654 | activebookmark = l | |
655 | else: |
|
655 | else: | |
656 | oldrev, newrev = l.split(':') |
|
656 | oldrev, newrev = l.split(':') | |
657 | if newrev in (str(nullmerge), str(revignored)): |
|
657 | if newrev in (str(nullmerge), str(revignored)): | |
658 | state[repo[oldrev].rev()] = int(newrev) |
|
658 | state[repo[oldrev].rev()] = int(newrev) | |
659 | else: |
|
659 | else: | |
660 | state[repo[oldrev].rev()] = repo[newrev].rev() |
|
660 | state[repo[oldrev].rev()] = repo[newrev].rev() | |
661 | skipped = set() |
|
661 | skipped = set() | |
662 | # recompute the set of skipped revs |
|
662 | # recompute the set of skipped revs | |
663 | if not collapse: |
|
663 | if not collapse: | |
664 | seen = set([target]) |
|
664 | seen = set([target]) | |
665 | for old, new in sorted(state.items()): |
|
665 | for old, new in sorted(state.items()): | |
666 | if new != nullrev and new in seen: |
|
666 | if new != nullrev and new in seen: | |
667 | skipped.add(old) |
|
667 | skipped.add(old) | |
668 | seen.add(new) |
|
668 | seen.add(new) | |
669 | repo.ui.debug('computed skipped revs: %s\n' % skipped) |
|
669 | repo.ui.debug('computed skipped revs: %s\n' % skipped) | |
670 | repo.ui.debug('rebase status resumed\n') |
|
670 | repo.ui.debug('rebase status resumed\n') | |
671 | return (originalwd, target, state, skipped, |
|
671 | return (originalwd, target, state, skipped, | |
672 | collapse, keep, keepbranches, external, activebookmark) |
|
672 | collapse, keep, keepbranches, external, activebookmark) | |
673 | except IOError, err: |
|
673 | except IOError, err: | |
674 | if err.errno != errno.ENOENT: |
|
674 | if err.errno != errno.ENOENT: | |
675 | raise |
|
675 | raise | |
676 | raise util.Abort(_('no rebase in progress')) |
|
676 | raise util.Abort(_('no rebase in progress')) | |
677 |
|
677 | |||
678 | def inrebase(repo, originalwd, state): |
|
678 | def inrebase(repo, originalwd, state): | |
679 | '''check whether the working dir is in an interrupted rebase''' |
|
679 | '''check whether the working dir is in an interrupted rebase''' | |
680 | parents = [p.rev() for p in repo.parents()] |
|
680 | parents = [p.rev() for p in repo.parents()] | |
681 | if originalwd in parents: |
|
681 | if originalwd in parents: | |
682 | return True |
|
682 | return True | |
683 |
|
683 | |||
684 | for newrev in state.itervalues(): |
|
684 | for newrev in state.itervalues(): | |
685 | if newrev in parents: |
|
685 | if newrev in parents: | |
686 | return True |
|
686 | return True | |
687 |
|
687 | |||
688 | return False |
|
688 | return False | |
689 |
|
689 | |||
690 | def abort(repo, originalwd, target, state): |
|
690 | def abort(repo, originalwd, target, state): | |
691 | 'Restore the repository to its original state' |
|
691 | 'Restore the repository to its original state' | |
692 |
dstates = [s for s in state.values() if s |
|
692 | dstates = [s for s in state.values() if s > nullrev] | |
693 | immutable = [d for d in dstates if not repo[d].mutable()] |
|
693 | immutable = [d for d in dstates if not repo[d].mutable()] | |
694 | cleanup = True |
|
694 | cleanup = True | |
695 | if immutable: |
|
695 | if immutable: | |
696 | repo.ui.warn(_("warning: can't clean up immutable changesets %s\n") |
|
696 | repo.ui.warn(_("warning: can't clean up immutable changesets %s\n") | |
697 | % ', '.join(str(repo[r]) for r in immutable), |
|
697 | % ', '.join(str(repo[r]) for r in immutable), | |
698 | hint=_('see hg help phases for details')) |
|
698 | hint=_('see hg help phases for details')) | |
699 | cleanup = False |
|
699 | cleanup = False | |
700 |
|
700 | |||
701 | descendants = set() |
|
701 | descendants = set() | |
702 | if dstates: |
|
702 | if dstates: | |
703 | descendants = set(repo.changelog.descendants(dstates)) |
|
703 | descendants = set(repo.changelog.descendants(dstates)) | |
704 | if descendants - set(dstates): |
|
704 | if descendants - set(dstates): | |
705 | repo.ui.warn(_("warning: new changesets detected on target branch, " |
|
705 | repo.ui.warn(_("warning: new changesets detected on target branch, " | |
706 | "can't strip\n")) |
|
706 | "can't strip\n")) | |
707 | cleanup = False |
|
707 | cleanup = False | |
708 |
|
708 | |||
709 | if cleanup: |
|
709 | if cleanup: | |
710 | # Update away from the rebase if necessary |
|
710 | # Update away from the rebase if necessary | |
711 | if inrebase(repo, originalwd, state): |
|
711 | if inrebase(repo, originalwd, state): | |
712 | merge.update(repo, repo[originalwd].rev(), False, True, False) |
|
712 | merge.update(repo, repo[originalwd].rev(), False, True, False) | |
713 |
|
713 | |||
714 | # Strip from the first rebased revision |
|
714 | # Strip from the first rebased revision | |
715 | rebased = filter(lambda x: x > -1 and x != target, state.values()) |
|
715 | rebased = filter(lambda x: x > -1 and x != target, state.values()) | |
716 | if rebased: |
|
716 | if rebased: | |
717 | strippoints = [c.node() for c in repo.set('roots(%ld)', rebased)] |
|
717 | strippoints = [c.node() for c in repo.set('roots(%ld)', rebased)] | |
718 | # no backup of rebased cset versions needed |
|
718 | # no backup of rebased cset versions needed | |
719 | repair.strip(repo.ui, repo, strippoints) |
|
719 | repair.strip(repo.ui, repo, strippoints) | |
720 |
|
720 | |||
721 | clearstatus(repo) |
|
721 | clearstatus(repo) | |
722 | repo.ui.warn(_('rebase aborted\n')) |
|
722 | repo.ui.warn(_('rebase aborted\n')) | |
723 | return 0 |
|
723 | return 0 | |
724 |
|
724 | |||
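abort() above only strips the partial rebase when it is safe to do so: not if any rebased result has become immutable (public), and not if unrelated changesets were committed on top of the rebased ones. A rough sketch of that gating logic, with plain Python values standing in for the repository queries (names are illustrative, not the extension's API):

    # Toy sketch of the two safety checks abort() performs before stripping.
    def may_cleanup(immutable_results, foreign_descendants):
        """Return True only if stripping the partial rebase is safe."""
        if immutable_results:        # some rebased csets are public: cannot strip
            return False
        if foreign_descendants:      # new csets grew on top of them: cannot strip
            return False
        return True

    print(may_cleanup([], set()))       # True: safe to strip and restore
    print(may_cleanup([12], set()))     # False: only a warning is printed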
725 | def buildstate(repo, dest, rebaseset, collapse): |
|
725 | def buildstate(repo, dest, rebaseset, collapse): | |
726 | '''Define which revisions are going to be rebased and where |
|
726 | '''Define which revisions are going to be rebased and where | |
727 |
|
727 | |||
728 | repo: repo |
|
728 | repo: repo | |
729 | dest: context |
|
729 | dest: context | |
730 | rebaseset: set of rev |
|
730 | rebaseset: set of rev | |
731 | ''' |
|
731 | ''' | |
732 |
|
732 | |||
733 | # This check isn't strictly necessary, since mq detects commits over an |
|
733 | # This check isn't strictly necessary, since mq detects commits over an | |
734 | # applied patch. But it prevents messing up the working directory when |
|
734 | # applied patch. But it prevents messing up the working directory when | |
735 | # a partially completed rebase is blocked by mq. |
|
735 | # a partially completed rebase is blocked by mq. | |
736 | if 'qtip' in repo.tags() and (dest.node() in |
|
736 | if 'qtip' in repo.tags() and (dest.node() in | |
737 | [s.node for s in repo.mq.applied]): |
|
737 | [s.node for s in repo.mq.applied]): | |
738 | raise util.Abort(_('cannot rebase onto an applied mq patch')) |
|
738 | raise util.Abort(_('cannot rebase onto an applied mq patch')) | |
739 |
|
739 | |||
740 | roots = list(repo.set('roots(%ld)', rebaseset)) |
|
740 | roots = list(repo.set('roots(%ld)', rebaseset)) | |
741 | if not roots: |
|
741 | if not roots: | |
742 | raise util.Abort(_('no matching revisions')) |
|
742 | raise util.Abort(_('no matching revisions')) | |
743 | roots.sort() |
|
743 | roots.sort() | |
744 | state = {} |
|
744 | state = {} | |
745 | detachset = set() |
|
745 | detachset = set() | |
746 | for root in roots: |
|
746 | for root in roots: | |
747 | commonbase = root.ancestor(dest) |
|
747 | commonbase = root.ancestor(dest) | |
748 | if commonbase == root: |
|
748 | if commonbase == root: | |
749 | raise util.Abort(_('source is ancestor of destination')) |
|
749 | raise util.Abort(_('source is ancestor of destination')) | |
750 | if commonbase == dest: |
|
750 | if commonbase == dest: | |
751 | samebranch = root.branch() == dest.branch() |
|
751 | samebranch = root.branch() == dest.branch() | |
752 | if not collapse and samebranch and root in dest.children(): |
|
752 | if not collapse and samebranch and root in dest.children(): | |
753 | repo.ui.debug('source is a child of destination\n') |
|
753 | repo.ui.debug('source is a child of destination\n') | |
754 | return None |
|
754 | return None | |
755 |
|
755 | |||
756 | repo.ui.debug('rebase onto %d starting from %s\n' % (dest, roots)) |
|
756 | repo.ui.debug('rebase onto %d starting from %s\n' % (dest, roots)) | |
757 | state.update(dict.fromkeys(rebaseset, nullrev)) |
|
757 | state.update(dict.fromkeys(rebaseset, nullrev)) | |
758 | # Rebase tries to turn <dest> into a parent of <root> while |
|
758 | # Rebase tries to turn <dest> into a parent of <root> while | |
759 | # preserving the number of parents of rebased changesets: |
|
759 | # preserving the number of parents of rebased changesets: | |
760 | # |
|
760 | # | |
761 | # - A changeset with a single parent will always be rebased as a |
|
761 | # - A changeset with a single parent will always be rebased as a | |
762 | # changeset with a single parent. |
|
762 | # changeset with a single parent. | |
763 | # |
|
763 | # | |
764 | # - A merge will be rebased as merge unless its parents are both |
|
764 | # - A merge will be rebased as merge unless its parents are both | |
765 | # ancestors of <dest> or are themselves in the rebased set and |
|
765 | # ancestors of <dest> or are themselves in the rebased set and | |
766 | # pruned while rebased. |
|
766 | # pruned while rebased. | |
767 | # |
|
767 | # | |
768 | # If one parent of <root> is an ancestor of <dest>, the rebased |
|
768 | # If one parent of <root> is an ancestor of <dest>, the rebased | |
769 | # version of this parent will be <dest>. This is always true with |
|
769 | # version of this parent will be <dest>. This is always true with | |
770 | # --base option. |
|
770 | # --base option. | |
771 | # |
|
771 | # | |
772 | # Otherwise, we need to *replace* the original parents with |
|
772 | # Otherwise, we need to *replace* the original parents with | |
773 | # <dest>. This "detaches" the rebased set from its former location |
|
773 | # <dest>. This "detaches" the rebased set from its former location | |
774 | # and rebases it onto <dest>. Changes introduced by ancestors of |
|
774 | # and rebases it onto <dest>. Changes introduced by ancestors of | |
775 | # <root> not common with <dest> (the detachset, marked as |
|
775 | # <root> not common with <dest> (the detachset, marked as | |
776 | # nullmerge) are "removed" from the rebased changesets. |
|
776 | # nullmerge) are "removed" from the rebased changesets. | |
777 | # |
|
777 | # | |
778 | # - If <root> has a single parent, set it to <dest>. |
|
778 | # - If <root> has a single parent, set it to <dest>. | |
779 | # |
|
779 | # | |
780 | # - If <root> is a merge, we cannot decide which parent to |
|
780 | # - If <root> is a merge, we cannot decide which parent to | |
781 | # replace, the rebase operation is not clearly defined. |
|
781 | # replace, the rebase operation is not clearly defined. | |
782 | # |
|
782 | # | |
783 | # The table below sums up this behavior: |
|
783 | # The table below sums up this behavior: | |
784 | # |
|
784 | # | |
785 | # +------------------+----------------------+-------------------------+ |
|
785 | # +------------------+----------------------+-------------------------+ | |
786 | # | | one parent | merge | |
|
786 | # | | one parent | merge | | |
787 | # +------------------+----------------------+-------------------------+ |
|
787 | # +------------------+----------------------+-------------------------+ | |
788 | # | parent in | new parent is <dest> | parents in ::<dest> are | |
|
788 | # | parent in | new parent is <dest> | parents in ::<dest> are | | |
789 | # | ::<dest> | | remapped to <dest> | |
|
789 | # | ::<dest> | | remapped to <dest> | | |
790 | # +------------------+----------------------+-------------------------+ |
|
790 | # +------------------+----------------------+-------------------------+ | |
791 | # | unrelated source | new parent is <dest> | ambiguous, abort | |
|
791 | # | unrelated source | new parent is <dest> | ambiguous, abort | | |
792 | # +------------------+----------------------+-------------------------+ |
|
792 | # +------------------+----------------------+-------------------------+ | |
793 | # |
|
793 | # | |
794 | # The actual abort is handled by `defineparents` |
|
794 | # The actual abort is handled by `defineparents` | |
795 | if len(root.parents()) <= 1: |
|
795 | if len(root.parents()) <= 1: | |
796 | # ancestors of <root> not ancestors of <dest> |
|
796 | # ancestors of <root> not ancestors of <dest> | |
797 | detachset.update(repo.changelog.findmissingrevs([commonbase.rev()], |
|
797 | detachset.update(repo.changelog.findmissingrevs([commonbase.rev()], | |
798 | [root.rev()])) |
|
798 | [root.rev()])) | |
799 | for r in detachset: |
|
799 | for r in detachset: | |
800 | if r not in state: |
|
800 | if r not in state: | |
801 | state[r] = nullmerge |
|
801 | state[r] = nullmerge | |
802 | if len(roots) > 1: |
|
802 | if len(roots) > 1: | |
803 | # If we have multiple roots, we may have "holes" in the rebase set.
|
803 | # If we have multiple roots, we may have "holes" in the rebase set. | |
804 | # Rebase roots that descend from those "holes" should not be detached as
|
804 | # Rebase roots that descend from those "holes" should not be detached as | |
805 | # other roots are. We use the special `revignored` to inform rebase that
|
805 | # other roots are. We use the special `revignored` to inform rebase that | |
806 | # the revision should be ignored but that `defineparents` should search
|
806 | # the revision should be ignored but that `defineparents` should search | |
807 | # for a rebase destination that makes sense regarding the rebased topology.
|
807 | # for a rebase destination that makes sense regarding the rebased topology. | |
808 | rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset)) |
|
808 | rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset)) | |
809 | for ignored in set(rebasedomain) - set(rebaseset): |
|
809 | for ignored in set(rebasedomain) - set(rebaseset): | |
810 | state[ignored] = revignored |
|
810 | state[ignored] = revignored | |
811 | return repo['.'].rev(), dest.rev(), state |
|
811 | return repo['.'].rev(), dest.rev(), state | |
812 |
|
812 | |||
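The mapping built above uses sentinel values: nullrev (-1) marks revisions still to be rebased, nullmerge (-2) marks detached ancestors, and revignored (-3) marks "holes" between multiple roots. A small illustrative sketch of what such a mapping might look like; the revision numbers are invented for the example:

    # Illustrative state mapping as buildstate() might return it.
    nullrev, nullmerge, revignored = -1, -2, -3

    state = {
        7: nullrev,      # in the rebase set, not rebased yet
        8: nullrev,
        5: nullmerge,    # ancestor of a root, detached from the old location
        6: revignored,   # "hole" between two roots, left to defineparents
    }

    to_rebase = sorted(r for r, v in state.items() if v == nullrev)
    print(to_rebase)     # [7, 8]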
813 | def clearrebased(ui, repo, state, skipped, collapsedas=None): |
|
813 | def clearrebased(ui, repo, state, skipped, collapsedas=None): | |
814 | """dispose of rebased revision at the end of the rebase |
|
814 | """dispose of rebased revision at the end of the rebase | |
815 |
|
815 | |||
816 | If `collapsedas` is not None, the rebase was a collapse whose result is the
|
816 | If `collapsedas` is not None, the rebase was a collapse whose result is the | |
817 | `collapsedas` node.""" |
|
817 | `collapsedas` node.""" | |
818 | if obsolete._enabled: |
|
818 | if obsolete._enabled: | |
819 | markers = [] |
|
819 | markers = [] | |
820 | for rev, newrev in sorted(state.items()): |
|
820 | for rev, newrev in sorted(state.items()): | |
821 | if newrev >= 0: |
|
821 | if newrev >= 0: | |
822 | if rev in skipped: |
|
822 | if rev in skipped: | |
823 | succs = () |
|
823 | succs = () | |
824 | elif collapsedas is not None: |
|
824 | elif collapsedas is not None: | |
825 | succs = (repo[collapsedas],) |
|
825 | succs = (repo[collapsedas],) | |
826 | else: |
|
826 | else: | |
827 | succs = (repo[newrev],) |
|
827 | succs = (repo[newrev],) | |
828 | markers.append((repo[rev], succs)) |
|
828 | markers.append((repo[rev], succs)) | |
829 | if markers: |
|
829 | if markers: | |
830 | obsolete.createmarkers(repo, markers) |
|
830 | obsolete.createmarkers(repo, markers) | |
831 | else: |
|
831 | else: | |
832 | rebased = [rev for rev in state if state[rev] > nullmerge] |
|
832 | rebased = [rev for rev in state if state[rev] > nullmerge] | |
833 | if rebased: |
|
833 | if rebased: | |
834 | stripped = [] |
|
834 | stripped = [] | |
835 | for root in repo.set('roots(%ld)', rebased): |
|
835 | for root in repo.set('roots(%ld)', rebased): | |
836 | if set(repo.changelog.descendants([root.rev()])) - set(state): |
|
836 | if set(repo.changelog.descendants([root.rev()])) - set(state): | |
837 | ui.warn(_("warning: new changesets detected " |
|
837 | ui.warn(_("warning: new changesets detected " | |
838 | "on source branch, not stripping\n")) |
|
838 | "on source branch, not stripping\n")) | |
839 | else: |
|
839 | else: | |
840 | stripped.append(root.node()) |
|
840 | stripped.append(root.node()) | |
841 | if stripped: |
|
841 | if stripped: | |
842 | # backup the old csets by default |
|
842 | # backup the old csets by default | |
843 | repair.strip(ui, repo, stripped, "all") |
|
843 | repair.strip(ui, repo, stripped, "all") | |
844 |
|
844 | |||
845 |
|
845 | |||
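clearrebased() above records obsolescence markers when the obsolete feature is enabled and falls back to stripping otherwise. A compact sketch of the per-revision successor choice it makes when building markers, with plain integers instead of changectx objects (purely illustrative):

    # Toy sketch: pick the successor recorded for each rebased revision.
    def successors_for(rev, newrev, skipped, collapsedas=None):
        if rev in skipped:
            return ()                 # pruned: no successor at all
        if collapsedas is not None:
            return (collapsedas,)     # everything points at the collapsed result
        return (newrev,)

    print(successors_for(3, 11, skipped=set()))                   # (11,)
    print(successors_for(4, 11, skipped=set([4])))                # ()
    print(successors_for(5, 12, skipped=set(), collapsedas=20))   # (20,)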
846 | def pullrebase(orig, ui, repo, *args, **opts): |
|
846 | def pullrebase(orig, ui, repo, *args, **opts): | |
847 | 'Call rebase after pull if the latter has been invoked with --rebase' |
|
847 | 'Call rebase after pull if the latter has been invoked with --rebase' | |
848 | if opts.get('rebase'): |
|
848 | if opts.get('rebase'): | |
849 | if opts.get('update'): |
|
849 | if opts.get('update'): | |
850 | del opts['update'] |
|
850 | del opts['update'] | |
851 | ui.debug('--update and --rebase are not compatible, ignoring ' |
|
851 | ui.debug('--update and --rebase are not compatible, ignoring ' | |
852 | 'the update flag\n') |
|
852 | 'the update flag\n') | |
853 |
|
853 | |||
854 | movemarkfrom = repo['.'].node() |
|
854 | movemarkfrom = repo['.'].node() | |
855 | revsprepull = len(repo) |
|
855 | revsprepull = len(repo) | |
856 | origpostincoming = commands.postincoming |
|
856 | origpostincoming = commands.postincoming | |
857 | def _dummy(*args, **kwargs): |
|
857 | def _dummy(*args, **kwargs): | |
858 | pass |
|
858 | pass | |
859 | commands.postincoming = _dummy |
|
859 | commands.postincoming = _dummy | |
860 | try: |
|
860 | try: | |
861 | orig(ui, repo, *args, **opts) |
|
861 | orig(ui, repo, *args, **opts) | |
862 | finally: |
|
862 | finally: | |
863 | commands.postincoming = origpostincoming |
|
863 | commands.postincoming = origpostincoming | |
864 | revspostpull = len(repo) |
|
864 | revspostpull = len(repo) | |
865 | if revspostpull > revsprepull: |
|
865 | if revspostpull > revsprepull: | |
866 | # the --rev option from pull conflicts with rebase's own --rev,
|
866 | # the --rev option from pull conflicts with rebase's own --rev, | |
867 | # so drop it
|
867 | # so drop it | |
868 | if 'rev' in opts: |
|
868 | if 'rev' in opts: | |
869 | del opts['rev'] |
|
869 | del opts['rev'] | |
870 | rebase(ui, repo, **opts) |
|
870 | rebase(ui, repo, **opts) | |
871 | branch = repo[None].branch() |
|
871 | branch = repo[None].branch() | |
872 | dest = repo[branch].rev() |
|
872 | dest = repo[branch].rev() | |
873 | if dest != repo['.'].rev(): |
|
873 | if dest != repo['.'].rev(): | |
874 | # there was nothing to rebase, so we force an update
|
874 | # there was nothing to rebase, so we force an update | |
875 | hg.update(repo, dest) |
|
875 | hg.update(repo, dest) | |
876 | if bookmarks.update(repo, [movemarkfrom], repo['.'].node()): |
|
876 | if bookmarks.update(repo, [movemarkfrom], repo['.'].node()): | |
877 | ui.status(_("updating bookmark %s\n") |
|
877 | ui.status(_("updating bookmark %s\n") | |
878 | % repo._bookmarkcurrent) |
|
878 | % repo._bookmarkcurrent) | |
879 | else: |
|
879 | else: | |
880 | if opts.get('tool'): |
|
880 | if opts.get('tool'): | |
881 | raise util.Abort(_('--tool can only be used with --rebase')) |
|
881 | raise util.Abort(_('--tool can only be used with --rebase')) | |
882 | orig(ui, repo, *args, **opts) |
|
882 | orig(ui, repo, *args, **opts) | |
883 |
|
883 | |||
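pullrebase() above decides whether the pull brought anything by comparing the repository length before and after the wrapped call, and only then runs rebase (falling back to a plain update otherwise). A minimal sketch of that detection with integers standing in for len(repo); nothing here is the real wrapper:

    # Toy sketch: detect whether a pull added revisions by comparing lengths.
    def pulled_something(revsprepull, revspostpull):
        return revspostpull > revsprepull

    before, after = 42, 45          # revision counts before/after the pull
    if pulled_something(before, after):
        print('new revisions pulled: run rebase')
    else:
        print('nothing pulled: just update to the branch head')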
884 | def summaryhook(ui, repo): |
|
884 | def summaryhook(ui, repo): | |
885 | if not os.path.exists(repo.join('rebasestate')): |
|
885 | if not os.path.exists(repo.join('rebasestate')): | |
886 | return |
|
886 | return | |
887 | try: |
|
887 | try: | |
888 | state = restorestatus(repo)[2] |
|
888 | state = restorestatus(repo)[2] | |
889 | except error.RepoLookupError: |
|
889 | except error.RepoLookupError: | |
890 | # i18n: column positioning for "hg summary" |
|
890 | # i18n: column positioning for "hg summary" | |
891 | msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n') |
|
891 | msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n') | |
892 | ui.write(msg) |
|
892 | ui.write(msg) | |
893 | return |
|
893 | return | |
894 | numrebased = len([i for i in state.itervalues() if i != -1]) |
|
894 | numrebased = len([i for i in state.itervalues() if i != -1]) | |
895 | # i18n: column positioning for "hg summary" |
|
895 | # i18n: column positioning for "hg summary" | |
896 | ui.write(_('rebase: %s, %s (rebase --continue)\n') % |
|
896 | ui.write(_('rebase: %s, %s (rebase --continue)\n') % | |
897 | (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased, |
|
897 | (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased, | |
898 | ui.label(_('%d remaining'), 'rebase.remaining') % |
|
898 | ui.label(_('%d remaining'), 'rebase.remaining') % | |
899 | (len(state) - numrebased))) |
|
899 | (len(state) - numrebased))) | |
900 |
|
900 | |||
901 | def uisetup(ui): |
|
901 | def uisetup(ui): | |
902 | 'Replace pull with a decorator to provide --rebase option' |
|
902 | 'Replace pull with a decorator to provide --rebase option' | |
903 | entry = extensions.wrapcommand(commands.table, 'pull', pullrebase) |
|
903 | entry = extensions.wrapcommand(commands.table, 'pull', pullrebase) | |
904 | entry[1].append(('', 'rebase', None, |
|
904 | entry[1].append(('', 'rebase', None, | |
905 | _("rebase working directory to branch head"))) |
|
905 | _("rebase working directory to branch head"))) | |
906 | entry[1].append(('t', 'tool', '', |
|
906 | entry[1].append(('t', 'tool', '', | |
907 | _("specify merge tool for rebase"))) |
|
907 | _("specify merge tool for rebase"))) | |
908 | cmdutil.summaryhooks.add('rebase', summaryhook) |
|
908 | cmdutil.summaryhooks.add('rebase', summaryhook) | |
909 | cmdutil.unfinishedstates.append( |
|
909 | cmdutil.unfinishedstates.append( | |
910 | ['rebasestate', False, False, _('rebase in progress'), |
|
910 | ['rebasestate', False, False, _('rebase in progress'), | |
911 | _("use 'hg rebase --continue' or 'hg rebase --abort'")]) |
|
911 | _("use 'hg rebase --continue' or 'hg rebase --abort'")]) |
@@ -1,408 +1,410 b'' | |||||
1 | """ Mercurial phases support code |
|
1 | """ Mercurial phases support code | |
2 |
|
2 | |||
3 | --- |
|
3 | --- | |
4 |
|
4 | |||
5 | Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org> |
|
5 | Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org> | |
6 | Logilab SA <contact@logilab.fr> |
|
6 | Logilab SA <contact@logilab.fr> | |
7 | Augie Fackler <durin42@gmail.com> |
|
7 | Augie Fackler <durin42@gmail.com> | |
8 |
|
8 | |||
9 | This software may be used and distributed according to the terms |
|
9 | This software may be used and distributed according to the terms | |
10 | of the GNU General Public License version 2 or any later version. |
|
10 | of the GNU General Public License version 2 or any later version. | |
11 |
|
11 | |||
12 | --- |
|
12 | --- | |
13 |
|
13 | |||
14 | This module implements most phase logic in mercurial. |
|
14 | This module implements most phase logic in mercurial. | |
15 |
|
15 | |||
16 |
|
16 | |||
17 | Basic Concept |
|
17 | Basic Concept | |
18 | ============= |
|
18 | ============= | |
19 |
|
19 | |||
20 | A 'changeset phase' is an indicator that tells us how a changeset is |
|
20 | A 'changeset phase' is an indicator that tells us how a changeset is | |
21 | manipulated and communicated. The details of each phase are described
|
21 | manipulated and communicated. The details of each phase are described | |
22 | below; here we describe the properties they have in common.
|
22 | below; here we describe the properties they have in common. | |
23 |
|
23 | |||
24 | Like bookmarks, phases are not stored in history and thus are not |
|
24 | Like bookmarks, phases are not stored in history and thus are not | |
25 | permanent and leave no audit trail. |
|
25 | permanent and leave no audit trail. | |
26 |
|
26 | |||
27 | First, no changeset can be in two phases at once. Phases are ordered, |
|
27 | First, no changeset can be in two phases at once. Phases are ordered, | |
28 | so they can be considered from lowest to highest. The default, lowest |
|
28 | so they can be considered from lowest to highest. The default, lowest | |
29 | phase is 'public' - this is the normal phase of existing changesets. A |
|
29 | phase is 'public' - this is the normal phase of existing changesets. A | |
30 | child changeset can not be in a lower phase than its parents. |
|
30 | child changeset can not be in a lower phase than its parents. | |
31 |
|
31 | |||
32 | These phases share a hierarchy of traits: |
|
32 | These phases share a hierarchy of traits: | |
33 |
|
33 | |||
34 | immutable shared |
|
34 | immutable shared | |
35 | public: X X |
|
35 | public: X X | |
36 | draft: X |
|
36 | draft: X | |
37 | secret: |
|
37 | secret: | |
38 |
|
38 | |||
39 | Local commits are draft by default. |
|
39 | Local commits are draft by default. | |
40 |
|
40 | |||
41 | Phase Movement and Exchange |
|
41 | Phase Movement and Exchange | |
42 | =========================== |
|
42 | =========================== | |
43 |
|
43 | |||
44 | Phase data is exchanged by pushkey on pull and push. Some servers have |
|
44 | Phase data is exchanged by pushkey on pull and push. Some servers have | |
45 | a publish option set, we call such a server a "publishing server". |
|
45 | a publish option set, we call such a server a "publishing server". | |
46 | Pushing a draft changeset to a publishing server changes the phase to |
|
46 | Pushing a draft changeset to a publishing server changes the phase to | |
47 | public. |
|
47 | public. | |
48 |
|
48 | |||
49 | A small list of facts/rules defines the exchange of phases:
|
49 | A small list of facts/rules defines the exchange of phases: | |
50 |
|
50 | |||
51 | * old client never changes server states |
|
51 | * old client never changes server states | |
52 | * pull never changes server states |
|
52 | * pull never changes server states | |
53 | * publish and old server changesets are seen as public by client |
|
53 | * publish and old server changesets are seen as public by client | |
54 | * any secret changeset seen in another repository is lowered to at |
|
54 | * any secret changeset seen in another repository is lowered to at | |
55 | least draft |
|
55 | least draft | |
56 |
|
56 | |||
57 | Here is the final table summing up the 49 possible use cases of phase |
|
57 | Here is the final table summing up the 49 possible use cases of phase | |
58 | exchange: |
|
58 | exchange: | |
59 |
|
59 | |||
60 | server |
|
60 | server | |
61 | old publish non-publish |
|
61 | old publish non-publish | |
62 | N X N D P N D P |
|
62 | N X N D P N D P | |
63 | old client |
|
63 | old client | |
64 | pull |
|
64 | pull | |
65 | N - X/X - X/D X/P - X/D X/P |
|
65 | N - X/X - X/D X/P - X/D X/P | |
66 | X - X/X - X/D X/P - X/D X/P |
|
66 | X - X/X - X/D X/P - X/D X/P | |
67 | push |
|
67 | push | |
68 | X X/X X/X X/P X/P X/P X/D X/D X/P |
|
68 | X X/X X/X X/P X/P X/P X/D X/D X/P | |
69 | new client |
|
69 | new client | |
70 | pull |
|
70 | pull | |
71 | N - P/X - P/D P/P - D/D P/P |
|
71 | N - P/X - P/D P/P - D/D P/P | |
72 | D - P/X - P/D P/P - D/D P/P |
|
72 | D - P/X - P/D P/P - D/D P/P | |
73 | P - P/X - P/D P/P - P/D P/P |
|
73 | P - P/X - P/D P/P - P/D P/P | |
74 | push |
|
74 | push | |
75 | D P/X P/X P/P P/P P/P D/D D/D P/P |
|
75 | D P/X P/X P/P P/P P/P D/D D/D P/P | |
76 | P P/X P/X P/P P/P P/P P/P P/P P/P |
|
76 | P P/X P/X P/P P/P P/P P/P P/P P/P | |
77 |
|
77 | |||
78 | Legend: |
|
78 | Legend: | |
79 |
|
79 | |||
80 | A/B = final state on client / state on server |
|
80 | A/B = final state on client / state on server | |
81 |
|
81 | |||
82 | * N = new/not present, |
|
82 | * N = new/not present, | |
83 | * P = public, |
|
83 | * P = public, | |
84 | * D = draft, |
|
84 | * D = draft, | |
85 | * X = not tracked (i.e., the old client or server has no internal |
|
85 | * X = not tracked (i.e., the old client or server has no internal | |
86 | way of recording the phase.) |
|
86 | way of recording the phase.) | |
87 |
|
87 | |||
88 | passive = only pushes |
|
88 | passive = only pushes | |
89 |
|
89 | |||
90 |
|
90 | |||
91 | A cell here can be read like this: |
|
91 | A cell here can be read like this: | |
92 |
|
92 | |||
93 | "When a new client pushes a draft changeset (D) to a publishing |
|
93 | "When a new client pushes a draft changeset (D) to a publishing | |
94 | server where it's not present (N), it's marked public on both |
|
94 | server where it's not present (N), it's marked public on both | |
95 | sides (P/P)." |
|
95 | sides (P/P)." | |
96 |
|
96 | |||
97 | Note: an old client behaves as a publishing server with draft-only content
|
97 | Note: an old client behaves as a publishing server with draft-only content | |
98 | - other people see it as public |
|
98 | - other people see it as public | |
99 | - content is pushed as draft |
|
99 | - content is pushed as draft | |
100 |
|
100 | |||
101 | """ |
|
101 | """ | |
102 |
|
102 | |||
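A small standalone sketch of the ordering rule described in the docstring above (phases are ordered lowest to highest and a child can never sit in a lower phase than its parents); this is plain Python illustrating the constraint, not Mercurial internals:

    # Toy sketch of the phase ordering constraint.
    public, draft, secret = 0, 1, 2          # ordered lowest to highest
    names = ['public', 'draft', 'secret']

    def child_phase(requested, parent_phases):
        """A child is pulled up to at least the highest phase of its parents."""
        return max([requested] + list(parent_phases))

    print(names[child_phase(public, [draft])])    # draft: raised by its parent
    print(names[child_phase(secret, [public])])   # secret: own phase already higher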
103 | import errno |
|
103 | import errno | |
104 | from node import nullid, nullrev, bin, hex, short |
|
104 | from node import nullid, nullrev, bin, hex, short | |
105 | from i18n import _ |
|
105 | from i18n import _ | |
106 | import util, error |
|
106 | import util, error | |
107 |
|
107 | |||
108 | allphases = public, draft, secret = range(3) |
|
108 | allphases = public, draft, secret = range(3) | |
109 | trackedphases = allphases[1:] |
|
109 | trackedphases = allphases[1:] | |
110 | phasenames = ['public', 'draft', 'secret'] |
|
110 | phasenames = ['public', 'draft', 'secret'] | |
111 |
|
111 | |||
112 | def _readroots(repo, phasedefaults=None): |
|
112 | def _readroots(repo, phasedefaults=None): | |
113 | """Read phase roots from disk |
|
113 | """Read phase roots from disk | |
114 |
|
114 | |||
115 | phasedefaults is a list of fn(repo, roots) callable, which are |
|
115 | phasedefaults is a list of fn(repo, roots) callable, which are | |
116 | executed if the phase roots file does not exist. When phases are |
|
116 | executed if the phase roots file does not exist. When phases are | |
117 | being initialized on an existing repository, this could be used to |
|
117 | being initialized on an existing repository, this could be used to | |
118 | set selected changesets phase to something else than public. |
|
118 | set selected changesets phase to something else than public. | |
119 |
|
119 | |||
120 | Return (roots, dirty) where dirty is true if roots differ from |
|
120 | Return (roots, dirty) where dirty is true if roots differ from | |
121 | what is being stored. |
|
121 | what is being stored. | |
122 | """ |
|
122 | """ | |
123 | repo = repo.unfiltered() |
|
123 | repo = repo.unfiltered() | |
124 | dirty = False |
|
124 | dirty = False | |
125 | roots = [set() for i in allphases] |
|
125 | roots = [set() for i in allphases] | |
126 | try: |
|
126 | try: | |
127 | f = repo.sopener('phaseroots') |
|
127 | f = repo.sopener('phaseroots') | |
128 | try: |
|
128 | try: | |
129 | for line in f: |
|
129 | for line in f: | |
130 | phase, nh = line.split() |
|
130 | phase, nh = line.split() | |
131 | roots[int(phase)].add(bin(nh)) |
|
131 | roots[int(phase)].add(bin(nh)) | |
132 | finally: |
|
132 | finally: | |
133 | f.close() |
|
133 | f.close() | |
134 | except IOError, inst: |
|
134 | except IOError, inst: | |
135 | if inst.errno != errno.ENOENT: |
|
135 | if inst.errno != errno.ENOENT: | |
136 | raise |
|
136 | raise | |
137 | if phasedefaults: |
|
137 | if phasedefaults: | |
138 | for f in phasedefaults: |
|
138 | for f in phasedefaults: | |
139 | roots = f(repo, roots) |
|
139 | roots = f(repo, roots) | |
140 | dirty = True |
|
140 | dirty = True | |
141 | return roots, dirty |
|
141 | return roots, dirty | |
142 |
|
142 | |||
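_readroots() above parses the phaseroots file, one "<phase> <hex node>" pair per line. A self-contained sketch of that parsing on a literal string, keeping the hex text instead of converting it with bin(); the (shortened) hashes are made up:

    # Toy sketch: parse a phaseroots-style payload ("<phase> <hex node>" per line).
    payload = "1 8ba995b74e18\n2 f9e8c0eb43cd\n"

    roots = [set(), set(), set()]      # one bucket per phase: public, draft, secret
    for line in payload.splitlines():
        phase, nh = line.split()
        roots[int(phase)].add(nh)      # the real code stores bin(nh), not the hex

    print(sorted(roots[1]))            # draft roots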
143 | class phasecache(object): |
|
143 | class phasecache(object): | |
144 | def __init__(self, repo, phasedefaults, _load=True): |
|
144 | def __init__(self, repo, phasedefaults, _load=True): | |
145 | if _load: |
|
145 | if _load: | |
146 | # Cheap trick to allow shallow-copy without copy module |
|
146 | # Cheap trick to allow shallow-copy without copy module | |
147 | self.phaseroots, self.dirty = _readroots(repo, phasedefaults) |
|
147 | self.phaseroots, self.dirty = _readroots(repo, phasedefaults) | |
148 | self._phaserevs = None |
|
148 | self._phaserevs = None | |
149 | self.filterunknown(repo) |
|
149 | self.filterunknown(repo) | |
150 | self.opener = repo.sopener |
|
150 | self.opener = repo.sopener | |
151 |
|
151 | |||
152 | def copy(self): |
|
152 | def copy(self): | |
153 | # Shallow copy meant to ensure isolation in |
|
153 | # Shallow copy meant to ensure isolation in | |
154 | # advance/retractboundary(), nothing more. |
|
154 | # advance/retractboundary(), nothing more. | |
155 | ph = phasecache(None, None, _load=False) |
|
155 | ph = phasecache(None, None, _load=False) | |
156 | ph.phaseroots = self.phaseroots[:] |
|
156 | ph.phaseroots = self.phaseroots[:] | |
157 | ph.dirty = self.dirty |
|
157 | ph.dirty = self.dirty | |
158 | ph.opener = self.opener |
|
158 | ph.opener = self.opener | |
159 | ph._phaserevs = self._phaserevs |
|
159 | ph._phaserevs = self._phaserevs | |
160 | return ph |
|
160 | return ph | |
161 |
|
161 | |||
162 | def replace(self, phcache): |
|
162 | def replace(self, phcache): | |
163 | for a in 'phaseroots dirty opener _phaserevs'.split(): |
|
163 | for a in 'phaseroots dirty opener _phaserevs'.split(): | |
164 | setattr(self, a, getattr(phcache, a)) |
|
164 | setattr(self, a, getattr(phcache, a)) | |
165 |
|
165 | |||
166 | def getphaserevs(self, repo, rebuild=False): |
|
166 | def getphaserevs(self, repo, rebuild=False): | |
167 | if rebuild or self._phaserevs is None: |
|
167 | if rebuild or self._phaserevs is None: | |
168 | repo = repo.unfiltered() |
|
168 | repo = repo.unfiltered() | |
169 | revs = [public] * len(repo.changelog) |
|
169 | revs = [public] * len(repo.changelog) | |
170 | for phase in trackedphases: |
|
170 | for phase in trackedphases: | |
171 | roots = map(repo.changelog.rev, self.phaseroots[phase]) |
|
171 | roots = map(repo.changelog.rev, self.phaseroots[phase]) | |
172 | if roots: |
|
172 | if roots: | |
173 | for rev in roots: |
|
173 | for rev in roots: | |
174 | revs[rev] = phase |
|
174 | revs[rev] = phase | |
175 | for rev in repo.changelog.descendants(roots): |
|
175 | for rev in repo.changelog.descendants(roots): | |
176 | revs[rev] = phase |
|
176 | revs[rev] = phase | |
177 | self._phaserevs = revs |
|
177 | self._phaserevs = revs | |
178 | return self._phaserevs |
|
178 | return self._phaserevs | |
179 |
|
179 | |||
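getphaserevs() above starts from an all-public array and raises each tracked phase's roots together with their descendants. A pure-Python sketch of that propagation on a toy *linear* history, where "descendants of r" is simply every higher revision number (a simplification that only holds for a linear DAG):

    # Toy sketch: propagate phase roots to descendants on a linear history.
    public, draft, secret = 0, 1, 2
    numrevs = 8
    phaseroots = {draft: [3], secret: [6]}     # invented revision numbers

    revs = [public] * numrevs
    for phase in (draft, secret):              # tracked phases, lowest first
        for root in phaseroots.get(phase, []):
            for rev in range(root, numrevs):   # the root and all its descendants
                revs[rev] = phase

    print(revs)    # [0, 0, 0, 1, 1, 1, 2, 2]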
180 | def phase(self, repo, rev): |
|
180 | def phase(self, repo, rev): | |
181 | # We need a repo argument here to be able to build _phaserevs |
|
181 | # We need a repo argument here to be able to build _phaserevs | |
182 | # if necessary. The repository instance is not stored in |
|
182 | # if necessary. The repository instance is not stored in | |
183 | # phasecache to avoid reference cycles. The changelog instance |
|
183 | # phasecache to avoid reference cycles. The changelog instance | |
184 | # is not stored because it is a filecache() property and can |
|
184 | # is not stored because it is a filecache() property and can | |
185 | # be replaced without us being notified. |
|
185 | # be replaced without us being notified. | |
186 | if rev == nullrev: |
|
186 | if rev == nullrev: | |
187 | return public |
|
187 | return public | |
|
188 | if rev < nullrev: | |||
|
189 | raise ValueError(_('cannot lookup negative revision')) | |||
188 | if self._phaserevs is None or rev >= len(self._phaserevs): |
|
190 | if self._phaserevs is None or rev >= len(self._phaserevs): | |
189 | self._phaserevs = self.getphaserevs(repo, rebuild=True) |
|
191 | self._phaserevs = self.getphaserevs(repo, rebuild=True) | |
190 | return self._phaserevs[rev] |
|
192 | return self._phaserevs[rev] | |
191 |
|
193 | |||
192 | def write(self): |
|
194 | def write(self): | |
193 | if not self.dirty: |
|
195 | if not self.dirty: | |
194 | return |
|
196 | return | |
195 | f = self.opener('phaseroots', 'w', atomictemp=True) |
|
197 | f = self.opener('phaseroots', 'w', atomictemp=True) | |
196 | try: |
|
198 | try: | |
197 | for phase, roots in enumerate(self.phaseroots): |
|
199 | for phase, roots in enumerate(self.phaseroots): | |
198 | for h in roots: |
|
200 | for h in roots: | |
199 | f.write('%i %s\n' % (phase, hex(h))) |
|
201 | f.write('%i %s\n' % (phase, hex(h))) | |
200 | finally: |
|
202 | finally: | |
201 | f.close() |
|
203 | f.close() | |
202 | self.dirty = False |
|
204 | self.dirty = False | |
203 |
|
205 | |||
204 | def _updateroots(self, phase, newroots): |
|
206 | def _updateroots(self, phase, newroots): | |
205 | self.phaseroots[phase] = newroots |
|
207 | self.phaseroots[phase] = newroots | |
206 | self._phaserevs = None |
|
208 | self._phaserevs = None | |
207 | self.dirty = True |
|
209 | self.dirty = True | |
208 |
|
210 | |||
209 | def advanceboundary(self, repo, targetphase, nodes): |
|
211 | def advanceboundary(self, repo, targetphase, nodes): | |
210 | # Be careful to preserve shallow-copied values: do not update |
|
212 | # Be careful to preserve shallow-copied values: do not update | |
211 | # phaseroots values, replace them. |
|
213 | # phaseroots values, replace them. | |
212 |
|
214 | |||
213 | repo = repo.unfiltered() |
|
215 | repo = repo.unfiltered() | |
214 | delroots = [] # set of root deleted by this path |
|
216 | delroots = [] # set of root deleted by this path | |
215 | for phase in xrange(targetphase + 1, len(allphases)): |
|
217 | for phase in xrange(targetphase + 1, len(allphases)): | |
216 | # filter nodes that are not in a compatible phase already |
|
218 | # filter nodes that are not in a compatible phase already | |
217 | nodes = [n for n in nodes |
|
219 | nodes = [n for n in nodes | |
218 | if self.phase(repo, repo[n].rev()) >= phase] |
|
220 | if self.phase(repo, repo[n].rev()) >= phase] | |
219 | if not nodes: |
|
221 | if not nodes: | |
220 | break # no roots to move anymore |
|
222 | break # no roots to move anymore | |
221 | olds = self.phaseroots[phase] |
|
223 | olds = self.phaseroots[phase] | |
222 | roots = set(ctx.node() for ctx in repo.set( |
|
224 | roots = set(ctx.node() for ctx in repo.set( | |
223 | 'roots((%ln::) - (%ln::%ln))', olds, olds, nodes)) |
|
225 | 'roots((%ln::) - (%ln::%ln))', olds, olds, nodes)) | |
224 | if olds != roots: |
|
226 | if olds != roots: | |
225 | self._updateroots(phase, roots) |
|
227 | self._updateroots(phase, roots) | |
226 | # some roots may need to be declared for lower phases |
|
228 | # some roots may need to be declared for lower phases | |
227 | delroots.extend(olds - roots) |
|
229 | delroots.extend(olds - roots) | |
228 | # declare deleted root in the target phase |
|
230 | # declare deleted root in the target phase | |
229 | if targetphase != 0: |
|
231 | if targetphase != 0: | |
230 | self.retractboundary(repo, targetphase, delroots) |
|
232 | self.retractboundary(repo, targetphase, delroots) | |
231 | repo.invalidatevolatilesets() |
|
233 | repo.invalidatevolatilesets() | |
232 |
|
234 | |||
233 | def retractboundary(self, repo, targetphase, nodes): |
|
235 | def retractboundary(self, repo, targetphase, nodes): | |
234 | # Be careful to preserve shallow-copied values: do not update |
|
236 | # Be careful to preserve shallow-copied values: do not update | |
235 | # phaseroots values, replace them. |
|
237 | # phaseroots values, replace them. | |
236 |
|
238 | |||
237 | repo = repo.unfiltered() |
|
239 | repo = repo.unfiltered() | |
238 | currentroots = self.phaseroots[targetphase] |
|
240 | currentroots = self.phaseroots[targetphase] | |
239 | newroots = [n for n in nodes |
|
241 | newroots = [n for n in nodes | |
240 | if self.phase(repo, repo[n].rev()) < targetphase] |
|
242 | if self.phase(repo, repo[n].rev()) < targetphase] | |
241 | if newroots: |
|
243 | if newroots: | |
242 | if nullid in newroots: |
|
244 | if nullid in newroots: | |
243 | raise util.Abort(_('cannot change null revision phase')) |
|
245 | raise util.Abort(_('cannot change null revision phase')) | |
244 | currentroots = currentroots.copy() |
|
246 | currentroots = currentroots.copy() | |
245 | currentroots.update(newroots) |
|
247 | currentroots.update(newroots) | |
246 | ctxs = repo.set('roots(%ln::)', currentroots) |
|
248 | ctxs = repo.set('roots(%ln::)', currentroots) | |
247 | currentroots.intersection_update(ctx.node() for ctx in ctxs) |
|
249 | currentroots.intersection_update(ctx.node() for ctx in ctxs) | |
248 | self._updateroots(targetphase, currentroots) |
|
250 | self._updateroots(targetphase, currentroots) | |
249 | repo.invalidatevolatilesets() |
|
251 | repo.invalidatevolatilesets() | |
250 |
|
252 | |||
251 | def filterunknown(self, repo): |
|
253 | def filterunknown(self, repo): | |
252 | """remove unknown nodes from the phase boundary |
|
254 | """remove unknown nodes from the phase boundary | |
253 |
|
255 | |||
254 | Nothing is lost as unknown nodes only hold data for their descendants. |
|
256 | Nothing is lost as unknown nodes only hold data for their descendants. | |
255 | """ |
|
257 | """ | |
256 | filtered = False |
|
258 | filtered = False | |
257 | nodemap = repo.changelog.nodemap # to filter unknown nodes |
|
259 | nodemap = repo.changelog.nodemap # to filter unknown nodes | |
258 | for phase, nodes in enumerate(self.phaseroots): |
|
260 | for phase, nodes in enumerate(self.phaseroots): | |
259 | missing = [node for node in nodes if node not in nodemap] |
|
261 | missing = [node for node in nodes if node not in nodemap] | |
260 | if missing: |
|
262 | if missing: | |
261 | for mnode in missing: |
|
263 | for mnode in missing: | |
262 | repo.ui.debug( |
|
264 | repo.ui.debug( | |
263 | 'removing unknown node %s from %i-phase boundary\n' |
|
265 | 'removing unknown node %s from %i-phase boundary\n' | |
264 | % (short(mnode), phase)) |
|
266 | % (short(mnode), phase)) | |
265 | nodes.symmetric_difference_update(missing) |
|
267 | nodes.symmetric_difference_update(missing) | |
266 | filtered = True |
|
268 | filtered = True | |
267 | if filtered: |
|
269 | if filtered: | |
268 | self.dirty = True |
|
270 | self.dirty = True | |
269 | # filterunknown is called by repo.destroyed; we may have no changes in
|
271 | # filterunknown is called by repo.destroyed; we may have no changes in | |
270 | # roots but the phaserevs contents are certainly invalid (or at least we
|
272 | # roots but the phaserevs contents are certainly invalid (or at least we | |
271 | # have no proper way to check that). Related to issue 3858.
|
273 | # have no proper way to check that). Related to issue 3858. | |
272 | # |
|
274 | # | |
273 | # The other caller is __init__, which has no _phaserevs initialized
|
275 | # The other caller is __init__, which has no _phaserevs initialized | |
274 | # anyway. If this changes, we should consider adding a dedicated
|
276 | # anyway. If this changes, we should consider adding a dedicated | |
275 | # "destroyed" function to phasecache or a proper cache key mechanism
|
277 | # "destroyed" function to phasecache or a proper cache key mechanism | |
276 | # (see the branchmap one)
|
278 | # (see the branchmap one) | |
277 | self._phaserevs = None |
|
279 | self._phaserevs = None | |
278 |
|
280 | |||
279 | def advanceboundary(repo, targetphase, nodes): |
|
281 | def advanceboundary(repo, targetphase, nodes): | |
280 | """Add nodes to a phase changing other nodes phases if necessary. |
|
282 | """Add nodes to a phase changing other nodes phases if necessary. | |
281 |
|
283 | |||
282 | This function moves the boundary *forward*: all nodes
|
284 | This function moves the boundary *forward*: all nodes | |
283 | are set in the target phase or kept in a *lower* phase.
|
285 | are set in the target phase or kept in a *lower* phase. | |
284 |
|
286 | |||
285 | Simplify the boundary to contain phase roots only."""
|
287 | Simplify the boundary to contain phase roots only.""" | |
286 | phcache = repo._phasecache.copy() |
|
288 | phcache = repo._phasecache.copy() | |
287 | phcache.advanceboundary(repo, targetphase, nodes) |
|
289 | phcache.advanceboundary(repo, targetphase, nodes) | |
288 | repo._phasecache.replace(phcache) |
|
290 | repo._phasecache.replace(phcache) | |
289 |
|
291 | |||
290 | def retractboundary(repo, targetphase, nodes): |
|
292 | def retractboundary(repo, targetphase, nodes): | |
291 | """Set nodes back to a phase changing other nodes phases if |
|
293 | """Set nodes back to a phase changing other nodes phases if | |
292 | necessary. |
|
294 | necessary. | |
293 |
|
295 | |||
294 | This function moves the boundary *backward*: all nodes
|
296 | This function moves the boundary *backward*: all nodes | |
295 | are set in the target phase or kept in a *higher* phase.
|
297 | are set in the target phase or kept in a *higher* phase. | |
296 |
|
298 | |||
297 | Simplify the boundary to contain phase roots only."""
|
299 | Simplify the boundary to contain phase roots only.""" | |
298 | phcache = repo._phasecache.copy() |
|
300 | phcache = repo._phasecache.copy() | |
299 | phcache.retractboundary(repo, targetphase, nodes) |
|
301 | phcache.retractboundary(repo, targetphase, nodes) | |
300 | repo._phasecache.replace(phcache) |
|
302 | repo._phasecache.replace(phcache) | |
301 |
|
303 | |||
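The two wrappers above move the boundary in opposite directions: advanceboundary never raises a node's phase, retractboundary never lowers it. For a single node that amounts to taking a min or a max against the target phase; the sketch below only illustrates that invariant and skips the root-set bookkeeping the real code does:

    # Toy sketch of the per-node direction invariants.
    public, draft, secret = 0, 1, 2

    def advance(current, target):
        return min(current, target)    # forward: target phase or kept *lower*

    def retract(current, target):
        return max(current, target)    # backward: target phase or kept *higher*

    print(advance(secret, draft))   # 1: a secret node becomes draft
    print(advance(public, draft))   # 0: a public node stays public
    print(retract(draft, secret))   # 2: a draft node becomes secret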
302 | def listphases(repo): |
|
304 | def listphases(repo): | |
303 | """List phases root for serialization over pushkey""" |
|
305 | """List phases root for serialization over pushkey""" | |
304 | keys = {} |
|
306 | keys = {} | |
305 | value = '%i' % draft |
|
307 | value = '%i' % draft | |
306 | for root in repo._phasecache.phaseroots[draft]: |
|
308 | for root in repo._phasecache.phaseroots[draft]: | |
307 | keys[hex(root)] = value |
|
309 | keys[hex(root)] = value | |
308 |
|
310 | |||
309 | if repo.ui.configbool('phases', 'publish', True): |
|
311 | if repo.ui.configbool('phases', 'publish', True): | |
310 | # Add an extra data entry to let the remote know we are a publishing
|
312 | # Add an extra data entry to let the remote know we are a publishing | |
311 | # repo. A publishing repo can't just pretend it is an old repo.
|
313 | # repo. A publishing repo can't just pretend it is an old repo. | |
312 | # When pushing to a publishing repo, the client still needs to
|
314 | # When pushing to a publishing repo, the client still needs to | |
313 | # push the phase boundary.
|
315 | # push the phase boundary. | |
314 | # |
|
316 | # | |
315 | # A push does not only push changesets. It also pushes phase data.
|
317 | # A push does not only push changesets. It also pushes phase data. | |
316 | # New phase data may apply to common changesets which won't be
|
318 | # New phase data may apply to common changesets which won't be | |
317 | # pushed (as they are common). Here is a very simple example:
|
319 | # pushed (as they are common). Here is a very simple example: | |
318 | # |
|
320 | # | |
319 | # 1) repo A pushes changeset X as draft to repo B
|
321 | # 1) repo A pushes changeset X as draft to repo B | |
320 | # 2) repo B makes changeset X public
|
322 | # 2) repo B makes changeset X public | |
321 | # 3) repo B pushes to repo A. X is not pushed, but the data that
|
323 | # 3) repo B pushes to repo A. X is not pushed, but the data that | |
322 | # X is now public should be
|
324 | # X is now public should be | |
323 | # |
|
325 | # | |
324 | # The server can't handle it on its own as it has no idea of
|
326 | # The server can't handle it on its own as it has no idea of | |
325 | # client phase data. |
|
327 | # client phase data. | |
326 | keys['publishing'] = 'True' |
|
328 | keys['publishing'] = 'True' | |
327 | return keys |
|
329 | return keys | |
328 |
|
330 | |||
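Over pushkey, phases travel as the flat string-to-string mapping built by listphases() above: each draft root's hex node maps to '1', and a publishing repository adds a 'publishing' marker so new clients can tell it apart from an old server. An illustrative payload (the hash is made up):

    # Illustrative pushkey payload such as listphases() might return.
    draft = 1
    keys = {
        'd047485b3896813b2a624e86201983520f003206': '%i' % draft,  # draft root
        'publishing': 'True',     # extra marker added by publishing repos
    }

    draftroots = [k for k, v in keys.items() if k != 'publishing' and v == '1']
    print(draftroots)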
329 | def pushphase(repo, nhex, oldphasestr, newphasestr): |
|
331 | def pushphase(repo, nhex, oldphasestr, newphasestr): | |
330 | """List phases root for serialization over pushkey""" |
|
332 | """List phases root for serialization over pushkey""" | |
331 | repo = repo.unfiltered() |
|
333 | repo = repo.unfiltered() | |
332 | lock = repo.lock() |
|
334 | lock = repo.lock() | |
333 | try: |
|
335 | try: | |
334 | currentphase = repo[nhex].phase() |
|
336 | currentphase = repo[nhex].phase() | |
335 | newphase = abs(int(newphasestr)) # let's avoid negative index surprise |
|
337 | newphase = abs(int(newphasestr)) # let's avoid negative index surprise | |
336 | oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise |
|
338 | oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise | |
337 | if currentphase == oldphase and newphase < oldphase: |
|
339 | if currentphase == oldphase and newphase < oldphase: | |
338 | advanceboundary(repo, newphase, [bin(nhex)]) |
|
340 | advanceboundary(repo, newphase, [bin(nhex)]) | |
339 | return 1 |
|
341 | return 1 | |
340 | elif currentphase == newphase: |
|
342 | elif currentphase == newphase: | |
341 | # raced, but got correct result |
|
343 | # raced, but got correct result | |
342 | return 1 |
|
344 | return 1 | |
343 | else: |
|
345 | else: | |
344 | return 0 |
|
346 | return 0 | |
345 | finally: |
|
347 | finally: | |
346 | lock.release() |
|
348 | lock.release() | |
347 |
|
349 | |||
348 | def analyzeremotephases(repo, subset, roots): |
|
350 | def analyzeremotephases(repo, subset, roots): | |
349 | """Compute phases heads and root in a subset of node from root dict |
|
351 | """Compute phases heads and root in a subset of node from root dict | |
350 |
|
352 | |||
351 | * subset is heads of the subset |
|
353 | * subset is heads of the subset | |
352 | * roots is {<nodeid> => phase} mapping. key and value are string. |
|
354 | * roots is {<nodeid> => phase} mapping. key and value are string. | |
353 |
|
355 | |||
354 | Accept unknown element input |
|
356 | Accept unknown element input | |
355 | """ |
|
357 | """ | |
356 | repo = repo.unfiltered() |
|
358 | repo = repo.unfiltered() | |
357 | # build list from dictionary |
|
359 | # build list from dictionary | |
358 | draftroots = [] |
|
360 | draftroots = [] | |
359 | nodemap = repo.changelog.nodemap # to filter unknown nodes |
|
361 | nodemap = repo.changelog.nodemap # to filter unknown nodes | |
360 | for nhex, phase in roots.iteritems(): |
|
362 | for nhex, phase in roots.iteritems(): | |
361 | if nhex == 'publishing': # ignore data related to publish option |
|
363 | if nhex == 'publishing': # ignore data related to publish option | |
362 | continue |
|
364 | continue | |
363 | node = bin(nhex) |
|
365 | node = bin(nhex) | |
364 | phase = int(phase) |
|
366 | phase = int(phase) | |
365 | if phase == 0: |
|
367 | if phase == 0: | |
366 | if node != nullid: |
|
368 | if node != nullid: | |
367 | repo.ui.warn(_('ignoring inconsistent public root' |
|
369 | repo.ui.warn(_('ignoring inconsistent public root' | |
368 | ' from remote: %s\n') % nhex) |
|
370 | ' from remote: %s\n') % nhex) | |
369 | elif phase == 1: |
|
371 | elif phase == 1: | |
370 | if node in nodemap: |
|
372 | if node in nodemap: | |
371 | draftroots.append(node) |
|
373 | draftroots.append(node) | |
372 | else: |
|
374 | else: | |
373 | repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n') |
|
375 | repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n') | |
374 | % (phase, nhex)) |
|
376 | % (phase, nhex)) | |
375 | # compute heads |
|
377 | # compute heads | |
376 | publicheads = newheads(repo, subset, draftroots) |
|
378 | publicheads = newheads(repo, subset, draftroots) | |
377 | return publicheads, draftroots |
|
379 | return publicheads, draftroots | |
378 |
|
380 | |||
379 | def newheads(repo, heads, roots): |
|
381 | def newheads(repo, heads, roots): | |
380 | """compute new head of a subset minus another |
|
382 | """compute new head of a subset minus another | |
381 |
|
383 | |||
382 | * `heads`: define the first subset |
|
384 | * `heads`: define the first subset | |
383 | * `roots`: define the second we subtract from the first""" |
|
385 | * `roots`: define the second we subtract from the first""" | |
384 | repo = repo.unfiltered() |
|
386 | repo = repo.unfiltered() | |
385 | revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))', |
|
387 | revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))', | |
386 | heads, roots, roots, heads) |
|
388 | heads, roots, roots, heads) | |
387 | return [c.node() for c in revset] |
|
389 | return [c.node() for c in revset] | |
388 |
|
390 | |||
389 |
|
391 | |||
390 | def newcommitphase(ui): |
|
392 | def newcommitphase(ui): | |
391 | """helper to get the target phase of new commit |
|
393 | """helper to get the target phase of new commit | |
392 |
|
394 | |||
393 | Handle all possible values for the phases.new-commit options. |
|
395 | Handle all possible values for the phases.new-commit options. | |
394 |
|
396 | |||
395 | """ |
|
397 | """ | |
396 | v = ui.config('phases', 'new-commit', draft) |
|
398 | v = ui.config('phases', 'new-commit', draft) | |
397 | try: |
|
399 | try: | |
398 | return phasenames.index(v) |
|
400 | return phasenames.index(v) | |
399 | except ValueError: |
|
401 | except ValueError: | |
400 | try: |
|
402 | try: | |
401 | return int(v) |
|
403 | return int(v) | |
402 | except ValueError: |
|
404 | except ValueError: | |
403 | msg = _("phases.new-commit: not a valid phase name ('%s')") |
|
405 | msg = _("phases.new-commit: not a valid phase name ('%s')") | |
404 | raise error.ConfigError(msg % v) |
|
406 | raise error.ConfigError(msg % v) | |
405 |
|
407 | |||
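newcommitphase() above accepts either a phase name or a bare integer for phases.new-commit and rejects anything else. A standalone sketch of that fallback chain, mirroring the logic without a ui object (the helper name is invented):

    # Toy sketch of the phases.new-commit parsing fallback: name first, then int.
    phasenames = ['public', 'draft', 'secret']

    def parse_new_commit(value):
        try:
            return phasenames.index(value)       # 'secret' -> 2
        except ValueError:
            try:
                return int(value)                # '1' -> 1
            except ValueError:
                raise ValueError("not a valid phase name (%r)" % value)

    print(parse_new_commit('secret'))   # 2
    print(parse_new_commit('1'))        # 1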
406 | def hassecret(repo): |
|
408 | def hassecret(repo): | |
407 | """utility function that checks if a repo has any secret changesets."""
|
409 | """utility function that checks if a repo has any secret changesets.""" | |
408 | return bool(repo._phasecache.phaseroots[2]) |
|
410 | return bool(repo._phasecache.phaseroots[2]) |
@@ -1,183 +1,226 b'' | |||||
1 | $ cat >> $HGRCPATH <<EOF |
|
1 | $ cat >> $HGRCPATH <<EOF | |
2 | > [extensions] |
|
2 | > [extensions] | |
3 | > graphlog= |
|
3 | > graphlog= | |
4 | > rebase= |
|
4 | > rebase= | |
5 | > |
|
5 | > | |
6 | > [phases] |
|
6 | > [phases] | |
7 | > publish=False |
|
7 | > publish=False | |
8 | > |
|
8 | > | |
9 | > [alias] |
|
9 | > [alias] | |
10 | > tglog = log -G --template "{rev}:{phase} '{desc}' {branches}\n" |
|
10 | > tglog = log -G --template "{rev}:{phase} '{desc}' {branches}\n" | |
11 | > EOF |
|
11 | > EOF | |
12 |
|
12 | |||
13 |
|
13 | |||
14 | $ hg init a |
|
14 | $ hg init a | |
15 | $ cd a |
|
15 | $ cd a | |
16 |
|
16 | |||
17 | $ echo c1 > common |
|
17 | $ echo c1 > common | |
18 | $ hg add common |
|
18 | $ hg add common | |
19 | $ hg ci -m C1 |
|
19 | $ hg ci -m C1 | |
20 |
|
20 | |||
21 | $ echo c2 >> common |
|
21 | $ echo c2 >> common | |
22 | $ hg ci -m C2 |
|
22 | $ hg ci -m C2 | |
23 |
|
23 | |||
24 | $ echo c3 >> common |
|
24 | $ echo c3 >> common | |
25 | $ hg ci -m C3 |
|
25 | $ hg ci -m C3 | |
26 |
|
26 | |||
27 | $ hg up -q -C 1 |
|
27 | $ hg up -q -C 1 | |
28 |
|
28 | |||
29 | $ echo l1 >> extra |
|
29 | $ echo l1 >> extra | |
30 | $ hg add extra |
|
30 | $ hg add extra | |
31 | $ hg ci -m L1 |
|
31 | $ hg ci -m L1 | |
32 | created new head |
|
32 | created new head | |
33 |
|
33 | |||
34 | $ sed -e 's/c2/l2/' common > common.new |
|
34 | $ sed -e 's/c2/l2/' common > common.new | |
35 | $ mv common.new common |
|
35 | $ mv common.new common | |
36 | $ hg ci -m L2 |
|
36 | $ hg ci -m L2 | |
37 |
|
37 | |||
38 | $ hg phase --force --secret 2 |
|
38 | $ hg phase --force --secret 2 | |
39 |
|
39 | |||
40 | $ hg tglog |
|
40 | $ hg tglog | |
41 | @ 4:draft 'L2' |
|
41 | @ 4:draft 'L2' | |
42 | | |
|
42 | | | |
43 | o 3:draft 'L1' |
|
43 | o 3:draft 'L1' | |
44 | | |
|
44 | | | |
45 | | o 2:secret 'C3' |
|
45 | | o 2:secret 'C3' | |
46 | |/ |
|
46 | |/ | |
47 | o 1:draft 'C2' |
|
47 | o 1:draft 'C2' | |
48 | | |
|
48 | | | |
49 | o 0:draft 'C1' |
|
49 | o 0:draft 'C1' | |
50 |
|
50 | |||
51 |
|
51 | |||
52 | Conflicting rebase: |
|
52 | Conflicting rebase: | |
53 |
|
53 | |||
54 | $ hg rebase -s 3 -d 2 |
|
54 | $ hg rebase -s 3 -d 2 | |
55 | merging common |
|
55 | merging common | |
56 | warning: conflicts during merge. |
|
56 | warning: conflicts during merge. | |
57 | merging common incomplete! (edit conflicts, then use 'hg resolve --mark') |
|
57 | merging common incomplete! (edit conflicts, then use 'hg resolve --mark') | |
58 | unresolved conflicts (see hg resolve, then hg rebase --continue) |
|
58 | unresolved conflicts (see hg resolve, then hg rebase --continue) | |
59 | [1] |
|
59 | [1] | |
60 |
|
60 | |||
61 | Abort: |
|
61 | Abort: | |
62 |
|
62 | |||
63 | $ hg rebase --abort |
|
63 | $ hg rebase --abort | |
64 | saved backup bundle to $TESTTMP/a/.hg/strip-backup/*-backup.hg (glob) |
|
64 | saved backup bundle to $TESTTMP/a/.hg/strip-backup/*-backup.hg (glob) | |
65 | rebase aborted |
|
65 | rebase aborted | |
66 |
|
66 | |||
67 | $ hg tglog |
|
67 | $ hg tglog | |
68 | @ 4:draft 'L2' |
|
68 | @ 4:draft 'L2' | |
69 | | |
|
69 | | | |
70 | o 3:draft 'L1' |
|
70 | o 3:draft 'L1' | |
71 | | |
|
71 | | | |
72 | | o 2:secret 'C3' |
|
72 | | o 2:secret 'C3' | |
73 | |/ |
|
73 | |/ | |
74 | o 1:draft 'C2' |
|
74 | o 1:draft 'C2' | |
75 | | |
|
75 | | | |
76 | o 0:draft 'C1' |
|
76 | o 0:draft 'C1' | |
77 |
|
77 | |||
78 | Test safety for inconsistent rebase state, which may be created (and |
|
78 | Test safety for inconsistent rebase state, which may be created (and | |
79 | forgotten) by Mercurial earlier than 2.7. This emulates Mercurial |
|
79 | forgotten) by Mercurial earlier than 2.7. This emulates Mercurial | |
80 | earlier than 2.7 by renaming ".hg/rebasestate" temporarily. |
|
80 | earlier than 2.7 by renaming ".hg/rebasestate" temporarily. | |
81 |
|
81 | |||
82 | $ hg rebase -s 3 -d 2 |
|
82 | $ hg rebase -s 3 -d 2 | |
83 | merging common |
|
83 | merging common | |
84 | warning: conflicts during merge. |
|
84 | warning: conflicts during merge. | |
85 | merging common incomplete! (edit conflicts, then use 'hg resolve --mark') |
|
85 | merging common incomplete! (edit conflicts, then use 'hg resolve --mark') | |
86 | unresolved conflicts (see hg resolve, then hg rebase --continue) |
|
86 | unresolved conflicts (see hg resolve, then hg rebase --continue) | |
87 | [1] |
|
87 | [1] | |
88 |
|
88 | |||
89 | $ mv .hg/rebasestate .hg/rebasestate.back |
|
89 | $ mv .hg/rebasestate .hg/rebasestate.back | |
90 | $ hg update --quiet --clean 2 |
|
90 | $ hg update --quiet --clean 2 | |
91 | $ hg --config extensions.mq= strip --quiet "destination()" |
|
91 | $ hg --config extensions.mq= strip --quiet "destination()" | |
92 | $ mv .hg/rebasestate.back .hg/rebasestate |
|
92 | $ mv .hg/rebasestate.back .hg/rebasestate | |
93 |
|
93 | |||
94 | $ hg rebase --continue |
|
94 | $ hg rebase --continue | |
95 | abort: cannot continue inconsistent rebase |
|
95 | abort: cannot continue inconsistent rebase | |
96 | (use "hg rebase --abort" to clear borken state) |
|
96 | (use "hg rebase --abort" to clear borken state) | |
97 | [255] |
|
97 | [255] | |
98 | $ hg summary | grep '^rebase: ' |
|
98 | $ hg summary | grep '^rebase: ' | |
99 | rebase: (use "hg rebase --abort" to clear broken state) |
|
99 | rebase: (use "hg rebase --abort" to clear broken state) | |
100 | $ hg rebase --abort |
|
100 | $ hg rebase --abort | |
101 | rebase aborted (no revision is removed, only broken state is cleared) |
|
101 | rebase aborted (no revision is removed, only broken state is cleared) | |
102 |
|
102 | |||
103 | $ cd .. |
|
103 | $ cd .. | |
104 |
105 |
106 | Construct new repo:
107 |
108 |   $ hg init b
109 |   $ cd b
110 |
111 |   $ echo a > a
112 |   $ hg ci -Am A
113 |   adding a
114 |
115 |   $ echo b > b
116 |   $ hg ci -Am B
117 |   adding b
118 |
119 |   $ echo c > c
120 |   $ hg ci -Am C
121 |   adding c
122 |
123 |   $ hg up -q 0
124 |
125 |   $ echo b > b
126 |   $ hg ci -Am 'B bis'
127 |   adding b
128 |   created new head
129 |
130 |   $ echo c1 > c
131 |   $ hg ci -Am C1
132 |   adding c
133 |
134 |   $ hg phase --force --secret 1
135 |   $ hg phase --public 1
136 |
137 | Rebase and abort without generating new changesets:
138 |
139 |   $ hg tglog
140 |   @ 4:draft 'C1'
141 |   |
142 |   o 3:draft 'B bis'
143 |   |
144 |   | o 2:secret 'C'
145 |   | |
146 |   | o 1:public 'B'
147 |   |/
148 |   o 0:public 'A'
149 |
150 |   $ hg rebase -b 4 -d 2
151 |   merging c
152 |   warning: conflicts during merge.
153 |   merging c incomplete! (edit conflicts, then use 'hg resolve --mark')
154 |   unresolved conflicts (see hg resolve, then hg rebase --continue)
155 |   [1]
156 |
157 |   $ hg tglog
158 |   @ 4:draft 'C1'
159 |   |
160 |   o 3:draft 'B bis'
161 |   |
162 |   | @ 2:secret 'C'
163 |   | |
164 |   | o 1:public 'B'
165 |   |/
166 |   o 0:public 'A'
167 |
168 |   $ hg rebase -a
169 |   rebase aborted
170 |
171 |   $ hg tglog
172 |   @ 4:draft 'C1'
173 |   |
174 |   o 3:draft 'B bis'
175 |   |
176 |   | o 2:secret 'C'
177 |   | |
178 |   | o 1:public 'B'
179 |   |/
180 |   o 0:public 'A'
181 |
182 |
183 |   $ cd ..
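
The three hg tglog calls above are what verify that an aborted rebase creates no new changesets. Purely as an illustration (not part of the test suite; the repository path and helper name are assumptions), the same property can be checked by snapshotting the node list before the conflicting rebase and after the abort:

    import subprocess

    def node_list(repo):
        # All changeset hashes in the repository, in revision order.
        out = subprocess.check_output(
            ['hg', '-R', repo, 'log', '-r', 'all()', '--template', '{node}\n'])
        return out.splitlines()

    before = node_list('b')
    # ... run the conflicting 'hg rebase -b 4 -d 2' and then 'hg rebase -a' ...
    after = node_list('b')
    assert before == after, 'abort left extra or missing changesets behind'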
|
184 |
185 | rebase abort should not leave working copy in a merge state if tip-1 is public
186 | (issue4082)
187 |
188 |   $ hg init abortpublic
189 |   $ cd abortpublic
190 |   $ echo a > a && hg ci -Aqm a
191 |   $ hg book master
192 |   $ hg book foo
193 |   $ echo b > b && hg ci -Aqm b
194 |   $ hg up -q master
195 |   $ echo c > c && hg ci -Aqm c
196 |   $ hg phase -p -r .
197 |   $ hg up -q foo
198 |   $ echo C > c && hg ci -Aqm C
199 |   $ hg log -G --template "{rev} {desc} {bookmarks}"
200 |   @ 3 C foo
201 |   |
202 |   | o 2 c master
203 |   | |
204 |   o | 1 b
205 |   |/
206 |   o 0 a
207 |
208 |
209 |   $ hg rebase -d master -r foo
210 |   merging c
211 |   warning: conflicts during merge.
212 |   merging c incomplete! (edit conflicts, then use 'hg resolve --mark')
213 |   unresolved conflicts (see hg resolve, then hg rebase --continue)
214 |   [1]
215 |   $ hg rebase --abort
216 |   rebase aborted
217 |   $ hg log -G --template "{rev} {desc} {bookmarks}"
218 |   @ 3 C foo
219 |   |
220 |   | o 2 c master
221 |   | |
222 |   o | 1 b
223 |   |/
224 |   o 0 a
225 |
226 |   $ cd ..
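
"Merge state" in the issue4082 description means the working directory being left with two parents after the failed rebase. A hypothetical check (the helper below is mine, not part of the patch) simply counts the parents reported by hg parents:

    import subprocess

    def in_merge_state(repo):
        # The working directory is mid-merge when it has two parents.
        out = subprocess.check_output(
            ['hg', '-R', repo, 'parents', '--template', '{node}\n'])
        return len(out.splitlines()) > 1

After the hg rebase --abort above, this should return False for the abortpublic repository; the final hg log -G output confirms that '@' sits on a single parent again.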