# rebase.py - rebasing feature for mercurial
#
# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''command to move sets of revisions to a different ancestor

This extension lets you rebase changesets in an existing Mercurial
repository.

For more information:
https://mercurial-scm.org/wiki/RebaseExtension
'''

from __future__ import absolute_import

import errno
import os

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    short,
)
from mercurial import (
    bookmarks,
    cmdutil,
    commands,
    copies,
    destutil,
    dirstateguard,
    error,
    extensions,
    hg,
    lock,
    merge as mergemod,
    mergeutil,
    obsolete,
    obsutil,
    patch,
    phases,
    registrar,
    repair,
    revset,
    revsetlang,
    scmutil,
    smartset,
    util,
)

release = lock.release
templateopts = cmdutil.templateopts

# The following constants are used throughout the rebase module. The ordering
# of their values must be maintained.

# Indicates that a revision needs to be rebased
revtodo = -1
revtodostr = '-1'

# legacy revstates no longer needed in current code
# -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
legacystates = {'-2', '-3', '-4', '-5'}

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem('commands', 'rebase.requiredest',
    default=False,
)

configitem('experimental', 'rebaseskipobsolete',
    default=True,
)
configitem('rebase', 'singletransaction',
    default=False,
)

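# Illustrative hgrc snippet (not part of this module): how the options
# registered above might be set by a user; the values shown are the
# non-default choices.
#
#   [commands]
#   rebase.requiredest = True
#
#   [experimental]
#   rebaseskipobsolete = False
#
#   [rebase]
#   singletransaction = True
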
def _nothingtorebase():
    return 1

def _savegraft(ctx, extra):
    s = ctx.extra().get('source', None)
    if s is not None:
        extra['source'] = s
    s = ctx.extra().get('intermediate-source', None)
    if s is not None:
        extra['intermediate-source'] = s

def _savebranch(ctx, extra):
    extra['branch'] = ctx.branch()

def _makeextrafn(copiers):
    """make an extrafn out of the given copy-functions.

    A copy function takes a context and an extra dict, and mutates the
    extra dict as needed based on the given context.
    """
    def extrafn(ctx, extra):
        for c in copiers:
            c(ctx, extra)
    return extrafn

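# Illustrative sketch (hypothetical usage, not called here): composing the
# copiers defined above with _makeextrafn() yields a single extrafn that
# first preserves graft metadata and then records the branch name:
#
#   fn = _makeextrafn([_savegraft, _savebranch])
#   extra = {}
#   fn(ctx, extra)  # mutates extra in place, e.g. extra['branch'] = ...
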
def _destrebase(repo, sourceset, destspace=None):
    """small wrapper around destmerge to pass the right extra args

    Please wrap destutil.destmerge instead."""
    return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
                              onheadcheck=False, destspace=destspace)

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``

    # default destination for rebase.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # i18n: "_rebasedefaultdest" is a keyword
    sourceset = None
    if x is not None:
        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
    return subset & smartset.baseset([_destrebase(repo, sourceset)])

def _ctxdesc(ctx):
    """short description for a context"""
    desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                           ctx.description().split('\n', 1)[0])
    repo = ctx.repo()
    names = []
    for nsname, ns in repo.names.iteritems():
        if nsname == 'branches':
            continue
        names.extend(ns.names(repo, ctx.node()))
    if names:
        desc += ' (%s)' % ' '.join(names)
    return desc

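# Example output of _ctxdesc() (hypothetical values): for rev 42 with a
# bookmark 'myfeature' attached, it returns something like
#   '42:f3a1b2c3d4e5 "fix frobnication" (myfeature)'
# i.e. rev:node "first line of description" plus any non-branch names.
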
class rebaseruntime(object):
    """This class is a container for rebase runtime state"""
    def __init__(self, repo, ui, opts=None):
        if opts is None:
            opts = {}

        # prepared: whether we have rebasestate prepared or not. Currently it
        # decides whether "self.repo" is unfiltered or not.
        # The rebasestate has explicit hash to hash instructions not depending
        # on visibility. If rebasestate exists (in-memory or on-disk), use
        # unfiltered repo to avoid visibility issues.
        # Before knowing rebasestate (i.e. when starting a new rebase (not
        # --continue or --abort)), the original repo should be used so
        # visibility-dependent revsets are correct.
        self.prepared = False
        self._repo = repo

        self.ui = ui
        self.opts = opts
        self.originalwd = None
        self.external = nullrev
        # Mapping between the old revision id and either the new rebased
        # revision or what needs to be done with the old revision. The state
        # dict will be what contains most of the rebase progress state.
        self.state = {}
        self.activebookmark = None
        self.destmap = {}
        self.skipped = set()

        self.collapsef = opts.get('collapse', False)
        self.collapsemsg = cmdutil.logmessage(ui, opts)
        self.date = opts.get('date', None)

        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        self.extrafns = [_savegraft]
        if e:
            self.extrafns = [e]

        self.keepf = opts.get('keep', False)
        self.keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        self.keepopen = opts.get('keepopen', False)
        self.obsoletenotrebased = {}

    @property
    def repo(self):
        if self.prepared:
            return self._repo.unfiltered()
        else:
            return self._repo

    def storestatus(self, tr=None):
        """Store the current status to allow recovery"""
        if tr:
            tr.addfilegenerator('rebasestate', ('rebasestate',),
                                self._writestatus, location='plain')
        else:
            with self.repo.vfs("rebasestate", "w") as f:
                self._writestatus(f)

    def _writestatus(self, f):
        repo = self.repo
        assert repo.filtername is None
        f.write(repo[self.originalwd].hex() + '\n')
        # was "dest". we now write dest per src root below.
        f.write('\n')
        f.write(repo[self.external].hex() + '\n')
        f.write('%d\n' % int(self.collapsef))
        f.write('%d\n' % int(self.keepf))
        f.write('%d\n' % int(self.keepbranchesf))
        f.write('%s\n' % (self.activebookmark or ''))
        destmap = self.destmap
        for d, v in self.state.iteritems():
            oldrev = repo[d].hex()
            if v >= 0:
                newrev = repo[v].hex()
            else:
                newrev = v
            destnode = repo[destmap[d]].hex()
            f.write("%s:%s:%s\n" % (oldrev, newrev, destnode))
        repo.ui.debug('rebase status stored\n')

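# Sketch of the on-disk .hg/rebasestate layout produced above, matching
# what restorestatus() parses below (field order, one item per line):
#
#   <originalwd node hash>
#   <empty; legacy "dest" field>
#   <external node hash>
#   0|1   collapse flag
#   0|1   keep flag
#   0|1   keepbranches flag
#   <active bookmark name, possibly empty>
#   <oldrev hash>:<newrev hash or -1>:<dest hash>   one line per source rev
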
    def restorestatus(self):
        """Restore a previously stored status"""
        self.prepared = True
        repo = self.repo
        assert repo.filtername is None
        keepbranches = None
        legacydest = None
        collapse = False
        external = nullrev
        activebookmark = None
        state = {}
        destmap = {}

        try:
            f = repo.vfs("rebasestate")
            for i, l in enumerate(f.read().splitlines()):
                if i == 0:
                    originalwd = repo[l].rev()
                elif i == 1:
                    # this line should be empty in newer versions, but legacy
                    # clients may still use it
                    if l:
                        legacydest = repo[l].rev()
                elif i == 2:
                    external = repo[l].rev()
                elif i == 3:
                    collapse = bool(int(l))
                elif i == 4:
                    keep = bool(int(l))
                elif i == 5:
                    keepbranches = bool(int(l))
                elif i == 6 and not (len(l) == 81 and ':' in l):
                    # line 6 is a recent addition, so for backwards
                    # compatibility check that the line doesn't look like the
                    # oldrev:newrev lines
                    activebookmark = l
                else:
                    args = l.split(':')
                    oldrev = args[0]
                    newrev = args[1]
                    if newrev in legacystates:
                        continue
                    if len(args) > 2:
                        destnode = args[2]
                    else:
                        destnode = legacydest
                    destmap[repo[oldrev].rev()] = repo[destnode].rev()
                    if newrev in (nullid, revtodostr):
                        state[repo[oldrev].rev()] = revtodo
                        # Legacy compat special case
                    else:
                        state[repo[oldrev].rev()] = repo[newrev].rev()

        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            cmdutil.wrongtooltocontinue(repo, _('rebase'))

        if keepbranches is None:
            raise error.Abort(_('.hg/rebasestate is incomplete'))

        skipped = set()
        # recompute the set of skipped revs
        if not collapse:
            seen = set(destmap.values())
            for old, new in sorted(state.items()):
                if new != revtodo and new in seen:
                    skipped.add(old)
                seen.add(new)
        repo.ui.debug('computed skipped revs: %s\n' %
                      (' '.join(str(r) for r in sorted(skipped)) or None))
        repo.ui.debug('rebase status resumed\n')

        self.originalwd = originalwd
        self.destmap = destmap
        self.state = state
        self.skipped = skipped
        self.collapsef = collapse
        self.keepf = keep
        self.keepbranchesf = keepbranches
        self.external = external
        self.activebookmark = activebookmark

    def _handleskippingobsolete(self, obsoleterevs, destmap):
        """Compute structures necessary for skipping obsolete revisions

        obsoleterevs: iterable of all obsolete revisions in rebaseset
        destmap: {srcrev: destrev} destination revisions
        """
        self.obsoletenotrebased = {}
        if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
            return
        obsoleteset = set(obsoleterevs)
        self.obsoletenotrebased = _computeobsoletenotrebased(self.repo,
                                                             obsoleteset,
                                                             destmap)
        skippedset = set(self.obsoletenotrebased)
        _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)

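# Illustrative example (hypothetical revs): if obsolete rev 5 already has
# a successor in the destination at rev 9, self.obsoletenotrebased becomes
# {5: 9}; a value of None means the revision was pruned and has no
# successor. _performrebasesubset() consumes this mapping to skip such
# revisions instead of rebasing them.
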
    def _prepareabortorcontinue(self, isabort):
        try:
            self.restorestatus()
            self.collapsemsg = restorecollapsemsg(self.repo, isabort)
        except error.RepoLookupError:
            if isabort:
                clearstatus(self.repo)
                clearcollapsemsg(self.repo)
                self.repo.ui.warn(_('rebase aborted (no revision is removed,'
                                    ' only broken state is cleared)\n'))
                return 0
            else:
                msg = _('cannot continue inconsistent rebase')
                hint = _('use "hg rebase --abort" to clear broken state')
                raise error.Abort(msg, hint=hint)
        if isabort:
            return abort(self.repo, self.originalwd, self.destmap,
                         self.state, activebookmark=self.activebookmark)

    def _preparenewrebase(self, destmap):
        if not destmap:
            return _nothingtorebase()

        rebaseset = destmap.keys()
        allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
        if (not (self.keepf or allowunstable)
            and self.repo.revs('first(children(%ld) - %ld)',
                               rebaseset, rebaseset)):
            raise error.Abort(
                _("can't remove original changesets with"
                  " unrebased descendants"),
                hint=_('use --keep to keep original changesets'))

        result = buildstate(self.repo, destmap, self.collapsef)

        if not result:
            # Empty state built, nothing to rebase
            self.ui.status(_('nothing to rebase\n'))
            return _nothingtorebase()

        for root in self.repo.set('roots(%ld)', rebaseset):
            if not self.keepf and not root.mutable():
                raise error.Abort(_("can't rebase public changeset %s")
                                  % root,
                                  hint=_("see 'hg help phases' for details"))

        (self.originalwd, self.destmap, self.state) = result
        if self.collapsef:
            dests = set(self.destmap.values())
            if len(dests) != 1:
                raise error.Abort(
                    _('--collapse does not work with multiple destinations'))
            destrev = next(iter(dests))
            destancestors = self.repo.changelog.ancestors([destrev],
                                                          inclusive=True)
            self.external = externalparent(self.repo, self.state, destancestors)

        for destrev in sorted(set(destmap.values())):
            dest = self.repo[destrev]
            if dest.closesbranch() and not self.keepbranchesf:
                self.ui.status(_('reopening closed branch head %s\n') % dest)

        self.prepared = True

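# Illustrative shape of the core data structures once preparation is done
# (hypothetical revision numbers):
#
#   self.destmap = {4: 10, 5: 10}  # srcrev -> destination rev
#   self.state   = {4: -1, 5: -1}  # srcrev -> revtodo (-1) until rebased,
#                                  # then srcrev -> newly created rev
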
    def _performrebase(self, tr):
        repo, ui = self.repo, self.ui
        if self.keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            self.extrafns.insert(0, _savebranch)
            if self.collapsef:
                branches = set()
                for rev in self.state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.Abort(_('cannot collapse multiple named '
                                        'branches'))

        # Calculate self.obsoletenotrebased
        obsrevs = _filterobsoleterevs(self.repo, self.state)
        self._handleskippingobsolete(obsrevs, self.destmap)

        # Keep track of the active bookmarks in order to reset them later
        self.activebookmark = self.activebookmark or repo._activebookmark
        if self.activebookmark:
            bookmarks.deactivate(repo)

        # Store the state before we begin so users can run 'hg rebase --abort'
        # if we fail before the transaction closes.
        self.storestatus()

        cands = [k for k, v in self.state.iteritems() if v == revtodo]
        total = len(cands)
        pos = 0
        for subset in sortsource(self.destmap):
            pos = self._performrebasesubset(tr, subset, pos, total)
        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

    def _performrebasesubset(self, tr, subset, pos, total):
        repo, ui, opts = self.repo, self.ui, self.opts
        sortedrevs = repo.revs('sort(%ld, -topo)', subset)
        for rev in sortedrevs:
            dest = self.destmap[rev]
            ctx = repo[rev]
            desc = _ctxdesc(ctx)
            if self.state[rev] == rev:
                ui.status(_('already rebased %s\n') % desc)
            elif rev in self.obsoletenotrebased:
                succ = self.obsoletenotrebased[rev]
                if succ is None:
                    msg = _('note: not rebasing %s, it has no '
                            'successor\n') % desc
                else:
                    succdesc = _ctxdesc(repo[succ])
                    msg = (_('note: not rebasing %s, already in '
                             'destination as %s\n') % (desc, succdesc))
                repo.ui.status(msg)
                # Make clearrebased aware state[rev] is not a true successor
                self.skipped.add(rev)
                # Record rev as moved to its desired destination in self.state.
                # This helps bookmark and working parent movement.
                dest = max(adjustdest(repo, rev, self.destmap, self.state,
                                      self.skipped))
                self.state[rev] = dest
            elif self.state[rev] == revtodo:
                pos += 1
                ui.status(_('rebasing %s\n') % desc)
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
                            _('changesets'), total)
                p1, p2, base = defineparents(repo, rev, self.destmap,
                                             self.state, self.skipped,
                                             self.obsoletenotrebased)
                self.storestatus(tr=tr)
                storecollapsemsg(repo, self.collapsemsg)
                if len(repo[None].parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                     'rebase')
                        stats = rebasenode(repo, rev, p1, base, self.state,
                                           self.collapsef, dest)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                if not self.collapsef:
                    merging = p2 != nullrev
                    editform = cmdutil.mergeeditform(merging, 'rebase')
                    editor = cmdutil.getcommiteditor(editform=editform, **opts)
                    newnode = concludenode(repo, rev, p1, p2,
                                           extrafn=_makeextrafn(self.extrafns),
                                           editor=editor,
                                           keepbranches=self.keepbranchesf,
                                           date=self.date)
                    if newnode is None:
                        # If it ended up being a no-op commit, then the normal
                        # merge state clean-up path doesn't happen, so do it
                        # here. Fix issue5494
                        mergemod.mergestate.clean(repo)
                else:
                    # Skip commit if we are collapsing
                    repo.setparents(repo[p1].node())
                    newnode = None
                # Update the state
                if newnode is not None:
                    self.state[rev] = repo[newnode].rev()
                    ui.debug('rebased as %s\n' % short(newnode))
                else:
                    if not self.collapsef:
                        ui.warn(_('note: rebase of %d:%s created no changes '
                                  'to commit\n') % (rev, ctx))
                        self.skipped.add(rev)
                    self.state[rev] = p1
                    ui.debug('next revision set to %s\n' % p1)
            else:
                ui.status(_('already rebased %s as %s\n') %
                          (desc, repo[self.state[rev]]))
        return pos

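# Illustrative state transition (hypothetical revs) for one revision handled
# by _performrebasesubset(): an entry at revtodo either becomes the newly
# committed revision, or, when the rebase produced no changes to commit, the
# rev is added to self.skipped and mapped to p1:
#
#   before: self.state == {4: -1}
#   after:  self.state == {4: 11}     # rebased as rev 11, or
#           self.state == {4: p1rev}  # skipped (no changes)
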
    def _finishrebase(self):
        repo, ui, opts = self.repo, self.ui, self.opts
        if self.collapsef and not self.keepopen:
            p1, p2, _base = defineparents(repo, min(self.state), self.destmap,
                                          self.state, self.skipped,
                                          self.obsoletenotrebased)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if self.collapsemsg:
                commitmsg = self.collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in sorted(self.state):
                    if rebased not in self.skipped:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            revtoreuse = max(self.state)

            dsguard = None
            if ui.configbool('rebase', 'singletransaction'):
                dsguard = dirstateguard.dirstateguard(repo, 'rebase')
            with util.acceptintervention(dsguard):
                newnode = concludenode(repo, revtoreuse, p1, self.external,
                                       commitmsg=commitmsg,
                                       extrafn=_makeextrafn(self.extrafns),
                                       editor=editor,
                                       keepbranches=self.keepbranchesf,
                                       date=self.date)
            if newnode is not None:
                newrev = repo[newnode].rev()
                for oldrev in self.state.iterkeys():
                    self.state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, self.state, self.skipped, **opts)

        # restore original working directory
        # (we do this before stripping)
        newwd = self.state.get(self.originalwd, self.originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = self.originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        collapsedas = None
        if not self.keepf:
            if self.collapsef:
                collapsedas = newnode
            clearrebased(ui, repo, self.destmap, self.state, self.skipped,
                         collapsedas, self.keepf)

        clearstatus(repo)
        clearcollapsemsg(repo)

        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if self.skipped:
            skippedlen = len(self.skipped)
            ui.note(_("%d revisions have been skipped\n") % skippedlen)

        if (self.activebookmark and self.activebookmark in repo._bookmarks and
            repo['.'].node() == repo._bookmarks[self.activebookmark]):
            bookmarks.activate(repo, self.activebookmark)

@command('rebase',
    [('s', 'source', '',
     _('rebase the specified changeset and descendants'), _('REV')),
    ('b', 'base', '',
     _('rebase everything from branching point of specified changeset'),
     _('REV')),
    ('r', 'rev', [],
     _('rebase these revisions'),
     _('REV')),
    ('d', 'dest', '',
     _('rebase onto the specified changeset'), _('REV')),
    ('', 'collapse', False, _('collapse the rebased changesets')),
    ('m', 'message', '',
     _('use text as collapse commit message'), _('TEXT')),
    ('e', 'edit', False, _('invoke editor on commit messages')),
    ('l', 'logfile', '',
     _('read collapse commit message from file'), _('FILE')),
    ('k', 'keep', False, _('keep original changesets')),
    ('', 'keepbranches', False, _('keep original branch names')),
    ('D', 'detach', False, _('(DEPRECATED)')),
    ('i', 'interactive', False, _('(DEPRECATED)')),
    ('t', 'tool', '', _('specify merge tool')),
    ('c', 'continue', False, _('continue an interrupted rebase')),
    ('a', 'abort', False, _('abort an interrupted rebase'))] +
    templateopts,
    _('[-s REV | -b REV] [-d REV] [OPTION]'))
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    Published commits cannot be rebased (see :hg:`help phases`).
    To copy commits, see :hg:`help graft`.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    will use the same logic as :hg:`merge` to pick a destination. If
    the current branch contains exactly one other head, the other head
    is merged with by default. Otherwise, an explicit revision with
    which to merge must be provided. (The destination changeset is not
    modified by rebasing, but new changesets are added as its
    descendants.)

    Here are the ways to select changesets:

      1. Explicitly select them using ``--rev``.

      2. Use ``--source`` to select a root changeset and include all of its
         descendants.

      3. Use ``--base`` to select a changeset; rebase will find ancestors
         and their descendants which are not also ancestors of the destination.

      4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
         rebase will use ``--base .`` as above.

    Rebase will destroy original changesets unless you use ``--keep``.
    It will also move your bookmarks (even if you do).

    Some changesets may be dropped if they do not contribute changes
    (e.g. merges from the destination branch).

    Unlike ``merge``, rebase will do nothing if you are at the branch tip of
    a named branch with two heads. You will need to explicitly specify source
    and/or destination.

    If you need to use a tool to automate merge/conflict decisions, you
    can specify one with ``--tool``; see :hg:`help merge-tools`.
    As a caveat: the tool will not be used to mediate when a file was
    deleted; there is no hook presently available for this.

    If a rebase is interrupted to manually resolve a conflict, it can be
    continued with --continue/-c or aborted with --abort/-a.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

      Configuration Options:

      You can make rebase require a destination if you set the following config
      option::

        [commands]
        rebase.requiredest = True

      By default, rebase will close the transaction after each commit. For
      performance purposes, you can configure rebase to use a single transaction
      across the entire rebase. WARNING: This setting introduces a significant
      risk of losing the work you've done in a rebase if the rebase aborts
      unexpectedly::

        [rebase]
        singletransaction = True

      Return Values:

      Returns 0 on success, 1 if nothing to rebase or there are
      unresolved conflicts.

    """
    rbsrt = rebaseruntime(repo, ui, opts)

    with repo.wlock(), repo.lock():
        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        # search default destination in this space
        # used in the 'hg pull --rebase' case, see issue 5214.
        destspace = opts.get('_destspace')
        contf = opts.get('continue')
        abortf = opts.get('abort')
        if opts.get('interactive'):
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            if contf and abortf:
                raise error.Abort(_('cannot use both abort and continue'))
            if rbsrt.collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if abortf and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))
            if contf:
                ms = mergemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(abortf)
            if retcode is not None:
                return retcode
        else:
            destmap = _definedestmap(ui, repo, destf, srcf, basef, revf,
                                     destspace=destspace)
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode

        tr = None
        dsguard = None

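        # Note: util.acceptintervention closes (rather than aborts) the
        # transaction when InterventionRequired is raised, e.g. on merge
        # conflicts, so the on-disk rebase state survives for --continue.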
        singletr = ui.configbool('rebase', 'singletransaction')
        if singletr:
            tr = repo.transaction('rebase')
        with util.acceptintervention(tr):
            if singletr:
                dsguard = dirstateguard.dirstateguard(repo, 'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)

        rbsrt._finishrebase()

def _definedestmap(ui, repo, destf=None, srcf=None, basef=None, revf=None,
                   destspace=None):
    """use revisions argument to define destmap {srcrev: destrev}"""
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details
    if srcf and basef:
        raise error.Abort(_('cannot specify both a source and a base'))
    if revf and basef:
        raise error.Abort(_('cannot specify both a revision and a base'))
    if revf and srcf:
        raise error.Abort(_('cannot specify both a revision and a source'))

    cmdutil.checkunfinished(repo)
    cmdutil.bailifchanged(repo)

    if ui.configbool('commands', 'rebase.requiredest') and not destf:
        raise error.Abort(_('you must specify a destination'),
                          hint=_('use: hg rebase -d REV'))

    dest = None

    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_('empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, [srcf])
        if not src:
            ui.status(_('empty "source" revision set - nothing to rebase\n'))
            return None
        rebaseset = repo.revs('(%ld)::', src)
        assert rebaseset
    else:
        base = scmutil.revrange(repo, [basef or '.'])
        if not base:
            ui.status(_('empty "base" revision set - '
                        "can't compute rebase set\n"))
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = str(dest)

        roots = []  # selected children of branching points
        bpbase = {}  # {branchingpoint: [origbase]}
        for b in base:  # group bases by branching points
            bp = repo.revs('ancestor(%d, %d)', b, dest).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be abort with "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in bpbase.iteritems():  # calculate roots
            roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs('%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(_('nothing to rebase - %s is both "base"'
                                ' and destination\n') % dest)
                else:
                    ui.status(_('nothing to rebase - working directory '
                                'parent is also destination\n'))
            elif not repo.revs('%ld - ::%d', base, dest):
                if basef:
                    ui.status(_('nothing to rebase - "base" %s is '
                                'already an ancestor of destination '
                                '%s\n') %
                              ('+'.join(str(repo[r]) for r in base),
                               dest))
                else:
                    ui.status(_('nothing to rebase - working '
                                'directory parent is already an '
                                'ancestor of destination %s\n') % dest)
            else:  # can it happen?
                ui.status(_('nothing to rebase from %s to %s\n') %
                          ('+'.join(str(repo[r]) for r in base), dest))
            return None

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = str(dest)

    allsrc = revsetlang.formatspec('%ld', rebaseset)
    alias = {'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            if not ui.configbool('experimental', 'rebase.multidest'):
                raise
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias['SRC'] = revsetlang.formatspec('%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_('skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(_('rebase destination for %s is not '
                                        'unique') % repo[r])

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}

    if not destmap:
        ui.status(_('nothing to rebase - empty destination\n'))
        return None

    return destmap

def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state are collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        for p in repo[rev].parents():
            if (p.rev() not in state
                and p.rev() not in destancestors):
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise error.Abort(_('unable to collapse on top of %s, there is more '
                        'than one external parent: %s') %
                      (max(destancestors),
                       ', '.join(str(p) for p in sorted(parents))))

def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None,
                 keepbranches=False, date=None):
    '''Commit the working directory changes with parents p1 and p2. Reuse
    commit info from rev but also store useful information in extra.
    Return node of committed revision.'''
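    # With rebase.singletransaction=True the caller already holds a single
    # dirstateguard for the whole rebase, so no per-commit guard is taken.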
    dsguard = util.nullcontextmanager()
    if not repo.ui.configbool('rebase', 'singletransaction'):
        dsguard = dirstateguard.dirstateguard(repo, 'rebase')
    with dsguard:
        repo.setparents(repo[p1].node(), repo[p2].node())
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
        extra = {'rebase_source': ctx.hex()}
        if extrafn:
            extrafn(ctx, extra)

        destphase = max(ctx.phase(), phases.draft)
        overrides = {('phases', 'new-commit'): destphase}
        with repo.ui.configoverride(overrides, 'rebase'):
            if keepbranch:
                repo.ui.setconfig('ui', 'allowemptycommit', True)
            # Commit might fail if unresolved files exist
            if date is None:
                date = ctx.date()
            newnode = repo.commit(text=commitmsg, user=ctx.user(),
                                  date=date, extra=extra, editor=editor)

        repo.dirstate.setbranch(repo[newnode].branch())
        return newnode

def rebasenode(repo, rev, p1, base, state, collapse, dest):
    'Rebase a single revision rev on top of p1 using base as merge ancestor'
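    # Conflict markers produced by the merge below are labelled 'dest' and
    # 'source' instead of the default 'local' and 'other'.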
    # Merge phase
    # Update to destination and merge it with local
    if repo['.'].rev() != p1:
        repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
        mergemod.update(repo, p1, False, True)
    else:
        repo.ui.debug(" already in destination\n")
    repo.dirstate.write(repo.currenttransaction())
    repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
    if base is not None:
        repo.ui.debug(" detach base %d:%s\n" % (base, repo[base]))
    # When collapsing in-place, the parent is the common ancestor, we
    # have to allow merging with it.
    wctx = repo[None]
    stats = mergemod.update(repo, rev, True, True, base, collapse,
                            labels=['dest', 'source'])
    if collapse:
        copies.duplicatecopies(repo, wctx, rev, dest)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent, but *not*
        # duplicate any copies that have already been
        # performed in the destination.
        p1rev = repo[rev].p1().rev()
        copies.duplicatecopies(repo, wctx, rev, p1rev, skiprev=dest)
    return stats

def adjustdest(repo, rev, destmap, state, skipped):
    """adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when rebasing B+E to F, C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1     C'
        |/                       |
        A                        G <- destination of C, different

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

        H       C1 G1
       /|       | /
      F G       |/
    K | |  ->   K
    | C D       |
    | |/        |
    | B         | ...
    |/          |/
    A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.
        A

        C'     After rebasing C, when considering B's destination, use C'
        |      instead of the original C.
      B D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [s for s, d in state.items()
              if d > 0 and destmap[s] == dest and s not in skipped]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            candidate = repo.revs('max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    'rev %d should be rebased already at this time' % dest)
        result.append(adjusted)
    return result

def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is noop because of markers

    `rebaseobsrevs`: set of obsolete revisions in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination
    """
    # Obsolete node with successors not in dest leads to divergence
    divergenceok = ui.configbool('experimental',
                                 'allowdivergence')
    divergencebasecandidates = rebaseobsrevs - rebaseobsskipped

    if divergencebasecandidates and not divergenceok:
        divhashes = (str(repo[r])
                     for r in divergencebasecandidates)
        msg = _("this rebase will cause "
                "divergences from: %s")
        h = _("to force the rebase please set "
              "experimental.allowdivergence=True")
        raise error.Abort(msg % (",".join(divhashes),), hint=h)

def successorrevs(unfi, rev):
    """yield revision numbers for successors of rev"""
    assert unfi.filtername is None
    nodemap = unfi.changelog.nodemap
    for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
        if s in nodemap:
            yield nodemap[s]

def defineparents(repo, rev, destmap, state, skipped, obsskipped):
    """Return new parents and optionally a merge base for rev being rebased

    The destination specified by "dest" cannot always be used directly because
    a previous rebase result could affect the destination. For example,

          D E    rebase -r C+D+E -d B
          |/     C will be rebased to C'
        B C      D's new destination will be C' instead of B
        |/       E's new destination will be C' instead of B
        A

    The new parents of a merge are slightly more complicated. See the comment
    block below.
    """
    # use unfiltered changelog since successorrevs may return filtered nodes
    assert repo.filtername is None
    cl = repo.changelog
    def isancestor(a, b):
        # take revision numbers instead of nodes
        if a == b:
            return True
        elif a > b:
            return False
        return cl.isancestor(cl.node(a), cl.node(b))

    dest = destmap[rev]
    oldps = repo.changelog.parentrevs(rev)  # old parents
    newps = [nullrev, nullrev]  # new parents
    dests = adjustdest(repo, rev, destmap, state, skipped)
    bases = list(oldps)  # merge base candidates, initially just old parents

    if all(r == nullrev for r in oldps[1:]):
        # For non-merge changeset, just move p to adjusted dest as requested.
        newps[0] = dests[0]
    else:
        # For merge changeset, if we move p to dests[i] unconditionally, both
        # parents may change and the end result looks like "the merge loses a
        # parent", which is a surprise. This is a limitation because "--dest"
        # only accepts one dest per src.
        #
        # Therefore, only move p with reasonable conditions (in this order):
        # 1. use dest, if dest is a descendant of (p or one of p's successors)
        # 2. use p's rebased result, if p is rebased (state[p] > 0)
        #
        # Comparing with adjustdest, the logic here does some additional work:
        # 1. decide which parents will not be moved towards dest
        # 2. if the above decision is "no", should a parent still be moved
        #    because it was rebased?
        #
        # For example:
        #
        #    C    # "rebase -r C -d D" is an error since none of the parents
        #   /|    # can be moved. "rebase -r B+C -d D" will move C's parent
        #  A B D  # B (using rule "2."), since B will be rebased.
        #
        # The loop tries not to rely on the fact that a Mercurial node has
        # at most 2 parents.
        for i, p in enumerate(oldps):
            np = p  # new parent
            if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                np = dests[i]
            elif p in state and state[p] > 0:
                np = state[p]

            # "bases" only records "special" merge bases that cannot be
            # calculated from changelog DAG (i.e. isancestor(p, np) is False).
            # For example:
            #
            #  B'  # rebase -s B -d D, when B was rebased to B'. dest for C
            #  | C # is B', but merge base for C is B, instead of
            #  D | # changelog.ancestor(C, B') == A. If changelog DAG and
            #  | B # "state" edges are merged (so there will be an edge from
            #  |/  # B to B'), the merge base is still ancestor(C, B') in
            #  A   # the merged graph.
            #
            # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
            # which uses "virtual null merge" to explain this situation.
            if isancestor(p, np):
                bases[i] = nullrev

            # If one parent becomes an ancestor of the other, drop the ancestor
            for j, x in enumerate(newps[:i]):
                if x == nullrev:
                    continue
                if isancestor(np, x):  # CASE-1
                    np = nullrev
                elif isancestor(x, np):  # CASE-2
                    newps[j] = np
                    np = nullrev
                    # New parents forming an ancestor relationship does not
                    # mean the old parents have a similar relationship. Do not
                    # set bases[x] to nullrev.
                    bases[j], bases[i] = bases[i], bases[j]

            newps[i] = np

1194 | # "rebasenode" updates to new p1, and the old p1 will be used as merge |
|
1195 | # "rebasenode" updates to new p1, and the old p1 will be used as merge | |
1195 | # base. If only p2 changes, merging using unchanged p1 as merge base is |
|
1196 | # base. If only p2 changes, merging using unchanged p1 as merge base is | |
1196 | # suboptimal. Therefore swap parents to make the merge sane. |
|
1197 | # suboptimal. Therefore swap parents to make the merge sane. | |
1197 | if newps[1] != nullrev and oldps[0] == newps[0]: |
|
1198 | if newps[1] != nullrev and oldps[0] == newps[0]: | |
1198 | assert len(newps) == 2 and len(oldps) == 2 |
|
1199 | assert len(newps) == 2 and len(oldps) == 2 | |
1199 | newps.reverse() |
|
1200 | newps.reverse() | |
1200 | bases.reverse() |
|
1201 | bases.reverse() | |
1201 |
|
1202 | |||
1202 | # No parent change might be an error because we fail to make rev a |
|
1203 | # No parent change might be an error because we fail to make rev a | |
1203 | # descendent of requested dest. This can happen, for example: |
|
1204 | # descendent of requested dest. This can happen, for example: | |
1204 | # |
|
1205 | # | |
1205 | # C # rebase -r C -d D |
|
1206 | # C # rebase -r C -d D | |
1206 | # /| # None of A and B will be changed to D and rebase fails. |
|
1207 | # /| # None of A and B will be changed to D and rebase fails. | |
1207 | # A B D |
|
1208 | # A B D | |
1208 | if set(newps) == set(oldps) and dest not in newps: |
|
1209 | if set(newps) == set(oldps) and dest not in newps: | |
1209 | raise error.Abort(_('cannot rebase %d:%s without ' |
|
1210 | raise error.Abort(_('cannot rebase %d:%s without ' | |
1210 | 'moving at least one of its parents') |
|
1211 | 'moving at least one of its parents') | |
1211 | % (rev, repo[rev])) |
|
1212 | % (rev, repo[rev])) | |
1212 |
|
1213 | |||
    # Source should not be ancestor of dest. The check here guarantees it's
    # impossible. With multi-dest, the initial check does not cover complex
    # cases since we don't have abstractions to dry-run rebase cheaply.
    if any(p != nullrev and isancestor(rev, p) for p in newps):
        raise error.Abort(_('source is ancestor of destination'))

    # "rebasenode" updates to new p1, use the corresponding merge base.
    if bases[0] != nullrev:
        base = bases[0]
    else:
        base = None

    # Check if the merge will contain unwanted changes. That may happen if
    # there are multiple special (non-changelog ancestor) merge bases, which
    # cannot be handled well by the 3-way merge algorithm. For example:
    #
    #     F
    #    /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   # chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in bases if b != nullrev) > 1:
        unwanted = [None, None]  # unwanted[i]: unwanted revs if choose bases[i]
        for i, base in enumerate(bases):
            if base == nullrev:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            siderevs = list(repo.revs('((%ld-%d) %% (%d+%d))',
                                      bases, base, base, dest))

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [r for r, d in state.items()
                             if d > 0 and r not in obsskipped]
                merges = [r for r in rebaseset
                          if cl.parentrevs(r)[1] != nullrev]
                unwanted[i] = list(repo.revs('%ld - (::%ld) - %ld',
                                             siderevs, merges, rebaseset))

        # Choose a merge base that has a minimal number of unwanted revs.
        l, i = min((len(revs), i)
                   for i, revs in enumerate(unwanted) if revs is not None)
        base = bases[i]

        # newps[0] should match merge base if possible. Currently, if newps[i]
        # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
        # the other's ancestor. In that case, it's fine to not swap newps here.
        # (see CASE-1 and CASE-2 above)
        if i != 0 and newps[i] != nullrev:
            newps[0], newps[i] = newps[i], newps[0]

        # The merge will include unwanted revisions. Abort now. Revisit this if
        # we have a more advanced merge algorithm that handles multiple bases.
        if l > 0:
            unwanteddesc = _(' or ').join(
                (', '.join('%d:%s' % (r, repo[r]) for r in revs)
                 for revs in unwanted if revs is not None))
            raise error.Abort(
                _('rebasing %d:%s will include unwanted changes from %s')
                % (rev, repo[rev], unwanteddesc))

    repo.ui.debug(" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base

def isagitpatch(repo, patchname):
    'Return true if the given patch is in git format'
    mqpatch = os.path.join(repo.mq.path, patchname)
    for line in patch.linereader(open(mqpatch, 'rb')):
        if line.startswith('diff --git'):
            return True
    return False

def updatemq(repo, state, skipped, **opts):
    'Update rebased mq patches - finalize and then import them'
    mqrebase = {}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
                          (rev, p.name))
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(_('updating mq patch %s to %s:%s\n') %
                             (name, state[rev], repo[state[rev]]))
                mq.qimport(repo, (), patchname=name, git=isgit,
                           rev=[str(state[rev])])
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [s for s in original_series
                     if mq.guard_re.split(s, 1)[0] not in skippedpatches]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()

def storecollapsemsg(repo, collapsemsg):
    'Store the collapse message to allow recovery'
    collapsemsg = collapsemsg or ''
    f = repo.vfs("last-message.txt", "w")
    f.write("%s\n" % collapsemsg)
    f.close()

def clearcollapsemsg(repo):
    'Remove collapse message file'
    repo.vfs.unlinkpath("last-message.txt", ignoremissing=True)

def restorecollapsemsg(repo, isabort):
    'Restore previously stored collapse message'
    try:
        f = repo.vfs("last-message.txt")
        collapsemsg = f.readline().strip()
        f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if isabort:
            # Oh well, just abort like normal
            collapsemsg = ''
        else:
            raise error.Abort(_('missing .hg/last-message.txt for rebase'))
    return collapsemsg

def clearstatus(repo):
    'Remove the status files'
    # Make sure the active transaction won't write the state file
    tr = repo.currenttransaction()
    if tr:
        tr.removefilegenerator('rebasestate')
    repo.vfs.unlinkpath("rebasestate", ignoremissing=True)

def needupdate(repo, state):
    '''check whether we should `update --clean` away from a merge, or if
    somehow the working dir got forcibly updated, e.g. by older hg'''
    parents = [p.rev() for p in repo[None].parents()]

    # Are we in a merge state at all?
    if len(parents) < 2:
        return False

    # We should be standing on the first as-of-yet unrebased commit.
    firstunrebased = min([old for old, new in state.iteritems()
                          if new == nullrev])
    if firstunrebased in parents:
        return True

    return False

def abort(repo, originalwd, destmap, state, activebookmark=None):
    '''Restore the repository to its original state. Additional args:

    activebookmark: the name of the bookmark that should be active after the
    restore'''

    try:
        # If the first commits in the rebased set get skipped during the
        # rebase, their values within the state mapping will be the dest rev
        # id. The dstates list must not contain the dest rev (issue4896)
        dstates = [s for r, s in state.items() if s >= 0 and s != destmap[r]]
        immutable = [d for d in dstates if not repo[d].mutable()]
        cleanup = True
        if immutable:
            repo.ui.warn(_("warning: can't clean up public changesets %s\n")
                         % ', '.join(str(repo[r]) for r in immutable),
                         hint=_("see 'hg help phases' for details"))
            cleanup = False

        descendants = set()
        if dstates:
            descendants = set(repo.changelog.descendants(dstates))
        if descendants - set(dstates):
            repo.ui.warn(_("warning: new changesets detected on destination "
                           "branch, can't strip\n"))
            cleanup = False

        if cleanup:
            shouldupdate = False
            rebased = [s for r, s in state.items()
                       if s >= 0 and s != destmap[r]]
            if rebased:
                strippoints = [
                    c.node() for c in repo.set('roots(%ld)', rebased)]

            updateifonnodes = set(rebased)
            updateifonnodes.update(destmap.values())
            updateifonnodes.add(originalwd)
            shouldupdate = repo['.'].rev() in updateifonnodes

            # Update away from the rebase if necessary
            if shouldupdate or needupdate(repo, state):
                mergemod.update(repo, originalwd, False, True)

            # Strip from the first rebased revision
            if rebased:
                # no backup of rebased cset versions needed
                repair.strip(repo.ui, repo, strippoints)

        if activebookmark and activebookmark in repo._bookmarks:
            bookmarks.activate(repo, activebookmark)

    finally:
        clearstatus(repo)
        clearcollapsemsg(repo)
        repo.ui.warn(_('rebase aborted\n'))
    return 0

def sortsource(destmap):
    """yield source revisions in an order that ensures we only rebase
    things once

    If source and destination overlap, we should filter out revisions
    depending on other revisions which haven't been rebased yet.

    Yield a sorted list of revisions each time.

    For example, when rebasing A onto B and B onto C, this function yields
    [B], then [A], indicating B needs to be rebased first.

    Raise if there is a cycle so the rebase is impossible.
    """
    srcset = set(destmap)
    while srcset:
        srclist = sorted(srcset)
        result = []
        for r in srclist:
            if destmap[r] not in srcset:
                result.append(r)
        if not result:
            raise error.Abort(_('source and destination form a cycle'))
        srcset -= set(result)
        yield result

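# Illustrative sketch (not part of the original module), with
# hypothetical revision numbers: rebasing 10 onto 12 while 12 is itself
# being rebased onto 14 yields 12 first, then 10; a mutual dependency
# aborts instead of looping forever:
#
#   >>> list(sortsource({10: 12, 12: 14}))
#   [[12], [10]]
#   >>> list(sortsource({10: 12, 12: 10}))
#   Traceback (most recent call last):
#     ...
#   Abort: source and destination form a cycle
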
def buildstate(repo, destmap, collapse):
    '''Define which revisions are going to be rebased and where

    repo: repo
    destmap: {srcrev: destrev}
    '''
    rebaseset = destmap.keys()
    originalwd = repo['.'].rev()

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if 'qtip' in repo.tags():
        mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
        if set(destmap.values()) & mqapplied:
            raise error.Abort(_('cannot rebase onto an applied mq patch'))

    # Get "cycle" error early by exhausting the generator.
    sortedsrc = list(sortsource(destmap)) # a list of sorted revs
    if not sortedsrc:
        raise error.Abort(_('no matching revisions'))

    # Only check the first batch of revisions to rebase not depending on other
    # rebaseset. This means "source is ancestor of destination" for the second
    # (and following) batches of revisions are not checked here. We rely on
    # "defineparents" to do that check.
    roots = list(repo.set('roots(%ld)', sortedsrc[0]))
    if not roots:
        raise error.Abort(_('no matching revisions'))
    roots.sort()
    state = dict.fromkeys(rebaseset, revtodo)
    emptyrebase = (len(sortedsrc) == 1)
    for root in roots:
        dest = repo[destmap[root.rev()]]
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.Abort(_('source is ancestor of destination'))
        if commonbase == dest:
            wctx = repo[None]
            if dest == wctx.p1():
                # when rebasing to '.', it will use the current wd branch name
                samebranch = root.branch() == wctx.branch()
            else:
                samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and dest in root.parents():
                # mark the revision as done by setting its new revision
                # equal to its old (current) revision
                state[root.rev()] = root.rev()
                repo.ui.debug('source is a child of destination\n')
                continue

        emptyrebase = False
        repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
    if emptyrebase:
        return None
    for rev in sorted(state):
        parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
        # if all parents of this revision are done, then so is this revision
        if parents and all((state.get(p) == p for p in parents)):
            state[rev] = rev
    return originalwd, destmap, state

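# Illustrative sketch (not part of the original module): for a
# hypothetical destmap {5: 2, 6: 2} with the working directory at
# revision 2, buildstate() would return something like
#
#   (2,                     # originalwd
#    {5: 2, 6: 2},          # destmap, unchanged
#    {5: -1, 6: -1})        # state: both revisions marked revtodo
#
# Entries whose value equals their key (state[r] == r) denote revisions
# that are already in place and need no rebasing.
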
def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
                 keepf=False):
    """dispose of rebased revisions at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is
    the `collapsedas` node.

    If `keepf` is True, the rebase has --keep set and no nodes should be
    removed (but bookmarks still need to be moved).
    """
    tonode = repo.changelog.node
    replacements = {}
    moves = {}
    for rev, newrev in sorted(state.items()):
        if newrev >= 0 and newrev != rev:
            oldnode = tonode(rev)
            newnode = collapsedas or tonode(newrev)
            moves[oldnode] = newnode
            if not keepf:
                if rev in skipped:
                    succs = ()
                else:
                    succs = (newnode,)
                replacements[oldnode] = succs
    scmutil.cleanupnodes(repo, replacements, 'rebase', moves)

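# Illustrative sketch (not part of the original module): with a
# hypothetical state {5: 8} (revision 5 was rebased as 8) and 5 not in
# `skipped`, the loop above builds
#
#   moves        = {tonode(5): tonode(8)}
#   replacements = {tonode(5): (tonode(8),)}  # empty tuple if 5 skipped
#
# scmutil.cleanupnodes() then either strips the old node or marks it
# obsolete, depending on whether obsolescence markers are enabled.
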
def pullrebase(orig, ui, repo, *args, **opts):
    'Call rebase after pull if the latter has been invoked with --rebase'
    ret = None
    if opts.get('rebase'):
        if ui.configbool('commands', 'rebase.requiredest'):
            msg = _('rebase destination required by configuration')
            hint = _('use hg pull followed by hg rebase -d DEST')
            raise error.Abort(msg, hint=hint)

        with repo.wlock(), repo.lock():
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
                'please commit or shelve your changes first'))

            revsprepull = len(repo)
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            try:
                ret = orig(ui, repo, *args, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # the --rev option from pull conflicts with rebase's own
                # --rev, so drop it
                if 'rev' in opts:
                    del opts['rev']
                # positional argument from pull conflicts with rebase's own
                # --source.
                if 'source' in opts:
                    del opts['source']
                # revsprepull is the len of the repo, not revnum of tip.
                destspace = list(repo.changelog.revs(start=revsprepull))
                opts['_destspace'] = destspace
                try:
                    rebase(ui, repo, **opts)
                except error.NoMergeDestAbort:
                    # we can maybe update instead
                    rev, _a, _b = destutil.destupdate(repo)
                    if rev == repo['.'].rev():
                        ui.status(_('nothing to rebase\n'))
                    else:
                        ui.status(_('nothing to rebase - updating instead\n'))
                        # not passing argument to get the bare update behavior
                        # with warning and trumpets
                        commands.update(ui, repo)
    else:
        if opts.get('tool'):
            raise error.Abort(_('--tool can only be used with --rebase'))
        ret = orig(ui, repo, *args, **opts)

    return ret

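# Illustrative note (not part of the original module): because
# revsprepull is the repository length rather than the tip revision
# number, the revisions brought in by the pull are exactly
#
#   destspace = list(repo.changelog.revs(start=revsprepull))
#
# e.g. with 5 revisions before the pull (0..4) and 7 after, destspace
# is [5, 6], and rebase restricts destination guessing to that range.
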
def _filterobsoleterevs(repo, revs):
    """returns a set of the obsolete revisions in revs"""
    return set(r for r in revs if repo[r].obsolete())

def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
    """return a mapping obsolete => successor for all obsolete nodes to be
    rebased that have a successor in the destination

    obsolete => None entries in the mapping indicate nodes with no successor"""
    obsoletenotrebased = {}

    assert repo.filtername is None
    cl = repo.changelog
    nodemap = cl.nodemap
    for srcrev in rebaseobsrevs:
        srcnode = cl.node(srcrev)
        destnode = cl.node(destmap[srcrev])
        # XXX: more advanced APIs are required to handle split correctly
        successors = list(obsutil.allsuccessors(repo.obsstore, [srcnode]))
        if len(successors) == 1:
            # obsutil.allsuccessors includes the node itself. When the list
            # only contains one element, it means there are no successors.
            obsoletenotrebased[srcrev] = None
        else:
            for succnode in successors:
                if succnode == srcnode or succnode not in nodemap:
                    continue
                if cl.isancestor(succnode, destnode):
                    obsoletenotrebased[srcrev] = nodemap[succnode]
                    break

    return obsoletenotrebased

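# Illustrative sketch (not part of the original module), with
# hypothetical revisions: if obsolete revision 5 was already rewritten
# as revision 9 and 9 is an ancestor of the destination, the mapping
# gains {5: 9} and the rebase can simply skip 5; if 5 was pruned (no
# successor at all), the mapping gains {5: None}.
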
def summaryhook(ui, repo):
    if not repo.vfs.exists('rebasestate'):
        return
    try:
        rbsrt = rebaseruntime(repo, ui, {})
        rbsrt.restorestatus()
        state = rbsrt.state
    except error.RepoLookupError:
        # i18n: column positioning for "hg summary"
        msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
        ui.write(msg)
        return
    numrebased = len([i for i in state.itervalues() if i >= 0])
    # i18n: column positioning for "hg summary"
    ui.write(_('rebase: %s, %s (rebase --continue)\n') %
             (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
              ui.label(_('%d remaining'), 'rebase.remaining') %
              (len(state) - numrebased)))

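# Illustrative note (not part of the original module): during an
# interrupted rebase, the hook above makes 'hg summary' print a line
# like
#
#   rebase: 2 rebased, 3 remaining (rebase --continue)
#
# or, when the saved state references revisions that no longer exist:
#
#   rebase: (use "hg rebase --abort" to clear broken state)
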
def uisetup(ui):
    # Replace pull with a decorator to provide the --rebase option
    entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
    entry[1].append(('', 'rebase', None,
                     _("rebase working directory to branch head")))
    entry[1].append(('t', 'tool', '',
                     _("specify merge tool for rebase")))
    cmdutil.summaryhooks.add('rebase', summaryhook)
    cmdutil.unfinishedstates.append(
        ['rebasestate', False, False, _('rebase in progress'),
         _("use 'hg rebase --continue' or 'hg rebase --abort'")])
    cmdutil.afterresolvedstates.append(
        ['rebasestate', _('hg rebase --continue')])

@@ -1,2592 +1,2597 b''

# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import re
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirnodes,
    wdirrev,
)
from .thirdparty import (
    attr,
)
from . import (
    encoding,
    error,
    fileset,
    match as matchmod,
    mdiff,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    revlog,
    scmutil,
    sparse,
    subrepo,
    util,
)

propertycache = util.propertycache

nonascii = re.compile(r'[^\x21-\x7f]').search

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

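    # Illustrative sketch (not part of the original module): the dunder
    # methods above give contexts a container-like API, e.g.
    #
    #   ctx = repo['.']      # basectx.__new__ passes a basectx through
    #   'README' in ctx      # __contains__: manifest membership
    #   ctx['README']        # __getitem__: returns a filectx
    #   sorted(ctx)          # __iter__: filenames in the manifest
    #   ctx == repo['.']     # __eq__: same type and same revision
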
    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this
        is a working copy context. For non-working copy contexts, it just
        returns the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override
        the match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

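    # Illustrative sketch (not part of the original module): each entry
    # of mf1.diff(mf2, clean=True) is either None (clean file) or a pair
    # of (node, flag) pairs, classified above roughly as follows:
    #
    #   ((None, ''), (n2, ''))   -> added
    #   ((n1, ''), (None, ''))   -> removed
    #   ((n1, ''), (n1, 'x'))    -> modified (flags differ)
    #   ((n1, ''), (n2, ''))     -> modified, or clean after content check
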
    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
        """Keep the old version around in order to avoid breaking extensions
        about different return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. Possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

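    # Illustrative sketch (not part of the original module): for a
    # hypothetical orphan changeset (obsolete ancestor, itself alive),
    #
    #   ctx.obsolete()        # -> False
    #   ctx.orphan()          # -> True
    #   ctx.isunstable()      # -> True
    #   ctx.instabilities()   # -> ['orphan']
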
    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r

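# Illustrative note (not part of the original module): status() is the
# engine behind calls such as repo[None].status() (working directory
# vs. its first parent) and repo[rev1].status(rev2). When the contexts
# had to be swapped for the fast path above, 'added' and 'removed' are
# swapped back, and deleted/unknown/ignored are cleared since they only
# make sense relative to the working directory.
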
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

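    # Illustrative sketch (not part of upstream context.py): the lookup
    # precedence implemented by __init__ above, as seen through
    # repo[changeid]. The ids below are placeholders and may raise
    # RepoLookupError in a real repository.
    #
    #   repo[5]             # int: changelog revision number
    #   repo['null']        # the null changeset
    #   repo['tip']         # changelog tip
    #   repo['.']           # first parent of the working directory
    #   repo['\x12' * 20]   # 20-byte string: binary node
    #   repo['5']           # decimal string: revision number
    #   repo['ab' * 20]     # 40-digit hex string: full node
    #   repo['stable']      # bookmark/tag/branch via the name interface
    #   repo['ab12cd']      # anything else: unambiguous node prefix
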
    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

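    # Illustrative sketch (not part of upstream context.py): the accessors
    # above all read from the lazily-computed _changeset propertycache, so a
    # sequence like the following parses the changelog entry only once:
    #
    #   ctx = repo['tip']
    #   ctx.user(), ctx.date(), ctx.files(), ctx.description(), ctx.branch()
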
    def isinmemory(self):
        return False

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

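    # Illustrative sketch (not part of upstream context.py): with several
    # greatest common ancestors, the experimental merge.preferancestor knob
    # selects one; '25' below is an assumed candidate revision.
    #
    #   [merge]
    #   preferancestor = 25
    #
    #   anc = ctx1.ancestor(ctx2, warn=True)  # notes the alternatives
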
    def descendant(self, other):
        """True if other is a descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain
            # about paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This
        is expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

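    # Illustrative sketch (not part of upstream context.py): cmp() is a
    # size-based fast path. When sizes alone prove the contents differ it
    # returns True without reading data; only equal sizes (or the +4-byte
    # empty-metadata-block case) fall through to a filelog comparison.
    #
    #   if fctx1.cmp(fctx2):   # True means "contents differ"
    #       ...
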
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a
            # result. But if the manifest uses a buggy file revision (not a
            # child of the one it replaces), we could. Such a buggy situation
            # will likely result in a crash somewhere else at some point.
        return lkr

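    # Illustrative sketch (not part of upstream context.py): why adjustment
    # is needed. A filelog entry stores a single linkrev even when the same
    # file revision is (re)introduced by several changesets, so the stored
    # pointer can land outside the history being inspected:
    #
    #   fctx._adjustlinkrev(srcrev, inclusive=True)
    #   # walks srcrev's ancestors for the changeset whose manifest
    #   # actually records this file node
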
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

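    # Illustrative sketch (not part of upstream context.py): linkrev() vs
    # introrev() on a linkrev-shadowed file revision; 'a' is an assumed
    # tracked file.
    #
    #   fctx = repo['tip'].filectx('a')
    #   fctx.linkrev()   # raw filelog pointer, may name an unrelated branch
    #   fctx.introrev()  # pinned to an ancestor of repo['tip']
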
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid parents have always been filtered out in the list
            # comprehension above, inserting at index 0 always amounts to
            # "replacing the first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1,
                       filelog=self._filelog)

    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if the linenumber parameter is true,
        number is the line number at the first appearance in the managed
        file; otherwise, number has a fixed value of False.
        '''

        def lines(text):
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([annotateline(fctx=rev, lineno=i)
                         for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([annotateline(fctx=rev)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                for p in pl:
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))

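    # Illustrative sketch (not part of upstream context.py): consuming the
    # annotate output; 'a' is an assumed tracked file.
    #
    #   fctx = repo['tip'].filectx('a')
    #   for meta, line in fctx.annotate(follow=True, linenumber=True):
    #       # meta.fctx last touched the line; meta.lineno is its line
    #       # number at first appearance; meta.skip marks skip-annotated
    #       # lines
    #       ...
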
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

@attr.s(slots=True, frozen=True)
class annotateline(object):
    fctx = attr.ib()
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)

def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched.
        # Reversing pblocks maintains bias towards p2, matching the
        # behavior above.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak],
                                                       skip=True)
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child

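# Illustrative sketch (not part of upstream context.py): the effect of
# skipchild in _annotatepair. The arguments mirror those built inside
# annotate() above.
def _annotatepairskipdemo(parents, childfctx, child, diffopts):
    # With skipchild=True the child is never blamed: lines in '=' blocks
    # take parent annotations as usual, and the remaining lines are mapped
    # 1:1 onto parent hunk lines (repeating the last one if needed) and
    # flagged via attr.evolve(..., skip=True).
    return _annotatepair(parents, childfctx, child, True, diffopts)
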
1182 | class filectx(basefilectx): |
|
1182 | class filectx(basefilectx): | |
1183 | """A filecontext object makes access to data related to a particular |
|
1183 | """A filecontext object makes access to data related to a particular | |
1184 | filerevision convenient.""" |
|
1184 | filerevision convenient.""" | |
1185 | def __init__(self, repo, path, changeid=None, fileid=None, |
|
1185 | def __init__(self, repo, path, changeid=None, fileid=None, | |
1186 | filelog=None, changectx=None): |
|
1186 | filelog=None, changectx=None): | |
1187 | """changeid can be a changeset revision, node, or tag. |
|
1187 | """changeid can be a changeset revision, node, or tag. | |
1188 | fileid can be a file revision or node.""" |
|
1188 | fileid can be a file revision or node.""" | |
1189 | self._repo = repo |
|
1189 | self._repo = repo | |
1190 | self._path = path |
|
1190 | self._path = path | |
1191 |
|
1191 | |||
1192 | assert (changeid is not None |
|
1192 | assert (changeid is not None | |
1193 | or fileid is not None |
|
1193 | or fileid is not None | |
1194 | or changectx is not None), \ |
|
1194 | or changectx is not None), \ | |
1195 | ("bad args: changeid=%r, fileid=%r, changectx=%r" |
|
1195 | ("bad args: changeid=%r, fileid=%r, changectx=%r" | |
1196 | % (changeid, fileid, changectx)) |
|
1196 | % (changeid, fileid, changectx)) | |
1197 |
|
1197 | |||
1198 | if filelog is not None: |
|
1198 | if filelog is not None: | |
1199 | self._filelog = filelog |
|
1199 | self._filelog = filelog | |
1200 |
|
1200 | |||
1201 | if changeid is not None: |
|
1201 | if changeid is not None: | |
1202 | self._changeid = changeid |
|
1202 | self._changeid = changeid | |
1203 | if changectx is not None: |
|
1203 | if changectx is not None: | |
1204 | self._changectx = changectx |
|
1204 | self._changectx = changectx | |
1205 | if fileid is not None: |
|
1205 | if fileid is not None: | |
1206 | self._fileid = fileid |
|
1206 | self._fileid = fileid | |
1207 |
|
1207 | |||
1208 | @propertycache |
|
1208 | @propertycache | |
1209 | def _changectx(self): |
|
1209 | def _changectx(self): | |
1210 | try: |
|
1210 | try: | |
1211 | return changectx(self._repo, self._changeid) |
|
1211 | return changectx(self._repo, self._changeid) | |
1212 | except error.FilteredRepoLookupError: |
|
1212 | except error.FilteredRepoLookupError: | |
1213 | # Linkrev may point to any revision in the repository. When the |
|
1213 | # Linkrev may point to any revision in the repository. When the | |
1214 | # repository is filtered this may lead to `filectx` trying to build |
|
1214 | # repository is filtered this may lead to `filectx` trying to build | |
1215 | # `changectx` for filtered revision. In such case we fallback to |
|
1215 | # `changectx` for filtered revision. In such case we fallback to | |
1216 | # creating `changectx` on the unfiltered version of the reposition. |
|
1216 | # creating `changectx` on the unfiltered version of the reposition. | |
1217 | # This fallback should not be an issue because `changectx` from |
|
1217 | # This fallback should not be an issue because `changectx` from | |
1218 | # `filectx` are not used in complex operations that care about |
|
1218 | # `filectx` are not used in complex operations that care about | |
1219 | # filtering. |
|
1219 | # filtering. | |
1220 | # |
|
1220 | # | |
1221 | # This fallback is a cheap and dirty fix that prevent several |
|
1221 | # This fallback is a cheap and dirty fix that prevent several | |
1222 | # crashes. It does not ensure the behavior is correct. However the |
|
1222 | # crashes. It does not ensure the behavior is correct. However the | |
1223 | # behavior was not correct before filtering either and "incorrect |
|
1223 | # behavior was not correct before filtering either and "incorrect | |
1224 | # behavior" is seen as better as "crash" |
|
1224 | # behavior" is seen as better as "crash" | |
1225 | # |
|
1225 | # | |
1226 | # Linkrevs have several serious troubles with filtering that are |
|
1226 | # Linkrevs have several serious troubles with filtering that are | |
1227 | # complicated to solve. Proper handling of the issue here should be |
|
1227 | # complicated to solve. Proper handling of the issue here should be | |
1228 | # considered when solving linkrev issue are on the table. |
|
1228 | # considered when solving linkrev issue are on the table. | |
1229 | return changectx(self._repo.unfiltered(), self._changeid) |
|
1229 | return changectx(self._repo.unfiltered(), self._changeid) | |
1230 |
|
1230 | |||
1231 | def filectx(self, fileid, changeid=None): |
|
1231 | def filectx(self, fileid, changeid=None): | |
1232 | '''opens an arbitrary revision of the file without |
|
1232 | '''opens an arbitrary revision of the file without | |
1233 | opening a new filelog''' |
|
1233 | opening a new filelog''' | |
1234 | return filectx(self._repo, self._path, fileid=fileid, |
|
1234 | return filectx(self._repo, self._path, fileid=fileid, | |
1235 | filelog=self._filelog, changeid=changeid) |
|
1235 | filelog=self._filelog, changeid=changeid) | |
1236 |
|
1236 | |||
1237 | def rawdata(self): |
|
1237 | def rawdata(self): | |
1238 | return self._filelog.revision(self._filenode, raw=True) |
|
1238 | return self._filelog.revision(self._filenode, raw=True) | |
1239 |
|
1239 | |||
1240 | def rawflags(self): |
|
1240 | def rawflags(self): | |
1241 | """low-level revlog flags""" |
|
1241 | """low-level revlog flags""" | |
1242 | return self._filelog.flags(self._filerev) |
|
1242 | return self._filelog.flags(self._filerev) | |
1243 |
|
1243 | |||
1244 | def data(self): |
|
1244 | def data(self): | |
1245 | try: |
|
1245 | try: | |
1246 | return self._filelog.read(self._filenode) |
|
1246 | return self._filelog.read(self._filenode) | |
1247 | except error.CensoredNodeError: |
|
1247 | except error.CensoredNodeError: | |
1248 | if self._repo.ui.config("censor", "policy") == "ignore": |
|
1248 | if self._repo.ui.config("censor", "policy") == "ignore": | |
1249 | return "" |
|
1249 | return "" | |
1250 | raise error.Abort(_("censored node: %s") % short(self._filenode), |
|
1250 | raise error.Abort(_("censored node: %s") % short(self._filenode), | |
1251 | hint=_("set censor.policy to ignore errors")) |
|
1251 | hint=_("set censor.policy to ignore errors")) | |
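
    # The censor extension replaces a file revision's content with a sentinel
    # while keeping the hash chain intact; reading such a revision raises
    # CensoredNodeError unless the policy checked above allows it. A minimal
    # sketch of the configuration this code path consults (hgrc syntax):
    #
    #   [censor]
    #   policy = ignore
    #
    # With that setting, data() on a censored revision returns "" instead of
    # aborting with a hint.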

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed
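
    # Illustrative sketch (hypothetical revisions): suppose file revision 3
    # of 'b' records a copy from 'a', but linkrev(3) points at an older
    # changeset than the one being inspected. If either parent of this
    # changeset already carries the same file node for 'b', the file was
    # merely carried forward, so the copy is suppressed (None) rather than
    # re-reported on every descendant changeset.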

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func
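
    # The three-way flag resolution above follows the classic merge rule:
    # keep a flag when the parents agree, otherwise take whichever side
    # changed it relative to the common ancestor. A hedged, self-contained
    # sketch of the same logic (not Mercurial API):
    #
    #   def mergeflags(fl1, fl2, fla):
    #       if fl1 == fl2:          # parents agree
    #           return fl1
    #       if fl1 == fla:          # only p2 changed the flag
    #           return fl2
    #       if fl2 == fla:          # only p1 changed the flag
    #           return fl1
    #       return ''               # both changed it: punt
    #
    #   mergeflags('x', '', '')     # -> 'x' (p1 added the exec bit)
    #   mergeflags('l', 'x', '')    # -> ''  (conflicting changes)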

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase
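
    # Phases are ordered integers (public=0 < draft=1 < secret=2), so taking
    # the max yields the most restrictive parent phase. Worked example: with
    # one draft parent and one secret parent, max(draft, secret) == secret,
    # so the pending commit defaults to secret.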

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.
        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
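
    # Note that a merge in progress (a second parent) or a changed branch
    # name makes the working directory "dirty" even when no file contents
    # differ; e.g. right after 'hg merge', self.p2() is truthy and dirty()
    # returns a true value before any file has been touched.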

    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
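
    # Worked example for the warning threshold above: a 50,000,000-byte file
    # exceeds the 10,000,000-byte cutoff, and the message estimates
    # 3 * 50000000 // 1000000 == 150 MB of RAM, reflecting the multiple
    # in-memory copies that can exist while the file is stored and
    # compressed.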

    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def flushall(self):
        pass # For overlayworkingfilectx compatibility.

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane
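
    # The heuristic above treats a tracked symlink whose on-disk content is
    # empty, very long (>= 1024 bytes), multi-line, or binary as a damaged
    # placeholder rather than a real modification: a genuine symlink target
    # recorded on a filesystem without symlink support is a short,
    # single-line path.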

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
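
    # These are the files whose dirstate entry (size/mtime) was ambiguous, so
    # their contents had to be compared byte-for-byte. The three returned
    # lists feed _dirstatestatus() below: 'modified' and 'deleted' are merged
    # into the status result, while 'fixup' names files proven clean whose
    # cached stat data can be refreshed in the dirstate.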

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
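
    # The identity check above is a compare-and-swap style guard against a
    # race: dirstate.identity() fingerprints the on-disk .hg/dirstate file
    # (e.g. via its stat data), so if another process rewrote it between the
    # lockless read and acquiring wlock, the cached fixup results are
    # silently discarded instead of clobbering the newer on-disk state
    # (issue5584).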

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different and by the update logic to
        avoid deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man
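
    # addednodeid and modifiednodeid are fixed sentinel hashes, so any file
    # touched in the working directory receives a manifest node that can
    # never equal a stored file node; a manifest-level comparison therefore
    # always sees such files as "different" without reading their contents.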

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]
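
    # When the file was copied, the first "parent" is the copy source:
    # renamed is a (sourcepath, sourcenode) pair, and the appended None means
    # the filelog for that path is resolved lazily rather than reusing this
    # file's log. Parents whose node is nullid (the file does not exist in
    # that parent) are dropped from the result.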

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)
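
    # markcopied() is the method newly added by this change. A hedged usage
    # sketch (hypothetical paths): after writing 'copy.txt' into the working
    # directory, wctx.filectx('copy.txt').markcopied('orig.txt') records the
    # copy in the dirstate, but only when the file is already tracked (state
    # 'n'ormal, 'm'erged, or 'a'dded); untracked or removed files are left
    # alone.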

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        if wvfs.isdir(f) and not wvfs.islink(f):
            wvfs.rmtree(f, forcibly=True)
        for p in reversed(list(util.finddirs(f))):
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class overlayworkingctx(workingctx):
    """Wraps another mutable context with a write-back cache that can be
    flushed at a later time.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If
    it is `False`, the file was deleted.
    """

    def __init__(self, repo, wrappedctx):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self._wrappedctx = wrappedctx
        self._clean()

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fall back here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def write(self, path, data, flags=''):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
2021 | """exists behaves like `lexists`, but needs to follow symlinks and |
|
2026 | """exists behaves like `lexists`, but needs to follow symlinks and | |
2022 | return False if they are broken. |
|
2027 | return False if they are broken. | |
2023 | """ |
|
2028 | """ | |
2024 | if self.isdirty(path): |
|
2029 | if self.isdirty(path): | |
2025 | # If this path exists and is a symlink, "follow" it by calling |
|
2030 | # If this path exists and is a symlink, "follow" it by calling | |
2026 | # exists on the destination path. |
|
2031 | # exists on the destination path. | |
2027 | if (self._cache[path]['exists'] and |
|
2032 | if (self._cache[path]['exists'] and | |
2028 | 'l' in self._cache[path]['flags']): |
|
2033 | 'l' in self._cache[path]['flags']): | |
2029 | return self.exists(self._cache[path]['data'].strip()) |
|
2034 | return self.exists(self._cache[path]['data'].strip()) | |
2030 | else: |
|
2035 | else: | |
2031 | return self._cache[path]['exists'] |
|
2036 | return self._cache[path]['exists'] | |
2032 | return self._wrappedctx[path].exists() |
|
2037 | return self._wrappedctx[path].exists() | |
2033 |
|
2038 | |||
2034 | def lexists(self, path): |
|
2039 | def lexists(self, path): | |
2035 | """lexists returns True if the path exists""" |
|
2040 | """lexists returns True if the path exists""" | |
2036 | if self.isdirty(path): |
|
2041 | if self.isdirty(path): | |
2037 | return self._cache[path]['exists'] |
|
2042 | return self._cache[path]['exists'] | |
2038 | return self._wrappedctx[path].lexists() |
|
2043 | return self._wrappedctx[path].lexists() | |
2039 |
|
2044 | |||
2040 | def size(self, path): |
|
2045 | def size(self, path): | |
2041 | if self.isdirty(path): |
|
2046 | if self.isdirty(path): | |
2042 | if self._cache[path]['exists']: |
|
2047 | if self._cache[path]['exists']: | |
2043 | return len(self._cache[path]['data']) |
|
2048 | return len(self._cache[path]['data']) | |
2044 | else: |
|
2049 | else: | |
2045 | raise error.ProgrammingError("No such file or directory: %s" % |
|
2050 | raise error.ProgrammingError("No such file or directory: %s" % | |
2046 | self._path) |
|
2051 | self._path) | |
2047 | return self._wrappedctx[path].size() |
|
2052 | return self._wrappedctx[path].size() | |
2048 |
|
2053 | |||
2049 | def flushall(self): |
|
2054 | def flushall(self): | |
2050 | for path in self._writeorder: |
|
2055 | for path in self._writeorder: | |
2051 | entry = self._cache[path] |
|
2056 | entry = self._cache[path] | |
2052 | if entry['exists']: |
|
2057 | if entry['exists']: | |
2053 | self._wrappedctx[path].clearunknown() |
|
2058 | self._wrappedctx[path].clearunknown() | |
2054 | if entry['data'] is not None: |
|
2059 | if entry['data'] is not None: | |
2055 | if entry['flags'] is None: |
|
2060 | if entry['flags'] is None: | |
2056 | raise error.ProgrammingError('data set but not flags') |
|
2061 | raise error.ProgrammingError('data set but not flags') | |
2057 | self._wrappedctx[path].write( |
|
2062 | self._wrappedctx[path].write( | |
2058 | entry['data'], |
|
2063 | entry['data'], | |
2059 | entry['flags']) |
|
2064 | entry['flags']) | |
2060 | else: |
|
2065 | else: | |
2061 | self._wrappedctx[path].setflags( |
|
2066 | self._wrappedctx[path].setflags( | |
2062 | 'l' in entry['flags'], |
|
2067 | 'l' in entry['flags'], | |
2063 | 'x' in entry['flags']) |
|
2068 | 'x' in entry['flags']) | |
2064 | else: |
|
2069 | else: | |
2065 | self._wrappedctx[path].remove(path) |
|
2070 | self._wrappedctx[path].remove(path) | |
2066 | self._clean() |
|
2071 | self._clean() | |
2067 |
|
2072 | |||
2068 | def isdirty(self, path): |
|
2073 | def isdirty(self, path): | |
2069 | return path in self._cache |
|
2074 | return path in self._cache | |
2070 |
|
2075 | |||
2071 | def _clean(self): |
|
2076 | def _clean(self): | |
2072 | self._cache = {} |
|
2077 | self._cache = {} | |
2073 | self._writeorder = [] |
|
2078 | self._writeorder = [] | |
2074 |
|
2079 | |||
2075 | def _markdirty(self, path, exists, data=None, date=None, flags=''): |
|
2080 | def _markdirty(self, path, exists, data=None, date=None, flags=''): | |
2076 | if path not in self._cache: |
|
2081 | if path not in self._cache: | |
2077 | self._writeorder.append(path) |
|
2082 | self._writeorder.append(path) | |
2078 |
|
2083 | |||
2079 | self._cache[path] = { |
|
2084 | self._cache[path] = { | |
2080 | 'exists': exists, |
|
2085 | 'exists': exists, | |
2081 | 'data': data, |
|
2086 | 'data': data, | |
2082 | 'date': date, |
|
2087 | 'date': date, | |
2083 | 'flags': flags, |
|
2088 | 'flags': flags, | |
2084 | } |
|
2089 | } | |
2085 |
|
2090 | |||
2086 | def filectx(self, path, filelog=None): |
|
2091 | def filectx(self, path, filelog=None): | |
2087 | return overlayworkingfilectx(self._repo, path, parent=self, |
|
2092 | return overlayworkingfilectx(self._repo, path, parent=self, | |
2088 | filelog=filelog) |
|
2093 | filelog=filelog) | |
2089 |
|
2094 | |||
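# Illustrative sketch (not part of the original module): how a caller might
# exercise the write-back cache above. ``repo`` is assumed to be an open
# localrepo, and ``repo[None]`` its workingctx; only methods defined on
# overlayworkingctx are used.
#
#     wctx = overlayworkingctx(repo, repo[None])
#     wctx.write('a.txt', 'contents\n', flags='')  # buffered in self._cache
#     wctx.remove('b.txt')                         # marked dirty, not unlinked
#     assert wctx.isdirty('a.txt')
#     wctx.flushall()                              # writes reach the real wdir
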
class overlayworkingfilectx(workingfilectx):
    """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def ctx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # Copies are currently tracked in the dirstate as before. Straight copy
        # from workingfilectx.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._parent.size(self._path)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False):
        return self._parent.write(self._path, data, flags)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied, memctx=memctx)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink,
                          isexec=isexec, copied=copied,
                          memctx=memctx)

    return getfilectx

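# Illustrative sketch (not part of the original module): the three helpers
# above adapt different file sources to the filectxfn protocol. For example,
# a changectx can be wrapped so memctx reads file data from it, with per-path
# memoization layered on top. ``repo`` is an assumed localrepo.
#
#     getfilectx = memfilefromctx(repo['.'])     # serve files from '.'
#     cached = makecachingfilectxfn(getfilectx)  # memoize one lookup per path
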
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related files data is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

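# Illustrative sketch (not part of the original module): an extension could
# commit entirely in memory with memctx, never touching the working
# directory. ``repo`` is an assumed localrepo; the file name and content are
# hypothetical.
#
#     def filectxfn(repo, memctx, path):
#         if path == 'a.txt':
#             return memfilectx(repo, path, 'new content\n', memctx=memctx)
#         return None  # any other path is treated as removed
#
#     mctx = memctx(repo, parents=(repo['.'].node(), None),
#                   text='example commit', files=['a.txt'],
#                   filectxfn=filectxfn, user='someone@example.com')
#     newnode = mctx.commit()
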
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data

class overlayfilectx(committablefilectx):
    """Like memfilectx but takes an original filectx and optional parameters
    to override parts of it. This is useful when fctx.data() is expensive
    (i.e. the flag processor is expensive) and raw data, flags, and filenode
    could be reused (e.g. rebase, or a mode-only amend of a REVIDX_EXTSTORED
    file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function so that it can be lazy. path, flags, copied, ctx: None or an
        overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), and ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path and flags are not hashed in the filelog (but in the manifestlog)
        # so they do not affect reusability here.
        #
        # If ctx or copied is overridden to the same value as originalfctx's,
        # still consider it reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        return self._datafunc()

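# Illustrative sketch (not part of the original module): overlayfilectx can
# reuse an existing filectx while overriding a single attribute. Overriding
# only the flags keeps rawdata/rawflags/filenode reusable, per the comments
# above. ``ctx`` is an assumed changectx containing 'a.txt'.
#
#     fctx = overlayfilectx(ctx['a.txt'], flags='x')  # mark it executable
#     assert fctx.data() == ctx['a.txt'].data()       # content is untouched
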
class metadataonlyctx(committablectx):
    """Like memctx but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revision identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to the current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

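# Illustrative sketch (not part of the original module): a metadata-only
# rewrite, e.g. changing the committer while reusing the original manifest.
# ``repo`` and ``ctx`` (the revision being rewritten) are assumed.
#
#     newctx = metadataonlyctx(repo, ctx,
#                              parents=(ctx.p1().node(), ctx.p2().node()),
#                              text=ctx.description(),
#                              user='someone.else@example.com')
#     newnode = newctx.commit()
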
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        if isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values as cmp.
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)
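
# Illustrative sketch (not part of the original module): arbitraryfilectx lets
# merge code compare an on-disk file, such as a backup kept outside the
# working directory, against a regular filectx. The path is hypothetical.
#
#     backup = arbitraryfilectx('/tmp/a.txt.orig', repo=repo)
#     if backup.cmp(repo[None]['a.txt']):  # True when the contents differ
#         data = backup.data()
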
@@ -1,867 +1,866 @@
# copies.py - copy detection for Mercurial
#
# Copyright 2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import heapq
import os

from . import (
    match as matchmod,
    node,
    pathutil,
    scmutil,
    util,
)

def _findlimit(repo, a, b):
    """
    Find the last revision that needs to be checked to ensure that a full
    transitive closure for file copies can be properly calculated.
    Generally, this means finding the earliest revision number that's an
    ancestor of a or b but not both, except when a or b is a direct descendant
    of the other, in which case we can return the minimum revnum of a and b.
    None if no such revision exists.
    """

    # basic idea:
    # - mark a and b with different sides
    # - if a parent's children are all on the same side, the parent is
    #   on that side, otherwise it is on no side
    # - walk the graph in topological order with the help of a heap;
    #   - add unseen parents to side map
    #   - clear side of any parent that has children on different sides
    #   - track number of interesting revs that might still be on a side
    #   - track the lowest interesting rev seen
    #   - quit when interesting revs is zero

    cl = repo.changelog
    working = len(cl) # pseudo rev for the working directory
    if a is None:
        a = working
    if b is None:
        b = working

    side = {a: -1, b: 1}
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    hascommonancestor = False
    limit = working

    while interesting:
        r = -heapq.heappop(visit)
        if r == working:
            parents = [cl.rev(p) for p in repo.dirstate.parents()]
        else:
            parents = cl.parentrevs(r)
        for p in parents:
            if p < 0:
                continue
            if p not in side:
                # first time we see p; add it to visit
                side[p] = side[r]
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                # p was interesting but now we know better
                side[p] = 0
                interesting -= 1
                hascommonancestor = True
        if side[r]:
            limit = r # lowest rev visited
            interesting -= 1

    if not hascommonancestor:
        return None

    # Consider the following flow (see test-commit-amend.t under issue4405):
    # 1/ File 'a0' committed
    # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
    # 3/ Move back to first commit
    # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
    # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
    #
    # During the amend in step five, we will be in this state:
    #
    # @ 3 temporary amend commit for a1-amend
    # |
    # o 2 a1-amend
    # |
    # | o 1 a1
    # |/
    # o 0 a0
    #
    # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
    # yet the filelog has the copy information in rev 1 and we will not look
    # back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendant of b or vice versa.
    return min(limit, a, b)

def _chain(src, dst, a, b):
    '''chain two sets of copies a->b'''
    t = a.copy()
    for k, v in b.iteritems():
        if v in t:
            # found a chain
            if t[v] != k:
                # file wasn't renamed back to itself
                t[k] = t[v]
            if v not in dst:
                # chain was a rename, not a copy
                del t[v]
        if v in src:
            # file is a copy of an existing file
            t[k] = v

    # remove criss-crossed copies
    for k, v in t.items():
        if k in src and v in dst:
            del t[k]

    return t

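# An illustrative sketch (not part of this patch): _chain on plain data.
# Real callers pass changectxs for 'src' and 'dst'; sets work here because
# only membership tests are performed on them. Names are hypothetical:
def _demochain():
    copiesxa = {'b': 'a'}      # x->a: 'a' was renamed to 'b'
    copiesay = {'c': 'b'}      # a->y: 'b' was renamed to 'c'
    src = {'a'}                # files present in x
    dst = {'c'}                # files present in y
    # the two maps compose into a single x->y rename of 'a' to 'c'
    assert _chain(src, dst, copiesxa, copiesay) == {'c': 'a'}
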
def _tracefile(fctx, am, limit=-1):
    '''return file context that is the ancestor of fctx present in ancestor
    manifest am, stopping after the first ancestor lower than limit'''

    for f in fctx.ancestors():
        if am.get(f.path(), None) == f.filenode():
            return f
        if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
            return None

def _dirstatecopies(d):
    ds = d._repo.dirstate
    c = ds.copies().copy()
    for k in list(c):
        if ds[k] not in 'anm':
            del c[k]
    return c

def _computeforwardmissing(a, b, match=None):
    """Computes which files are in b but not a.
    This is its own function so extensions can easily wrap this call to see
    what files _forwardcopies is about to process.
    """
    ma = a.manifest()
    mb = b.manifest()
    return mb.filesnotin(ma, match=match)

def _forwardcopies(a, b, match=None):
    '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''

    # check for working copy
    w = None
    if b.rev() is None:
        w = b
        b = w.p1()
        if a == b:
            # short-circuit to avoid issues with merge states
            return _dirstatecopies(w)

    # files might have to be traced back to the fctx parent of the last
    # one-side-only changeset, but not further back than that
    limit = _findlimit(a._repo, a.rev(), b.rev())
    if limit is None:
        limit = -1
    am = a.manifest()

    # find where new files came from
    # we currently don't try to find where old files went, too expensive
    # this means we can miss a case like 'hg rm b; hg cp a b'
    cm = {}

    # Computing the forward missing is quite expensive on large manifests,
    # since it compares the entire manifests. We can optimize it in the common
    # use case of computing what copies are in a commit versus its parent (like
    # during a rebase or histedit). Note, we exclude merge commits from this
    # optimization, since the ctx.files() for a merge commit is not correct for
    # this comparison.
    forwardmissingmatch = match
    if b.p1() == a and b.p2().node() == node.nullid:
        filesmatcher = scmutil.matchfiles(a._repo, b.files())
        forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
    missing = _computeforwardmissing(a, b, match=forwardmissingmatch)

    ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
    for f in missing:
        fctx = b[f]
        fctx._ancestrycontext = ancestrycontext
        ofctx = _tracefile(fctx, am, limit)
        if ofctx:
            cm[f] = ofctx.path()

    # combine copies from dirstate if necessary
    if w is not None:
        cm = _chain(a, w, cm, _dirstatecopies(w))

    return cm

def _backwardrenames(a, b):
    if a._repo.ui.config('experimental', 'copytrace') == 'off':
        return {}

    # Even though we're not taking copies into account, 1:n rename situations
    # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
    # arbitrarily pick one of the renames.
    f = _forwardcopies(b, a)
    r = {}
    for k, v in sorted(f.iteritems()):
        # remove copies
        if v in a:
            continue
        r[v] = k
    return r

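# An illustrative sketch (not part of this patch) of the inversion above,
# on plain dicts with hypothetical names: forward copies {dst: src} from b
# back to a are flipped to {src: dst}, and entries whose source still
# exists in 'a' are dropped as copies rather than renames.
def _demobackwardrenames():
    forward = {'new': 'old', 'extra': 'kept'}  # {dst@a: src@b}
    presentina = {'kept'}                      # 'kept' still exists in a
    r = {}
    for k, v in sorted(forward.iteritems()):
        if v in presentina:
            continue                           # a copy, not a rename
        r[v] = k
    assert r == {'old': 'new'}
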
def pathcopies(x, y, match=None):
    '''find {dst@y: src@x} copy mapping for directed compare'''
    if x == y or not x or not y:
        return {}
    a = y.ancestor(x)
    if a == x:
        return _forwardcopies(x, y, match=match)
    if a == y:
        return _backwardrenames(x, y)
    return _chain(x, y, _backwardrenames(x, a),
                  _forwardcopies(a, y, match=match))

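# An illustrative sketch (not part of this patch): the cross-branch case
# above chains a backward map x->a with a forward map a->y. Hypothetical
# names; plain sets stand in for the x and y contexts:
def _demopathcopies():
    backward = {'g': 'f'}      # _backwardrenames(x, a): {dst@a: src@x}
    forward = {'h': 'g'}       # _forwardcopies(a, y): {dst@y: src@a}
    src = {'f'}                # files present in x
    dst = {'h'}                # files present in y
    assert _chain(src, dst, backward, forward) == {'h': 'f'}
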
def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
    """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
    and c2. This is its own function so extensions can easily wrap this call
    to see what files mergecopies is about to process.

    Even though c1 and c2 are not used in this function, they are useful in
    other extensions for being able to read the file nodes of the changed
    files.

    "baselabel" can be passed to help distinguish the multiple computations
    done in the graft case.
    """
    u1 = sorted(addedinm1 - addedinm2)
    u2 = sorted(addedinm2 - addedinm1)

    header = "  unmatched files in %s"
    if baselabel:
        header += ' (from %s)' % baselabel
    if u1:
        repo.ui.debug("%s:\n   %s\n" % (header % 'local', "\n   ".join(u1)))
    if u2:
        repo.ui.debug("%s:\n   %s\n" % (header % 'other', "\n   ".join(u2)))
    return u1, u2

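# An illustrative sketch (not part of this patch): the u1/u2 computation
# on plain sets. Files added on only one side are the candidates that
# mergecopies goes on to inspect for renames and copies.
def _demononoverlap():
    addedinm1 = {'a', 'b'}     # hypothetical files added in c1
    addedinm2 = {'b', 'c'}     # hypothetical files added in c2
    u1 = sorted(addedinm1 - addedinm2)
    u2 = sorted(addedinm2 - addedinm1)
    assert (u1, u2) == (['a'], ['c'])
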
def _makegetfctx(ctx):
    """return a 'getfctx' function suitable for _checkcopies usage

    We have to re-setup the function building 'filectx' for each
    '_checkcopies' to ensure the linkrev adjustment is properly setup for
    each. Linkrev adjustment is important to avoid bugs in rename
    detection. Moreover, having a proper '_ancestrycontext' setup ensures
    the performance impact of this adjustment is kept limited. Without it,
    each file could do a full dag traversal making the time complexity of
    the operation explode (see issue4537).

    This function exists here mostly to limit the impact on stable. Feel
    free to refactor on default.
    """
    rev = ctx.rev()
    repo = ctx._repo
    ac = getattr(ctx, '_ancestrycontext', None)
    if ac is None:
        revs = [rev]
        if rev is None:
            revs = [p.rev() for p in ctx.parents()]
        ac = repo.changelog.ancestors(revs, inclusive=True)
        ctx._ancestrycontext = ac
    def makectx(f, n):
        if n in node.wdirnodes:  # in a working context?
            if ctx.rev() is None:
                return ctx.filectx(f)
            return repo[None][f]
        fctx = repo.filectx(f, fileid=n)
        # setup only needed for filectx not created from a changectx
        fctx._ancestrycontext = ac
        fctx._descendantrev = rev
        return fctx
    return util.lrucachefunc(makectx)

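# An illustrative sketch (not part of this patch): util.lrucachefunc gives
# makectx a per-call cache, so repeated (f, n) lookups reuse one filectx
# instead of rebuilding it. The same wrapper on a toy function:
def _demolrucachefunc():
    calls = []
    def expensive(x):
        calls.append(x)
        return x * 2
    cached = util.lrucachefunc(expensive)
    assert cached(3) == 6
    assert cached(3) == 6      # second lookup served from the cache
    assert calls == [3]        # the wrapped function ran only once
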
def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
    """combine partial copy paths"""
    remainder = {}
    for f in copyfrom:
        if f in copyto:
            finalcopy[copyto[f]] = copyfrom[f]
            del copyto[f]
    for f in incompletediverge:
        assert f not in diverge
        ic = incompletediverge[f]
        if ic[0] in copyto:
            diverge[f] = [copyto[ic[0]], ic[1]]
        else:
            remainder[f] = ic
    return remainder

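# An illustrative sketch (not part of this patch): joining two half-paths
# with plain dicts. A copy traced up to the common ancestor on one side
# ('copyfrom') meets its other half ('copyto') at the same intermediate
# name, yielding one end-to-end copy. Names are hypothetical:
def _democombinecopies():
    copyfrom = {'mid': 'orig'}     # CA name -> name on the source side
    copyto = {'mid': 'dest'}       # CA name -> name on the other side
    finalcopy, diverge = {}, {}
    remainder = _combinecopies(copyfrom, copyto, finalcopy, diverge, {})
    assert finalcopy == {'dest': 'orig'}
    assert remainder == {} and diverge == {}
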
def mergecopies(repo, c1, c2, base):
    """
    Calls different copytracing algorithms, based on config, to find moves
    and copies between context c1 and c2 that are relevant for merging.
    'base' will be used as the merge base.

    Copytracing is used in commands like rebase, merge, unshelve, etc. to
    merge files that were moved/copied in one merge parent and modified in
    another. For example:

    o          ---> 4 another commit
    |
    |   o      ---> 3 commit that modifies a.txt
    |  /
    o /        ---> 2 commit that moves a.txt to b.txt
    |/
    o          ---> 1 merge base

    If we try to rebase revision 3 on revision 4, since there is no a.txt in
    revision 4, and if the user has copytrace disabled, we print the
    following message:

    ```other changed <file> which local deleted```

    Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
    "dirmove".

    "copy" is a mapping from destination name -> source name,
    where source is in c1 and destination is in c2 or vice-versa.

    "movewithdir" is a mapping from source name -> destination name,
    where the file at source, present in one context but not the other,
    needs to be moved to destination by the merge process, because the
    other context moved the directory it is in.

    "diverge" is a mapping of source name -> list of destination names
    for divergent renames.

    "renamedelete" is a mapping of source name -> list of destination
    names for files deleted in c1 that were renamed in c2 or vice-versa.

    "dirmove" is a mapping of detected source dir -> destination dir renames.
    This is needed for handling changes to new files previously grafted into
    renamed directories.
    """
    # avoid silly behavior for update from empty dir
    if not c1 or not c2 or c1 == c2:
        return {}, {}, {}, {}, {}

    # avoid silly behavior for parent -> working dir
    if c2.node() is None and c1.node() == repo.dirstate.p1():
        return repo.dirstate.copies(), {}, {}, {}, {}

    copytracing = repo.ui.config('experimental', 'copytrace')

    # Copy trace disabling is explicitly below the node == p1 logic above
    # because the logic above is required for a simple copy to be kept
    # across a rebase.
    if copytracing == 'off':
        return {}, {}, {}, {}, {}
    elif copytracing == 'heuristics':
        # Do full copytracing if only non-public revisions are involved as
        # that will be fast enough and will also cover the copies which could
        # be missed by heuristics
        if _isfullcopytraceable(repo, c1, base):
            return _fullcopytracing(repo, c1, c2, base)
        return _heuristicscopytracing(repo, c1, c2, base)
    else:
        return _fullcopytracing(repo, c1, c2, base)

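# An illustration (expected behavior, not from the source): for the
# docstring's example graph above, rebasing revision 3 onto revision 4
# with full copytracing should yield roughly
#     copy = {'b.txt': 'a.txt'}   # merge a.txt's change into b.txt
# with "movewithdir", "diverge", "renamedelete" and "dirmove" all empty,
# whereas with experimental.copytrace=off all five dicts come back empty
# and the user sees the "other changed <file> which local deleted" prompt.
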
def _isfullcopytraceable(repo, c1, base):
    """ Checks whether base, source and destination are all non-public
    branches; if so, the full copytrace algorithm is used for increased
    capabilities, since it will be fast enough.

    `experimental.copytrace.sourcecommitlimit` can be used to set a limit on
    the number of changesets from c1 to base: if there are more changesets
    than the limit, the full copytracing algorithm won't be used.
    """
    if c1.rev() is None:
        c1 = c1.p1()
    if c1.mutable() and base.mutable():
        sourcecommitlimit = repo.ui.configint('experimental',
                                              'copytrace.sourcecommitlimit')
        commits = len(repo.revs('%d::%d', base.rev(), c1.rev()))
        return commits < sourcecommitlimit
    return False

def _fullcopytracing(repo, c1, c2, base):
    """ The full copytracing algorithm which finds all the new files that were
    added from merge base up to the top commit and for each file it checks if
    this file was copied from another file.

    This is pretty slow when a lot of changesets are involved but will track
    all the copies.
    """
    # In certain scenarios (e.g. graft, update or rebase), base can be
    # overridden. We still need to know a real common ancestor in this case.
    # We can't just compute _c1.ancestor(_c2) and compare it to ca, because
    # there can be multiple common ancestors, e.g. in case of bidmerge.
    # Because our caller may not know if the revision passed in lieu of the
    # CA is a genuine common ancestor or not without explicitly checking it,
    # it's better to determine that here.
    #
    # base.descendant(wc) and base.descendant(base) are False, work around that
    _c1 = c1.p1() if c1.rev() is None else c1
    _c2 = c2.p1() if c2.rev() is None else c2
    # an endpoint is "dirty" if it isn't a descendant of the merge base
    # if we have a dirty endpoint, we need to trigger graft logic, and also
    # keep track of which endpoint is dirty
    dirtyc1 = not (base == _c1 or base.descendant(_c1))
    dirtyc2 = not (base == _c2 or base.descendant(_c2))
    graft = dirtyc1 or dirtyc2
    tca = base
    if graft:
        tca = _c1.ancestor(_c2)

    limit = _findlimit(repo, c1.rev(), c2.rev())
    if limit is None:
        # no common ancestor, no copies
        return {}, {}, {}, {}, {}
    repo.ui.debug("  searching for copies back to rev %d\n" % limit)

    m1 = c1.manifest()
    m2 = c2.manifest()
    mb = base.manifest()

    # gather data from _checkcopies:
    # - diverge = record all diverges in this dict
    # - copy = record all non-divergent copies in this dict
    # - fullcopy = record all copies in this dict
    # - incomplete = record non-divergent partial copies here
    # - incompletediverge = record divergent partial copies here
    diverge = {}  # divergence data is shared
    incompletediverge = {}
    data1 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': diverge,
             'incompletediverge': incompletediverge,
             }
    data2 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': diverge,
             'incompletediverge': incompletediverge,
             }

    # find interesting file sets from manifests
    addedinm1 = m1.filesnotin(mb)
    addedinm2 = m2.filesnotin(mb)
    bothnew = sorted(addedinm1 & addedinm2)
    if tca == base:
        # unmatched file from base
        u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
        u1u, u2u = u1r, u2r
    else:
        # unmatched file from base (DAG rotation in the graft case)
        u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
                                      baselabel='base')
        # unmatched file from topological common ancestors (no DAG rotation)
        # need to recompute this for directory move handling when grafting
        mta = tca.manifest()
        u1u, u2u = _computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
                                      m2.filesnotin(mta),
                                      baselabel='topological common ancestor')

    for f in u1u:
        _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)

    for f in u2u:
        _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)

    copy = dict(data1['copy'])
    copy.update(data2['copy'])
    fullcopy = dict(data1['fullcopy'])
    fullcopy.update(data2['fullcopy'])

    if dirtyc1:
        _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
                       incompletediverge)
    else:
        _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
                       incompletediverge)

    renamedelete = {}
    renamedeleteset = set()
    divergeset = set()
    for of, fl in list(diverge.items()):
        if len(fl) == 1 or of in c1 or of in c2:
            del diverge[of]  # not actually divergent, or not a rename
            if of not in c1 and of not in c2:
                # renamed on one side, deleted on the other side, but filter
                # out files that have been renamed and then deleted
                renamedelete[of] = [f for f in fl if f in c1 or f in c2]
                renamedeleteset.update(fl)  # reverse map for below
        else:
            divergeset.update(fl)  # reverse map for below

    if bothnew:
        repo.ui.debug("  unmatched files new in both:\n   %s\n"
                      % "\n   ".join(bothnew))
    bothdiverge = {}
    bothincompletediverge = {}
    remainder = {}
    both1 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': bothdiverge,
             'incompletediverge': bothincompletediverge
             }
    both2 = {'copy': {},
             'fullcopy': {},
             'incomplete': {},
             'diverge': bothdiverge,
             'incompletediverge': bothincompletediverge
             }
    for f in bothnew:
        _checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
        _checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
    if dirtyc1:
        # incomplete copies may only be found on the "dirty" side for bothnew
        assert not both2['incomplete']
        remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
                                   bothincompletediverge)
    elif dirtyc2:
        assert not both1['incomplete']
        remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
                                   bothincompletediverge)
    else:
        # incomplete copies and divergences can't happen outside grafts
        assert not both1['incomplete']
        assert not both2['incomplete']
        assert not bothincompletediverge
    for f in remainder:
        assert f not in bothdiverge
        ic = remainder[f]
        if ic[0] in (m1 if dirtyc1 else m2):
            # backed-out rename on one side, but watch out for deleted files
            bothdiverge[f] = ic
    for of, fl in bothdiverge.items():
        if len(fl) == 2 and fl[0] == fl[1]:
            copy[fl[0]] = of  # not actually divergent, just matching renames

    if fullcopy and repo.ui.debugflag:
        repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
                      "% = renamed and deleted):\n")
        for f in sorted(fullcopy):
            note = ""
            if f in copy:
                note += "*"
            if f in divergeset:
                note += "!"
            if f in renamedeleteset:
                note += "%"
            repo.ui.debug("   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
                                                              note))
    del divergeset

    if not fullcopy:
        return copy, {}, diverge, renamedelete, {}

    repo.ui.debug("  checking for directory renames\n")

    # generate a directory move map
    d1, d2 = c1.dirs(), c2.dirs()
    # Hack for adding '', which is not otherwise added, to d1 and d2
    d1.addpath('/')
    d2.addpath('/')
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in fullcopy.iteritems():
        dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d1 and ddst in d1:
            # directory wasn't entirely moved locally
            invalid.add(dsrc + "/")
        elif dsrc in d2 and ddst in d2:
            # directory wasn't entirely moved remotely
            invalid.add(dsrc + "/")
        elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
            # files from the same directory moved to two different places
            invalid.add(dsrc + "/")
        else:
            # looks good so far
            dirmove[dsrc + "/"] = ddst + "/"

    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d1, d2, invalid

    if not dirmove:
        return copy, {}, diverge, renamedelete, {}

    for d in dirmove:
        repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
                      (d, dirmove[d]))

    movewithdir = {}
    # check unaccounted nonoverlapping files against directory moves
    for f in u1r + u2r:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d):]
                    if df not in copy:
                        movewithdir[f] = df
                        repo.ui.debug(("   pending file src: '%s' -> "
                                       "dst: '%s'\n") % (f, df))
                    break

    return copy, movewithdir, diverge, renamedelete, dirmove

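# An illustrative sketch (not part of this patch) of the directory-move
# detection above, on plain paths. Plain sets stand in for c1.dirs() and
# c2.dirs(); every copy out of dir/ lands in newdir/, and source and
# target directory never coexist on the same side, so dir/ -> newdir/ is
# recorded:
def _demodirmove():
    fullcopy = {'newdir/a': 'dir/a', 'newdir/b': 'dir/b'}  # {dst: src}
    d1 = {'newdir', ''}        # directories present in c1 (hypothetical)
    d2 = {'dir', ''}           # directories present in c2 (hypothetical)
    invalid, dirmove = set(), {}
    for dst, src in fullcopy.iteritems():
        dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
        if dsrc in invalid:
            continue
        elif dsrc in d1 and ddst in d1:
            invalid.add(dsrc + "/")    # not entirely moved locally
        elif dsrc in d2 and ddst in d2:
            invalid.add(dsrc + "/")    # not entirely moved remotely
        elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
            invalid.add(dsrc + "/")    # moved to two different places
        else:
            dirmove[dsrc + "/"] = ddst + "/"
    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    assert dirmove == {'dir/': 'newdir/'}
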
def _heuristicscopytracing(repo, c1, c2, base):
    """ Fast copytracing using filename heuristics

    Assumes that moves or renames are of the following two types:

    1) Inside a directory only (same directory name but different filenames)
    2) Move from one directory to another
       (same filenames but different directory names)

    Works only when there are no merge commits in the "source branch".
    Source branch is commits from base up to c2 not including base.

    If merge is involved it falls back to _fullcopytracing().

    Can be used by setting the following config:

        [experimental]
        copytrace = heuristics
    """

    if c1.rev() is None:
        c1 = c1.p1()
    if c2.rev() is None:
        c2 = c2.p1()

    copies = {}

    changedfiles = set()
    m1 = c1.manifest()
    if not repo.revs('%d::%d', base.rev(), c2.rev()):
        # If base is not in c2 branch, we switch to fullcopytracing
        repo.ui.debug("switching to full copytracing as base is not "
                      "an ancestor of c2\n")
        return _fullcopytracing(repo, c1, c2, base)

    ctx = c2
    while ctx != base:
        if len(ctx.parents()) == 2:
            # To keep things simple let's not handle merges
            repo.ui.debug("switching to full copytracing because of merges\n")
            return _fullcopytracing(repo, c1, c2, base)
        changedfiles.update(ctx.files())
        ctx = ctx.p1()

    cp = _forwardcopies(base, c2)
    for dst, src in cp.iteritems():
        if src in m1:
            copies[dst] = src

    # file is missing if it isn't present in the destination, but is present
    # in the base and present in the source.
    # Presence in the base is important to exclude added files, presence in
    # the source is important to exclude removed files.
    missingfiles = filter(lambda f: f not in m1 and f in base and f in c2,
                          changedfiles)

    if missingfiles:
        basenametofilename = collections.defaultdict(list)
        dirnametofilename = collections.defaultdict(list)

        for f in m1.filesnotin(base.manifest()):
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            basenametofilename[basename].append(f)
            dirnametofilename[dirname].append(f)

        # in case of a rebase/graft, base may not be a common ancestor
        anc = c1.ancestor(c2)

        for f in missingfiles:
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            samebasename = basenametofilename[basename]
            samedirname = dirnametofilename[dirname]
            movecandidates = samebasename + samedirname
            # f is guaranteed to be present in c2, that's why
            # c2.filectx(f) won't fail
            f2 = c2.filectx(f)
            for candidate in movecandidates:
                f1 = c1.filectx(candidate)
                if _related(f1, f2, anc.rev()):
                    # if there are a few related copies then we'll merge
                    # changes into all of them. This matches the behaviour
                    # of upstream copytracing
                    copies[candidate] = f

    return copies, {}, {}, {}, {}

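# An illustrative sketch (not part of this patch): the candidate search
# above on plain names. A file changed in c2 but missing from c1 is
# matched against files c1 added with the same basename or directory:
def _demoheuristiccandidates():
    addedinc1 = ['docs/readme.txt', 'src/new.py']  # hypothetical
    basenametofilename = collections.defaultdict(list)
    dirnametofilename = collections.defaultdict(list)
    for f in addedinc1:
        basenametofilename[os.path.basename(f)].append(f)
        dirnametofilename[os.path.dirname(f)].append(f)
    missing = 'old/readme.txt'     # changed in c2, gone from c1
    candidates = (basenametofilename[os.path.basename(missing)] +
                  dirnametofilename[os.path.dirname(missing)])
    assert candidates == ['docs/readme.txt']
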
def _related(f1, f2, limit):
    """return True if f1 and f2 filectx have a common ancestor

    Walk back to common ancestor to see if the two files originate
    from the same file. Since workingfilectx's rev() is None it messes
    up the integer comparison logic, hence the pre-step check for
    None (f1 and f2 can only be workingfilectx's initially).
    """

    if f1 == f2:
        return f1  # a match

    g1, g2 = f1.ancestors(), f2.ancestors()
    try:
        f1r, f2r = f1.linkrev(), f2.linkrev()

        if f1r is None:
            f1 = next(g1)
        if f2r is None:
            f2 = next(g2)

        while True:
            f1r, f2r = f1.linkrev(), f2.linkrev()
            if f1r > f2r:
                f1 = next(g1)
            elif f2r > f1r:
                f2 = next(g2)
            elif f1 == f2:
                return f1  # a match
            elif f1r == f2r or f1r < limit or f2r < limit:
                return False  # copy no longer relevant
    except StopIteration:
        return False

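# An illustrative sketch (not part of this patch): the alternating walk
# above on toy objects. Whichever side currently has the higher linkrev
# steps back, until the histories meet or fall below the limit:
def _demorelated():
    class fakefctx(object):
        def __init__(self, rev, parent=None):
            self._rev, self._parent = rev, parent
        def linkrev(self):
            return self._rev
        def ancestors(self):
            p = self._parent
            while p is not None:
                yield p
                p = p._parent
    common = fakefctx(1)
    f1 = fakefctx(3, common)   # introduced at rev 3, descends from common
    f2 = fakefctx(2, common)   # introduced at rev 2, descends from common
    assert _related(f1, f2, 0) is common
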
751 | def _checkcopies(srcctx, dstctx, f, base, tca, remotebase, limit, data): |
|
751 | def _checkcopies(srcctx, dstctx, f, base, tca, remotebase, limit, data): | |
752 | """ |
|
752 | """ | |
753 | check possible copies of f from msrc to mdst |
|
753 | check possible copies of f from msrc to mdst | |
754 |
|
754 | |||
755 | srcctx = starting context for f in msrc |
|
755 | srcctx = starting context for f in msrc | |
756 | dstctx = destination context for f in mdst |
|
756 | dstctx = destination context for f in mdst | |
757 | f = the filename to check (as in msrc) |
|
757 | f = the filename to check (as in msrc) | |
758 | base = the changectx used as a merge base |
|
758 | base = the changectx used as a merge base | |
759 | tca = topological common ancestor for graft-like scenarios |
|
759 | tca = topological common ancestor for graft-like scenarios | |
760 | remotebase = True if base is outside tca::srcctx, False otherwise |
|
760 | remotebase = True if base is outside tca::srcctx, False otherwise | |
761 | limit = the rev number to not search beyond |
|
761 | limit = the rev number to not search beyond | |
762 | data = dictionary of dictionary to store copy data. (see mergecopies) |
|
762 | data = dictionary of dictionary to store copy data. (see mergecopies) | |
763 |
|
763 | |||
764 | note: limit is only an optimization, and provides no guarantee that |
|
764 | note: limit is only an optimization, and provides no guarantee that | |
765 | irrelevant revisions will not be visited |
|
765 | irrelevant revisions will not be visited | |
766 | there is no easy way to make this algorithm stop in a guaranteed way |
|
766 | there is no easy way to make this algorithm stop in a guaranteed way | |
767 | once it "goes behind a certain revision". |
|
767 | once it "goes behind a certain revision". | |
768 | """ |
|
768 | """ | |
769 |
|
769 | |||
770 | msrc = srcctx.manifest() |
|
770 | msrc = srcctx.manifest() | |
771 | mdst = dstctx.manifest() |
|
771 | mdst = dstctx.manifest() | |
772 | mb = base.manifest() |
|
772 | mb = base.manifest() | |
773 | mta = tca.manifest() |
|
773 | mta = tca.manifest() | |
774 | # Might be true if this call is about finding backward renames, |
|
774 | # Might be true if this call is about finding backward renames, | |
775 | # This happens in the case of grafts because the DAG is then rotated. |
|
775 | # This happens in the case of grafts because the DAG is then rotated. | |
776 | # If the file exists in both the base and the source, we are not looking |
|
776 | # If the file exists in both the base and the source, we are not looking | |
777 | # for a rename on the source side, but on the part of the DAG that is |
|
777 | # for a rename on the source side, but on the part of the DAG that is | |
778 | # traversed backwards. |
|
778 | # traversed backwards. | |
779 | # |
|
779 | # | |
780 | # In the case there is both backward and forward renames (before and after |
|
780 | # In the case there is both backward and forward renames (before and after | |
781 | # the base) this is more complicated as we must detect a divergence. |
|
781 | # the base) this is more complicated as we must detect a divergence. | |
782 | # We use 'backwards = False' in that case. |
|
782 | # We use 'backwards = False' in that case. | |
783 | backwards = not remotebase and base != tca and f in mb |
|
783 | backwards = not remotebase and base != tca and f in mb | |
784 | getsrcfctx = _makegetfctx(srcctx) |
|
784 | getsrcfctx = _makegetfctx(srcctx) | |
785 | getdstfctx = _makegetfctx(dstctx) |
|
785 | getdstfctx = _makegetfctx(dstctx) | |
786 |
|
786 | |||
787 | if msrc[f] == mb.get(f) and not remotebase: |
|
787 | if msrc[f] == mb.get(f) and not remotebase: | |
788 | # Nothing to merge |
|
788 | # Nothing to merge | |
789 | return |
|
789 | return | |
790 |
|
790 | |||
791 | of = None |
|
791 | of = None | |
792 | seen = {f} |
|
792 | seen = {f} | |
793 | for oc in getsrcfctx(f, msrc[f]).ancestors(): |
|
793 | for oc in getsrcfctx(f, msrc[f]).ancestors(): | |
794 | ocr = oc.linkrev() |
|
794 | ocr = oc.linkrev() | |
795 | of = oc.path() |
|
795 | of = oc.path() | |
796 | if of in seen: |
|
796 | if of in seen: | |
797 | # check limit late - grab last rename before |
|
797 | # check limit late - grab last rename before | |
798 | if ocr < limit: |
|
798 | if ocr < limit: | |
799 | break |
|
799 | break | |
800 | continue |
|
800 | continue | |
801 | seen.add(of) |
|
801 | seen.add(of) | |
802 |
|
802 | |||
803 | # remember for dir rename detection |
|
803 | # remember for dir rename detection | |
804 | if backwards: |
|
804 | if backwards: | |
805 | data['fullcopy'][of] = f # grafting backwards through renames |
|
805 | data['fullcopy'][of] = f # grafting backwards through renames | |
806 | else: |
|
806 | else: | |
807 | data['fullcopy'][f] = of |
|
807 | data['fullcopy'][f] = of | |
808 | if of not in mdst: |
|
808 | if of not in mdst: | |
809 | continue # no match, keep looking |
|
809 | continue # no match, keep looking | |
810 | if mdst[of] == mb.get(of): |
|
810 | if mdst[of] == mb.get(of): | |
811 | return # no merge needed, quit early |
|
811 | return # no merge needed, quit early | |
812 | c2 = getdstfctx(of, mdst[of]) |
|
812 | c2 = getdstfctx(of, mdst[of]) | |
813 | # c2 might be a plain new file added on the destination side that is |
|
813 | # c2 might be a plain new file added on the destination side that is | |
814 | # unrelated to the droids we are looking for. |
|
814 | # unrelated to the droids we are looking for. | |
815 | cr = _related(oc, c2, tca.rev()) |
|
815 | cr = _related(oc, c2, tca.rev()) | |
816 | if cr and (of == f or of == c2.path()): # non-divergent |
|
816 | if cr and (of == f or of == c2.path()): # non-divergent | |
817 | if backwards: |
|
817 | if backwards: | |
818 | data['copy'][of] = f |
|
818 | data['copy'][of] = f | |
819 | elif of in mb: |
|
819 | elif of in mb: | |
820 | data['copy'][f] = of |
|
820 | data['copy'][f] = of | |
821 | elif remotebase: # special case: a <- b <- a -> b "ping-pong" rename |
|
821 | elif remotebase: # special case: a <- b <- a -> b "ping-pong" rename | |
822 | data['copy'][of] = f |
|
822 | data['copy'][of] = f | |
823 | del data['fullcopy'][f] |
|
823 | del data['fullcopy'][f] | |
824 | data['fullcopy'][of] = f |
|
824 | data['fullcopy'][of] = f | |
825 | else: # divergence w.r.t. graft CA on one side of topological CA |
|
825 | else: # divergence w.r.t. graft CA on one side of topological CA | |
826 | for sf in seen: |
|
826 | for sf in seen: | |
827 | if sf in mb: |
|
827 | if sf in mb: | |
828 | assert sf not in data['diverge'] |
|
828 | assert sf not in data['diverge'] | |
829 | data['diverge'][sf] = [f, of] |
|
829 | data['diverge'][sf] = [f, of] | |
830 | break |
|
830 | break | |
831 | return |
|
831 | return | |
832 |
|
832 | |||
833 | if of in mta: |
|
833 | if of in mta: | |
834 | if backwards or remotebase: |
|
834 | if backwards or remotebase: | |
835 | data['incomplete'][of] = f |
|
835 | data['incomplete'][of] = f | |
836 | else: |
|
836 | else: | |
837 | for sf in seen: |
|
837 | for sf in seen: | |
838 | if sf in mb: |
|
838 | if sf in mb: | |
839 | if tca == base: |
|
839 | if tca == base: | |
840 | data['diverge'].setdefault(sf, []).append(f) |
|
840 | data['diverge'].setdefault(sf, []).append(f) | |
841 | else: |
|
841 | else: | |
842 | data['incompletediverge'][sf] = [of, f] |
|
842 | data['incompletediverge'][sf] = [of, f] | |
843 | return |
|
843 | return | |
844 |
|
844 | |||
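A worked illustration of the bookkeeping that `_checkcopies` fills in, with hypothetical file names: if the source side renamed 'a' to 'b' and the destination still carries 'a', the ancestor walk starting from 'b' finds of = 'a', records the pair in 'fullcopy', and promotes it to 'copy' once a related file is found in the destination manifest. A sketch of the resulting dictionary (values invented, keyed new-name to old-name as pathcopies-style backward renames):

    # sketch of `data` after a simple source-side rename a -> b
    data = {'copy': {'b': 'a'}, 'fullcopy': {'b': 'a'},
            'diverge': {}, 'incomplete': {}, 'incompletediverge': {}}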
845 | def duplicatecopies(repo, rev, fromrev, skiprev=None): |
|
845 | def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None): | |
846 | '''reproduce copies from fromrev to rev in the dirstate |
|
846 | '''reproduce copies from fromrev to rev in the dirstate | |
847 |
|
847 | |||
848 | If skiprev is specified, it's a revision that should be used to |
|
848 | If skiprev is specified, it's a revision that should be used to | |
849 | filter copy records. Any copies that occur between fromrev and |
|
849 | filter copy records. Any copies that occur between fromrev and | |
850 | skiprev will not be duplicated, even if they appear in the set of |
|
850 | skiprev will not be duplicated, even if they appear in the set of | |
851 | copies between fromrev and rev. |
|
851 | copies between fromrev and rev. | |
852 | ''' |
|
852 | ''' | |
853 | exclude = {} |
|
853 | exclude = {} | |
854 | if (skiprev is not None and |
|
854 | if (skiprev is not None and | |
855 | repo.ui.config('experimental', 'copytrace') != 'off'): |
|
855 | repo.ui.config('experimental', 'copytrace') != 'off'): | |
856 | # copytrace='off' skips this line, but not the entire function because |
|
856 | # copytrace='off' skips this line, but not the entire function because | |
857 | # the line below is O(size of the repo) during a rebase, while the rest |
|
857 | # the line below is O(size of the repo) during a rebase, while the rest | |
858 | # of the function is much faster (and is required for carrying copy |
|
858 | # of the function is much faster (and is required for carrying copy | |
859 | # metadata across the rebase anyway). |
|
859 | # metadata across the rebase anyway). | |
860 | exclude = pathcopies(repo[fromrev], repo[skiprev]) |
|
860 | exclude = pathcopies(repo[fromrev], repo[skiprev]) | |
861 | for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems(): |
|
861 | for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems(): | |
862 | # copies.pathcopies returns backward renames, so dst might not |
|
862 | # copies.pathcopies returns backward renames, so dst might not | |
863 | # actually be in the dirstate |
|
863 | # actually be in the dirstate | |
864 | if dst in exclude: |
|
864 | if dst in exclude: | |
865 | continue |
|
865 | continue | |
866 | if repo.dirstate[dst] in "nma": |
|
866 | wctx[dst].markcopied(src) | |
867 | repo.dirstate.copy(src, dst) |
|
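The hunk above is the substantive change in this file: instead of checking `repo.dirstate[dst]` and calling `repo.dirstate.copy(src, dst)` directly, copy metadata is now recorded on the working context via `wctx[dst].markcopied(src)`, which is why `duplicatecopies` grows a `wctx` parameter. A minimal sketch of the new call pattern; the helper name and paths are hypothetical:

    def record_copy(repo, src, dst):
        # record the pending copy on the workingfilectx; the dirstate is
        # only updated when the working context is later flushed
        wctx = repo[None]
        wctx[dst].markcopied(src)

    # e.g. record_copy(repo, 'old/name.py', 'new/name.py')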
@@ -1,2005 +1,2005 b'' | |||||
1 | # merge.py - directory-level update/merge handling for Mercurial |
|
1 | # merge.py - directory-level update/merge handling for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import errno |
|
10 | import errno | |
11 | import hashlib |
|
11 | import hashlib | |
12 | import shutil |
|
12 | import shutil | |
13 | import struct |
|
13 | import struct | |
14 |
|
14 | |||
15 | from .i18n import _ |
|
15 | from .i18n import _ | |
16 | from .node import ( |
|
16 | from .node import ( | |
17 | addednodeid, |
|
17 | addednodeid, | |
18 | bin, |
|
18 | bin, | |
19 | hex, |
|
19 | hex, | |
20 | modifiednodeid, |
|
20 | modifiednodeid, | |
21 | nullhex, |
|
21 | nullhex, | |
22 | nullid, |
|
22 | nullid, | |
23 | nullrev, |
|
23 | nullrev, | |
24 | ) |
|
24 | ) | |
25 | from . import ( |
|
25 | from . import ( | |
26 | copies, |
|
26 | copies, | |
27 | error, |
|
27 | error, | |
28 | filemerge, |
|
28 | filemerge, | |
29 | match as matchmod, |
|
29 | match as matchmod, | |
30 | obsutil, |
|
30 | obsutil, | |
31 | pycompat, |
|
31 | pycompat, | |
32 | scmutil, |
|
32 | scmutil, | |
33 | subrepo, |
|
33 | subrepo, | |
34 | util, |
|
34 | util, | |
35 | worker, |
|
35 | worker, | |
36 | ) |
|
36 | ) | |
37 |
|
37 | |||
38 | _pack = struct.pack |
|
38 | _pack = struct.pack | |
39 | _unpack = struct.unpack |
|
39 | _unpack = struct.unpack | |
40 |
|
40 | |||
41 | def _droponode(data): |
|
41 | def _droponode(data): | |
42 | # used for compatibility for v1 |
|
42 | # used for compatibility for v1 | |
43 | bits = data.split('\0') |
|
43 | bits = data.split('\0') | |
44 | bits = bits[:-2] + bits[-1:] |
|
44 | bits = bits[:-2] + bits[-1:] | |
45 | return '\0'.join(bits) |
|
45 | return '\0'.join(bits) | |
46 |
|
46 | |||
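To make the v1 compatibility shim above concrete: a v2 'F' record carries one extra "other file node" field that v1 lacks, and `_droponode` strips that second-to-last '\0'-separated field. A hypothetical record, with field values invented for illustration (the field order follows `mergestate.add()` below):

    v2data = '\0'.join(['file', 'u', 'hash', 'lfile', 'afile', 'anode',
                        'ofile', 'onode', 'flags'])
    v1data = _droponode(v2data)   # identical, minus the 'onode' field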
47 | class mergestate(object): |
|
47 | class mergestate(object): | |
48 | '''track 3-way merge state of individual files |
|
48 | '''track 3-way merge state of individual files | |
49 |
|
49 | |||
50 | The merge state is stored on disk when needed. Two files are used: one with |
|
50 | The merge state is stored on disk when needed. Two files are used: one with | |
51 | an old format (version 1), and one with a new format (version 2). Version 2 |
|
51 | an old format (version 1), and one with a new format (version 2). Version 2 | |
52 | stores a superset of the data in version 1, and may gain new kinds of |
|
52 | stores a superset of the data in version 1, and may gain new kinds of | |
53 | records in the future. For more about the new format, see the documentation for |
|
53 | records in the future. For more about the new format, see the documentation for | |
54 | `_readrecordsv2`. |
|
54 | `_readrecordsv2`. | |
55 |
|
55 | |||
56 | Each record can contain arbitrary content, and has an associated type. This |
|
56 | Each record can contain arbitrary content, and has an associated type. This | |
57 | `type` should be a letter. If `type` is uppercase, the record is mandatory: |
|
57 | `type` should be a letter. If `type` is uppercase, the record is mandatory: | |
58 | versions of Mercurial that don't support it should abort. If `type` is |
|
58 | versions of Mercurial that don't support it should abort. If `type` is | |
59 | lowercase, the record can be safely ignored. |
|
59 | lowercase, the record can be safely ignored. | |
60 |
|
60 | |||
61 | Currently known records: |
|
61 | Currently known records: | |
62 |
|
62 | |||
63 | L: the node of the "local" part of the merge (hexified version) |
|
63 | L: the node of the "local" part of the merge (hexified version) | |
64 | O: the node of the "other" part of the merge (hexified version) |
|
64 | O: the node of the "other" part of the merge (hexified version) | |
65 | F: an entry for a file to be merged |
|
65 | F: an entry for a file to be merged | |
66 | C: a change/delete or delete/change conflict |
|
66 | C: a change/delete or delete/change conflict | |
67 | D: a file that the external merge driver will merge internally |
|
67 | D: a file that the external merge driver will merge internally | |
68 | (experimental) |
|
68 | (experimental) | |
69 | P: a path conflict (file vs directory) |
|
69 | P: a path conflict (file vs directory) | |
70 | m: the external merge driver defined for this merge plus its run state |
|
70 | m: the external merge driver defined for this merge plus its run state | |
71 | (experimental) |
|
71 | (experimental) | |
72 | f: a (filename, dictionary) tuple of optional values for a given file |
|
72 | f: a (filename, dictionary) tuple of optional values for a given file | |
73 | X: unsupported mandatory record type (used in tests) |
|
73 | X: unsupported mandatory record type (used in tests) | |
74 | x: unsupported advisory record type (used in tests) |
|
74 | x: unsupported advisory record type (used in tests) | |
75 | l: the labels for the parts of the merge. |
|
75 | l: the labels for the parts of the merge. | |
76 |
|
76 | |||
77 | Merge driver run states (experimental): |
|
77 | Merge driver run states (experimental): | |
78 | u: driver-resolved files unmarked -- needs to be run next time we're about |
|
78 | u: driver-resolved files unmarked -- needs to be run next time we're about | |
79 | to resolve or commit |
|
79 | to resolve or commit | |
80 | m: driver-resolved files marked -- only needs to be run before commit |
|
80 | m: driver-resolved files marked -- only needs to be run before commit | |
81 | s: success/skipped -- does not need to be run any more |
|
81 | s: success/skipped -- does not need to be run any more | |
82 |
|
82 | |||
83 | Merge record states (stored in self._state, indexed by filename): |
|
83 | Merge record states (stored in self._state, indexed by filename): | |
84 | u: unresolved conflict |
|
84 | u: unresolved conflict | |
85 | r: resolved conflict |
|
85 | r: resolved conflict | |
86 | pu: unresolved path conflict (file conflicts with directory) |
|
86 | pu: unresolved path conflict (file conflicts with directory) | |
87 | pr: resolved path conflict |
|
87 | pr: resolved path conflict | |
88 | d: driver-resolved conflict |
|
88 | d: driver-resolved conflict | |
89 |
|
89 | |||
90 | The resolve command transitions between 'u' and 'r' for conflicts and |
|
90 | The resolve command transitions between 'u' and 'r' for conflicts and | |
91 | 'pu' and 'pr' for path conflicts. |
|
91 | 'pu' and 'pr' for path conflicts. | |
92 | ''' |
|
92 | ''' | |
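A small illustration of the mandatory/advisory rule above (a standalone sketch, not part of the class): whether a reader may skip a record is purely a case check on its type letter, which is exactly how `_read` below collects unsupported record types.

    def _ismandatory(rtype):
        # uppercase record types must be understood by the reader;
        # lowercase ones may be silently ignored
        return not rtype.islower()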
93 | statepathv1 = 'merge/state' |
|
93 | statepathv1 = 'merge/state' | |
94 | statepathv2 = 'merge/state2' |
|
94 | statepathv2 = 'merge/state2' | |
95 |
|
95 | |||
96 | @staticmethod |
|
96 | @staticmethod | |
97 | def clean(repo, node=None, other=None, labels=None): |
|
97 | def clean(repo, node=None, other=None, labels=None): | |
98 | """Initialize a brand new merge state, removing any existing state on |
|
98 | """Initialize a brand new merge state, removing any existing state on | |
99 | disk.""" |
|
99 | disk.""" | |
100 | ms = mergestate(repo) |
|
100 | ms = mergestate(repo) | |
101 | ms.reset(node, other, labels) |
|
101 | ms.reset(node, other, labels) | |
102 | return ms |
|
102 | return ms | |
103 |
|
103 | |||
104 | @staticmethod |
|
104 | @staticmethod | |
105 | def read(repo): |
|
105 | def read(repo): | |
106 | """Initialize the merge state, reading it from disk.""" |
|
106 | """Initialize the merge state, reading it from disk.""" | |
107 | ms = mergestate(repo) |
|
107 | ms = mergestate(repo) | |
108 | ms._read() |
|
108 | ms._read() | |
109 | return ms |
|
109 | return ms | |
110 |
|
110 | |||
111 | def __init__(self, repo): |
|
111 | def __init__(self, repo): | |
112 | """Initialize the merge state. |
|
112 | """Initialize the merge state. | |
113 |
|
113 | |||
114 | Do not use this directly! Instead call read() or clean().""" |
|
114 | Do not use this directly! Instead call read() or clean().""" | |
115 | self._repo = repo |
|
115 | self._repo = repo | |
116 | self._dirty = False |
|
116 | self._dirty = False | |
117 | self._labels = None |
|
117 | self._labels = None | |
118 |
|
118 | |||
119 | def reset(self, node=None, other=None, labels=None): |
|
119 | def reset(self, node=None, other=None, labels=None): | |
120 | self._state = {} |
|
120 | self._state = {} | |
121 | self._stateextras = {} |
|
121 | self._stateextras = {} | |
122 | self._local = None |
|
122 | self._local = None | |
123 | self._other = None |
|
123 | self._other = None | |
124 | self._labels = labels |
|
124 | self._labels = labels | |
125 | for var in ('localctx', 'otherctx'): |
|
125 | for var in ('localctx', 'otherctx'): | |
126 | if var in vars(self): |
|
126 | if var in vars(self): | |
127 | delattr(self, var) |
|
127 | delattr(self, var) | |
128 | if node: |
|
128 | if node: | |
129 | self._local = node |
|
129 | self._local = node | |
130 | self._other = other |
|
130 | self._other = other | |
131 | self._readmergedriver = None |
|
131 | self._readmergedriver = None | |
132 | if self.mergedriver: |
|
132 | if self.mergedriver: | |
133 | self._mdstate = 's' |
|
133 | self._mdstate = 's' | |
134 | else: |
|
134 | else: | |
135 | self._mdstate = 'u' |
|
135 | self._mdstate = 'u' | |
136 | shutil.rmtree(self._repo.vfs.join('merge'), True) |
|
136 | shutil.rmtree(self._repo.vfs.join('merge'), True) | |
137 | self._results = {} |
|
137 | self._results = {} | |
138 | self._dirty = False |
|
138 | self._dirty = False | |
139 |
|
139 | |||
140 | def _read(self): |
|
140 | def _read(self): | |
141 | """Analyse each record content to restore a serialized state from disk |
|
141 | """Analyse each record content to restore a serialized state from disk | |
142 |
|
142 | |||
143 | This function processes "record" entries produced by the de-serialization |
|
143 | This function processes "record" entries produced by the de-serialization | |
144 | of the on-disk file. |
|
144 | of the on-disk file. | |
145 | """ |
|
145 | """ | |
146 | self._state = {} |
|
146 | self._state = {} | |
147 | self._stateextras = {} |
|
147 | self._stateextras = {} | |
148 | self._local = None |
|
148 | self._local = None | |
149 | self._other = None |
|
149 | self._other = None | |
150 | for var in ('localctx', 'otherctx'): |
|
150 | for var in ('localctx', 'otherctx'): | |
151 | if var in vars(self): |
|
151 | if var in vars(self): | |
152 | delattr(self, var) |
|
152 | delattr(self, var) | |
153 | self._readmergedriver = None |
|
153 | self._readmergedriver = None | |
154 | self._mdstate = 's' |
|
154 | self._mdstate = 's' | |
155 | unsupported = set() |
|
155 | unsupported = set() | |
156 | records = self._readrecords() |
|
156 | records = self._readrecords() | |
157 | for rtype, record in records: |
|
157 | for rtype, record in records: | |
158 | if rtype == 'L': |
|
158 | if rtype == 'L': | |
159 | self._local = bin(record) |
|
159 | self._local = bin(record) | |
160 | elif rtype == 'O': |
|
160 | elif rtype == 'O': | |
161 | self._other = bin(record) |
|
161 | self._other = bin(record) | |
162 | elif rtype == 'm': |
|
162 | elif rtype == 'm': | |
163 | bits = record.split('\0', 1) |
|
163 | bits = record.split('\0', 1) | |
164 | mdstate = bits[1] |
|
164 | mdstate = bits[1] | |
165 | if len(mdstate) != 1 or mdstate not in 'ums': |
|
165 | if len(mdstate) != 1 or mdstate not in 'ums': | |
166 | # the merge driver should be idempotent, so just rerun it |
|
166 | # the merge driver should be idempotent, so just rerun it | |
167 | mdstate = 'u' |
|
167 | mdstate = 'u' | |
168 |
|
168 | |||
169 | self._readmergedriver = bits[0] |
|
169 | self._readmergedriver = bits[0] | |
170 | self._mdstate = mdstate |
|
170 | self._mdstate = mdstate | |
171 | elif rtype in 'FDCP': |
|
171 | elif rtype in 'FDCP': | |
172 | bits = record.split('\0') |
|
172 | bits = record.split('\0') | |
173 | self._state[bits[0]] = bits[1:] |
|
173 | self._state[bits[0]] = bits[1:] | |
174 | elif rtype == 'f': |
|
174 | elif rtype == 'f': | |
175 | filename, rawextras = record.split('\0', 1) |
|
175 | filename, rawextras = record.split('\0', 1) | |
176 | extraparts = rawextras.split('\0') |
|
176 | extraparts = rawextras.split('\0') | |
177 | extras = {} |
|
177 | extras = {} | |
178 | i = 0 |
|
178 | i = 0 | |
179 | while i < len(extraparts): |
|
179 | while i < len(extraparts): | |
180 | extras[extraparts[i]] = extraparts[i + 1] |
|
180 | extras[extraparts[i]] = extraparts[i + 1] | |
181 | i += 2 |
|
181 | i += 2 | |
182 |
|
182 | |||
183 | self._stateextras[filename] = extras |
|
183 | self._stateextras[filename] = extras | |
184 | elif rtype == 'l': |
|
184 | elif rtype == 'l': | |
185 | labels = record.split('\0', 2) |
|
185 | labels = record.split('\0', 2) | |
186 | self._labels = [l for l in labels if len(l) > 0] |
|
186 | self._labels = [l for l in labels if len(l) > 0] | |
187 | elif not rtype.islower(): |
|
187 | elif not rtype.islower(): | |
188 | unsupported.add(rtype) |
|
188 | unsupported.add(rtype) | |
189 | self._results = {} |
|
189 | self._results = {} | |
190 | self._dirty = False |
|
190 | self._dirty = False | |
191 |
|
191 | |||
192 | if unsupported: |
|
192 | if unsupported: | |
193 | raise error.UnsupportedMergeRecords(unsupported) |
|
193 | raise error.UnsupportedMergeRecords(unsupported) | |
194 |
|
194 | |||
195 | def _readrecords(self): |
|
195 | def _readrecords(self): | |
196 | """Read merge state from disk and return a list of record (TYPE, data) |
|
196 | """Read merge state from disk and return a list of record (TYPE, data) | |
197 |
|
197 | |||
198 | We read data from both v1 and v2 files and decide which one to use. |
|
198 | We read data from both v1 and v2 files and decide which one to use. | |
199 |
|
199 | |||
200 | V1 has been used by versions prior to 2.9.1 and contains less data than |
|
200 | V1 has been used by versions prior to 2.9.1 and contains less data than | |
201 | v2. We read both versions and check that no data in v2 contradicts |
|
201 | v2. We read both versions and check that no data in v2 contradicts | |
202 | v1. If there is no contradiction we can safely assume that both v1 |
|
202 | v1. If there is no contradiction we can safely assume that both v1 | |
203 | and v2 were written at the same time and use the extra data in v2. If |
|
203 | and v2 were written at the same time and use the extra data in v2. If | |
204 | there is a contradiction we ignore v2 content as we assume an old version |
|
204 | there is a contradiction we ignore v2 content as we assume an old version | |
205 | of Mercurial has overwritten the mergestate file and left an old v2 |
|
205 | of Mercurial has overwritten the mergestate file and left an old v2 | |
206 | file around. |
|
206 | file around. | |
207 |
|
207 | |||
208 | returns a list of records [(TYPE, data), ...]""" |
|
208 | returns a list of records [(TYPE, data), ...]""" | |
209 | v1records = self._readrecordsv1() |
|
209 | v1records = self._readrecordsv1() | |
210 | v2records = self._readrecordsv2() |
|
210 | v2records = self._readrecordsv2() | |
211 | if self._v1v2match(v1records, v2records): |
|
211 | if self._v1v2match(v1records, v2records): | |
212 | return v2records |
|
212 | return v2records | |
213 | else: |
|
213 | else: | |
214 | # v1 file is newer than v2 file, use it |
|
214 | # v1 file is newer than v2 file, use it | |
215 | # we have to infer the "other" changeset of the merge |
|
215 | # we have to infer the "other" changeset of the merge | |
216 | # we cannot do better than that with v1 of the format |
|
216 | # we cannot do better than that with v1 of the format | |
217 | mctx = self._repo[None].parents()[-1] |
|
217 | mctx = self._repo[None].parents()[-1] | |
218 | v1records.append(('O', mctx.hex())) |
|
218 | v1records.append(('O', mctx.hex())) | |
219 | # add placeholder "other" file node information |
|
219 | # add placeholder "other" file node information | |
220 | # nobody is using it yet so we do not need to fetch the data |
|
220 | # nobody is using it yet so we do not need to fetch the data | |
221 | # if mctx was wrong `mctx[bits[-2]]` may fail. |
|
221 | # if mctx was wrong `mctx[bits[-2]]` may fail. | |
222 | for idx, r in enumerate(v1records): |
|
222 | for idx, r in enumerate(v1records): | |
223 | if r[0] == 'F': |
|
223 | if r[0] == 'F': | |
224 | bits = r[1].split('\0') |
|
224 | bits = r[1].split('\0') | |
225 | bits.insert(-2, '') |
|
225 | bits.insert(-2, '') | |
226 | v1records[idx] = (r[0], '\0'.join(bits)) |
|
226 | v1records[idx] = (r[0], '\0'.join(bits)) | |
227 | return v1records |
|
227 | return v1records | |
228 |
|
228 | |||
229 | def _v1v2match(self, v1records, v2records): |
|
229 | def _v1v2match(self, v1records, v2records): | |
230 | oldv2 = set() # old format version of v2 record |
|
230 | oldv2 = set() # old format version of v2 record | |
231 | for rec in v2records: |
|
231 | for rec in v2records: | |
232 | if rec[0] == 'L': |
|
232 | if rec[0] == 'L': | |
233 | oldv2.add(rec) |
|
233 | oldv2.add(rec) | |
234 | elif rec[0] == 'F': |
|
234 | elif rec[0] == 'F': | |
235 | # drop the onode data (not contained in v1) |
|
235 | # drop the onode data (not contained in v1) | |
236 | oldv2.add(('F', _droponode(rec[1]))) |
|
236 | oldv2.add(('F', _droponode(rec[1]))) | |
237 | for rec in v1records: |
|
237 | for rec in v1records: | |
238 | if rec not in oldv2: |
|
238 | if rec not in oldv2: | |
239 | return False |
|
239 | return False | |
240 | else: |
|
240 | else: | |
241 | return True |
|
241 | return True | |
242 |
|
242 | |||
243 | def _readrecordsv1(self): |
|
243 | def _readrecordsv1(self): | |
244 | """read on disk merge state for version 1 file |
|
244 | """read on disk merge state for version 1 file | |
245 |
|
245 | |||
246 | returns list of record [(TYPE, data), ...] |
|
246 | returns list of record [(TYPE, data), ...] | |
247 |
|
247 | |||
248 | Note: the "F" data from this file are one entry short |
|
248 | Note: the "F" data from this file are one entry short | |
249 | (no "other file node" entry) |
|
249 | (no "other file node" entry) | |
250 | """ |
|
250 | """ | |
251 | records = [] |
|
251 | records = [] | |
252 | try: |
|
252 | try: | |
253 | f = self._repo.vfs(self.statepathv1) |
|
253 | f = self._repo.vfs(self.statepathv1) | |
254 | for i, l in enumerate(f): |
|
254 | for i, l in enumerate(f): | |
255 | if i == 0: |
|
255 | if i == 0: | |
256 | records.append(('L', l[:-1])) |
|
256 | records.append(('L', l[:-1])) | |
257 | else: |
|
257 | else: | |
258 | records.append(('F', l[:-1])) |
|
258 | records.append(('F', l[:-1])) | |
259 | f.close() |
|
259 | f.close() | |
260 | except IOError as err: |
|
260 | except IOError as err: | |
261 | if err.errno != errno.ENOENT: |
|
261 | if err.errno != errno.ENOENT: | |
262 | raise |
|
262 | raise | |
263 | return records |
|
263 | return records | |
264 |
|
264 | |||
265 | def _readrecordsv2(self): |
|
265 | def _readrecordsv2(self): | |
266 | """read on disk merge state for version 2 file |
|
266 | """read on disk merge state for version 2 file | |
267 |
|
267 | |||
268 | This format is a list of arbitrary records of the form: |
|
268 | This format is a list of arbitrary records of the form: | |
269 |
|
269 | |||
270 | [type][length][content] |
|
270 | [type][length][content] | |
271 |
|
271 | |||
272 | `type` is a single character, `length` is a 4 byte integer, and |
|
272 | `type` is a single character, `length` is a 4 byte integer, and | |
273 | `content` is an arbitrary byte sequence of length `length`. |
|
273 | `content` is an arbitrary byte sequence of length `length`. | |
274 |
|
274 | |||
275 | Mercurial versions prior to 3.7 have a bug where if there are |
|
275 | Mercurial versions prior to 3.7 have a bug where if there are | |
276 | unsupported mandatory merge records, attempting to clear out the merge |
|
276 | unsupported mandatory merge records, attempting to clear out the merge | |
277 | state with hg update --clean or similar aborts. The 't' record type |
|
277 | state with hg update --clean or similar aborts. The 't' record type | |
278 | works around that by writing out what those versions treat as an |
|
278 | works around that by writing out what those versions treat as an | |
279 | advisory record, but later versions interpret as special: the first |
|
279 | advisory record, but later versions interpret as special: the first | |
280 | character is the 'real' record type and everything onwards is the data. |
|
280 | character is the 'real' record type and everything onwards is the data. | |
281 |
|
281 | |||
282 | Returns list of records [(TYPE, data), ...].""" |
|
282 | Returns list of records [(TYPE, data), ...].""" | |
283 | records = [] |
|
283 | records = [] | |
284 | try: |
|
284 | try: | |
285 | f = self._repo.vfs(self.statepathv2) |
|
285 | f = self._repo.vfs(self.statepathv2) | |
286 | data = f.read() |
|
286 | data = f.read() | |
287 | off = 0 |
|
287 | off = 0 | |
288 | end = len(data) |
|
288 | end = len(data) | |
289 | while off < end: |
|
289 | while off < end: | |
290 | rtype = data[off] |
|
290 | rtype = data[off] | |
291 | off += 1 |
|
291 | off += 1 | |
292 | length = _unpack('>I', data[off:(off + 4)])[0] |
|
292 | length = _unpack('>I', data[off:(off + 4)])[0] | |
293 | off += 4 |
|
293 | off += 4 | |
294 | record = data[off:(off + length)] |
|
294 | record = data[off:(off + length)] | |
295 | off += length |
|
295 | off += length | |
296 | if rtype == 't': |
|
296 | if rtype == 't': | |
297 | rtype, record = record[0], record[1:] |
|
297 | rtype, record = record[0], record[1:] | |
298 | records.append((rtype, record)) |
|
298 | records.append((rtype, record)) | |
299 | f.close() |
|
299 | f.close() | |
300 | except IOError as err: |
|
300 | except IOError as err: | |
301 | if err.errno != errno.ENOENT: |
|
301 | if err.errno != errno.ENOENT: | |
302 | raise |
|
302 | raise | |
303 | return records |
|
303 | return records | |
304 |
|
304 | |||
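A minimal sketch of the v2 encoding documented above (one type byte, a 4-byte big-endian length, then the payload), including the 't' wrapping that `_writerecordsv2` applies to any record type outside its 'LOF' whitelist; the payload here is invented for illustration:

    import struct

    def encoderecord(rtype, data):
        # [type][length][content], with length packed as '>I'
        return rtype + struct.pack('>I', len(data)) + data

    # 'X' is not a v2-whitelisted type, so it ships inside a 't' record
    # whose first payload byte is the real type
    wrapped = encoderecord('t', 'X' + 'some data')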
305 | @util.propertycache |
|
305 | @util.propertycache | |
306 | def mergedriver(self): |
|
306 | def mergedriver(self): | |
307 | # protect against the following: |
|
307 | # protect against the following: | |
308 | # - A configures a malicious merge driver in their hgrc, then |
|
308 | # - A configures a malicious merge driver in their hgrc, then | |
309 | # pauses the merge |
|
309 | # pauses the merge | |
310 | # - A edits their hgrc to remove references to the merge driver |
|
310 | # - A edits their hgrc to remove references to the merge driver | |
311 | # - A gives a copy of their entire repo, including .hg, to B |
|
311 | # - A gives a copy of their entire repo, including .hg, to B | |
312 | # - B inspects .hgrc and finds it to be clean |
|
312 | # - B inspects .hgrc and finds it to be clean | |
313 | # - B then continues the merge and the malicious merge driver |
|
313 | # - B then continues the merge and the malicious merge driver | |
314 | # gets invoked |
|
314 | # gets invoked | |
315 | configmergedriver = self._repo.ui.config('experimental', 'mergedriver') |
|
315 | configmergedriver = self._repo.ui.config('experimental', 'mergedriver') | |
316 | if (self._readmergedriver is not None |
|
316 | if (self._readmergedriver is not None | |
317 | and self._readmergedriver != configmergedriver): |
|
317 | and self._readmergedriver != configmergedriver): | |
318 | raise error.ConfigError( |
|
318 | raise error.ConfigError( | |
319 | _("merge driver changed since merge started"), |
|
319 | _("merge driver changed since merge started"), | |
320 | hint=_("revert merge driver change or abort merge")) |
|
320 | hint=_("revert merge driver change or abort merge")) | |
321 |
|
321 | |||
322 | return configmergedriver |
|
322 | return configmergedriver | |
323 |
|
323 | |||
324 | @util.propertycache |
|
324 | @util.propertycache | |
325 | def localctx(self): |
|
325 | def localctx(self): | |
326 | if self._local is None: |
|
326 | if self._local is None: | |
327 | msg = "localctx accessed but self._local isn't set" |
|
327 | msg = "localctx accessed but self._local isn't set" | |
328 | raise error.ProgrammingError(msg) |
|
328 | raise error.ProgrammingError(msg) | |
329 | return self._repo[self._local] |
|
329 | return self._repo[self._local] | |
330 |
|
330 | |||
331 | @util.propertycache |
|
331 | @util.propertycache | |
332 | def otherctx(self): |
|
332 | def otherctx(self): | |
333 | if self._other is None: |
|
333 | if self._other is None: | |
334 | msg = "otherctx accessed but self._other isn't set" |
|
334 | msg = "otherctx accessed but self._other isn't set" | |
335 | raise error.ProgrammingError(msg) |
|
335 | raise error.ProgrammingError(msg) | |
336 | return self._repo[self._other] |
|
336 | return self._repo[self._other] | |
337 |
|
337 | |||
338 | def active(self): |
|
338 | def active(self): | |
339 | """Whether mergestate is active. |
|
339 | """Whether mergestate is active. | |
340 |
|
340 | |||
341 | Returns True if there appears to be mergestate. This is a rough proxy |
|
341 | Returns True if there appears to be mergestate. This is a rough proxy | |
342 | for "is a merge in progress." |
|
342 | for "is a merge in progress." | |
343 | """ |
|
343 | """ | |
344 | # Check local variables before looking at filesystem for performance |
|
344 | # Check local variables before looking at filesystem for performance | |
345 | # reasons. |
|
345 | # reasons. | |
346 | return bool(self._local) or bool(self._state) or \ |
|
346 | return bool(self._local) or bool(self._state) or \ | |
347 | self._repo.vfs.exists(self.statepathv1) or \ |
|
347 | self._repo.vfs.exists(self.statepathv1) or \ | |
348 | self._repo.vfs.exists(self.statepathv2) |
|
348 | self._repo.vfs.exists(self.statepathv2) | |
349 |
|
349 | |||
350 | def commit(self): |
|
350 | def commit(self): | |
351 | """Write current state on disk (if necessary)""" |
|
351 | """Write current state on disk (if necessary)""" | |
352 | if self._dirty: |
|
352 | if self._dirty: | |
353 | records = self._makerecords() |
|
353 | records = self._makerecords() | |
354 | self._writerecords(records) |
|
354 | self._writerecords(records) | |
355 | self._dirty = False |
|
355 | self._dirty = False | |
356 |
|
356 | |||
357 | def _makerecords(self): |
|
357 | def _makerecords(self): | |
358 | records = [] |
|
358 | records = [] | |
359 | records.append(('L', hex(self._local))) |
|
359 | records.append(('L', hex(self._local))) | |
360 | records.append(('O', hex(self._other))) |
|
360 | records.append(('O', hex(self._other))) | |
361 | if self.mergedriver: |
|
361 | if self.mergedriver: | |
362 | records.append(('m', '\0'.join([ |
|
362 | records.append(('m', '\0'.join([ | |
363 | self.mergedriver, self._mdstate]))) |
|
363 | self.mergedriver, self._mdstate]))) | |
364 | # Write out state items. In all cases, the value of the state map entry |
|
364 | # Write out state items. In all cases, the value of the state map entry | |
365 | # is written as the contents of the record. The record type depends on |
|
365 | # is written as the contents of the record. The record type depends on | |
366 | # the type of state that is stored, and capital-letter records are used |
|
366 | # the type of state that is stored, and capital-letter records are used | |
367 | # to prevent older versions of Mercurial that do not support the feature |
|
367 | # to prevent older versions of Mercurial that do not support the feature | |
368 | # from loading them. |
|
368 | # from loading them. | |
369 | for filename, v in self._state.iteritems(): |
|
369 | for filename, v in self._state.iteritems(): | |
370 | if v[0] == 'd': |
|
370 | if v[0] == 'd': | |
371 | # Driver-resolved merge. These are stored in 'D' records. |
|
371 | # Driver-resolved merge. These are stored in 'D' records. | |
372 | records.append(('D', '\0'.join([filename] + v))) |
|
372 | records.append(('D', '\0'.join([filename] + v))) | |
373 | elif v[0] in ('pu', 'pr'): |
|
373 | elif v[0] in ('pu', 'pr'): | |
374 | # Path conflicts. These are stored in 'P' records. The current |
|
374 | # Path conflicts. These are stored in 'P' records. The current | |
375 | # resolution state ('pu' or 'pr') is stored within the record. |
|
375 | # resolution state ('pu' or 'pr') is stored within the record. | |
376 | records.append(('P', '\0'.join([filename] + v))) |
|
376 | records.append(('P', '\0'.join([filename] + v))) | |
377 | elif v[1] == nullhex or v[6] == nullhex: |
|
377 | elif v[1] == nullhex or v[6] == nullhex: | |
378 | # Change/Delete or Delete/Change conflicts. These are stored in |
|
378 | # Change/Delete or Delete/Change conflicts. These are stored in | |
379 | # 'C' records. v[1] is the local file, and is nullhex when the |
|
379 | # 'C' records. v[1] is the local file, and is nullhex when the | |
380 | # file is deleted locally ('dc'). v[6] is the remote file, and |
|
380 | # file is deleted locally ('dc'). v[6] is the remote file, and | |
381 | # is nullhex when the file is deleted remotely ('cd'). |
|
381 | # is nullhex when the file is deleted remotely ('cd'). | |
382 | records.append(('C', '\0'.join([filename] + v))) |
|
382 | records.append(('C', '\0'.join([filename] + v))) | |
383 | else: |
|
383 | else: | |
384 | # Normal files. These are stored in 'F' records. |
|
384 | # Normal files. These are stored in 'F' records. | |
385 | records.append(('F', '\0'.join([filename] + v))) |
|
385 | records.append(('F', '\0'.join([filename] + v))) | |
386 | for filename, extras in sorted(self._stateextras.iteritems()): |
|
386 | for filename, extras in sorted(self._stateextras.iteritems()): | |
387 | rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in |
|
387 | rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in | |
388 | extras.iteritems()) |
|
388 | extras.iteritems()) | |
389 | records.append(('f', '%s\0%s' % (filename, rawextras))) |
|
389 | records.append(('f', '%s\0%s' % (filename, rawextras))) | |
390 | if self._labels is not None: |
|
390 | if self._labels is not None: | |
391 | labels = '\0'.join(self._labels) |
|
391 | labels = '\0'.join(self._labels) | |
392 | records.append(('l', labels)) |
|
392 | records.append(('l', labels)) | |
393 | return records |
|
393 | return records | |
394 |
|
394 | |||
395 | def _writerecords(self, records): |
|
395 | def _writerecords(self, records): | |
396 | """Write current state on disk (both v1 and v2)""" |
|
396 | """Write current state on disk (both v1 and v2)""" | |
397 | self._writerecordsv1(records) |
|
397 | self._writerecordsv1(records) | |
398 | self._writerecordsv2(records) |
|
398 | self._writerecordsv2(records) | |
399 |
|
399 | |||
400 | def _writerecordsv1(self, records): |
|
400 | def _writerecordsv1(self, records): | |
401 | """Write current state on disk in a version 1 file""" |
|
401 | """Write current state on disk in a version 1 file""" | |
402 | f = self._repo.vfs(self.statepathv1, 'w') |
|
402 | f = self._repo.vfs(self.statepathv1, 'w') | |
403 | irecords = iter(records) |
|
403 | irecords = iter(records) | |
404 | lrecords = next(irecords) |
|
404 | lrecords = next(irecords) | |
405 | assert lrecords[0] == 'L' |
|
405 | assert lrecords[0] == 'L' | |
406 | f.write(hex(self._local) + '\n') |
|
406 | f.write(hex(self._local) + '\n') | |
407 | for rtype, data in irecords: |
|
407 | for rtype, data in irecords: | |
408 | if rtype == 'F': |
|
408 | if rtype == 'F': | |
409 | f.write('%s\n' % _droponode(data)) |
|
409 | f.write('%s\n' % _droponode(data)) | |
410 | f.close() |
|
410 | f.close() | |
411 |
|
411 | |||
412 | def _writerecordsv2(self, records): |
|
412 | def _writerecordsv2(self, records): | |
413 | """Write current state on disk in a version 2 file |
|
413 | """Write current state on disk in a version 2 file | |
414 |
|
414 | |||
415 | See the docstring for _readrecordsv2 for why we use 't'.""" |
|
415 | See the docstring for _readrecordsv2 for why we use 't'.""" | |
416 | # these are the records that all version 2 clients can read |
|
416 | # these are the records that all version 2 clients can read | |
417 | whitelist = 'LOF' |
|
417 | whitelist = 'LOF' | |
418 | f = self._repo.vfs(self.statepathv2, 'w') |
|
418 | f = self._repo.vfs(self.statepathv2, 'w') | |
419 | for key, data in records: |
|
419 | for key, data in records: | |
420 | assert len(key) == 1 |
|
420 | assert len(key) == 1 | |
421 | if key not in whitelist: |
|
421 | if key not in whitelist: | |
422 | key, data = 't', '%s%s' % (key, data) |
|
422 | key, data = 't', '%s%s' % (key, data) | |
423 | format = '>sI%is' % len(data) |
|
423 | format = '>sI%is' % len(data) | |
424 | f.write(_pack(format, key, len(data), data)) |
|
424 | f.write(_pack(format, key, len(data), data)) | |
425 | f.close() |
|
425 | f.close() | |
426 |
|
426 | |||
427 | def add(self, fcl, fco, fca, fd): |
|
427 | def add(self, fcl, fco, fca, fd): | |
428 | """add a new (potentially?) conflicting file the merge state |
|
428 | """add a new (potentially?) conflicting file the merge state | |
429 | fcl: file context for local, |
|
429 | fcl: file context for local, | |
430 | fco: file context for remote, |
|
430 | fco: file context for remote, | |
431 | fca: file context for ancestors, |
|
431 | fca: file context for ancestors, | |
432 | fd: file path of the resulting merge. |
|
432 | fd: file path of the resulting merge. | |
433 |
|
433 | |||
434 | note: also write the local version to the `.hg/merge` directory. |
|
434 | note: also write the local version to the `.hg/merge` directory. | |
435 | """ |
|
435 | """ | |
436 | if fcl.isabsent(): |
|
436 | if fcl.isabsent(): | |
437 | hash = nullhex |
|
437 | hash = nullhex | |
438 | else: |
|
438 | else: | |
439 | hash = hex(hashlib.sha1(fcl.path()).digest()) |
|
439 | hash = hex(hashlib.sha1(fcl.path()).digest()) | |
440 | self._repo.vfs.write('merge/' + hash, fcl.data()) |
|
440 | self._repo.vfs.write('merge/' + hash, fcl.data()) | |
441 | self._state[fd] = ['u', hash, fcl.path(), |
|
441 | self._state[fd] = ['u', hash, fcl.path(), | |
442 | fca.path(), hex(fca.filenode()), |
|
442 | fca.path(), hex(fca.filenode()), | |
443 | fco.path(), hex(fco.filenode()), |
|
443 | fco.path(), hex(fco.filenode()), | |
444 | fcl.flags()] |
|
444 | fcl.flags()] | |
445 | self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())} |
|
445 | self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())} | |
446 | self._dirty = True |
|
446 | self._dirty = True | |
447 |
|
447 | |||
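A note on the storage key used by `add()` above: the local version of a conflicting file is stashed under `.hg/merge/<key>`, where the key is the sha1 of the file's path rather than its contents (or nullhex when the local side is absent). For example, with a hypothetical path:

    import hashlib

    key = hashlib.sha1('path/to/file').hexdigest()
    # the pristine local version would be written to .hg/merge/<key>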
448 | def addpath(self, path, frename, forigin): |
|
448 | def addpath(self, path, frename, forigin): | |
449 | """add a new conflicting path to the merge state |
|
449 | """add a new conflicting path to the merge state | |
450 | path: the path that conflicts |
|
450 | path: the path that conflicts | |
451 | frename: the filename the conflicting file was renamed to |
|
451 | frename: the filename the conflicting file was renamed to | |
452 | forigin: origin of the file ('l' or 'r' for local/remote) |
|
452 | forigin: origin of the file ('l' or 'r' for local/remote) | |
453 | """ |
|
453 | """ | |
454 | self._state[path] = ['pu', frename, forigin] |
|
454 | self._state[path] = ['pu', frename, forigin] | |
455 | self._dirty = True |
|
455 | self._dirty = True | |
456 |
|
456 | |||
457 | def __contains__(self, dfile): |
|
457 | def __contains__(self, dfile): | |
458 | return dfile in self._state |
|
458 | return dfile in self._state | |
459 |
|
459 | |||
460 | def __getitem__(self, dfile): |
|
460 | def __getitem__(self, dfile): | |
461 | return self._state[dfile][0] |
|
461 | return self._state[dfile][0] | |
462 |
|
462 | |||
463 | def __iter__(self): |
|
463 | def __iter__(self): | |
464 | return iter(sorted(self._state)) |
|
464 | return iter(sorted(self._state)) | |
465 |
|
465 | |||
466 | def files(self): |
|
466 | def files(self): | |
467 | return self._state.keys() |
|
467 | return self._state.keys() | |
468 |
|
468 | |||
469 | def mark(self, dfile, state): |
|
469 | def mark(self, dfile, state): | |
470 | self._state[dfile][0] = state |
|
470 | self._state[dfile][0] = state | |
471 | self._dirty = True |
|
471 | self._dirty = True | |
472 |
|
472 | |||
473 | def mdstate(self): |
|
473 | def mdstate(self): | |
474 | return self._mdstate |
|
474 | return self._mdstate | |
475 |
|
475 | |||
476 | def unresolved(self): |
|
476 | def unresolved(self): | |
477 | """Obtain the paths of unresolved files.""" |
|
477 | """Obtain the paths of unresolved files.""" | |
478 |
|
478 | |||
479 | for f, entry in self._state.iteritems(): |
|
479 | for f, entry in self._state.iteritems(): | |
480 | if entry[0] in ('u', 'pu'): |
|
480 | if entry[0] in ('u', 'pu'): | |
481 | yield f |
|
481 | yield f | |
482 |
|
482 | |||
483 | def driverresolved(self): |
|
483 | def driverresolved(self): | |
484 | """Obtain the paths of driver-resolved files.""" |
|
484 | """Obtain the paths of driver-resolved files.""" | |
485 |
|
485 | |||
486 | for f, entry in self._state.items(): |
|
486 | for f, entry in self._state.items(): | |
487 | if entry[0] == 'd': |
|
487 | if entry[0] == 'd': | |
488 | yield f |
|
488 | yield f | |
489 |
|
489 | |||
490 | def extras(self, filename): |
|
490 | def extras(self, filename): | |
491 | return self._stateextras.setdefault(filename, {}) |
|
491 | return self._stateextras.setdefault(filename, {}) | |
492 |
|
492 | |||
493 | def _resolve(self, preresolve, dfile, wctx): |
|
493 | def _resolve(self, preresolve, dfile, wctx): | |
494 | """rerun merge process for file path `dfile`""" |
|
494 | """rerun merge process for file path `dfile`""" | |
495 | if self[dfile] in 'rd': |
|
495 | if self[dfile] in 'rd': | |
496 | return True, 0 |
|
496 | return True, 0 | |
497 | stateentry = self._state[dfile] |
|
497 | stateentry = self._state[dfile] | |
498 | state, hash, lfile, afile, anode, ofile, onode, flags = stateentry |
|
498 | state, hash, lfile, afile, anode, ofile, onode, flags = stateentry | |
499 | octx = self._repo[self._other] |
|
499 | octx = self._repo[self._other] | |
500 | extras = self.extras(dfile) |
|
500 | extras = self.extras(dfile) | |
501 | anccommitnode = extras.get('ancestorlinknode') |
|
501 | anccommitnode = extras.get('ancestorlinknode') | |
502 | if anccommitnode: |
|
502 | if anccommitnode: | |
503 | actx = self._repo[anccommitnode] |
|
503 | actx = self._repo[anccommitnode] | |
504 | else: |
|
504 | else: | |
505 | actx = None |
|
505 | actx = None | |
506 | fcd = self._filectxorabsent(hash, wctx, dfile) |
|
506 | fcd = self._filectxorabsent(hash, wctx, dfile) | |
507 | fco = self._filectxorabsent(onode, octx, ofile) |
|
507 | fco = self._filectxorabsent(onode, octx, ofile) | |
508 | # TODO: move this to filectxorabsent |
|
508 | # TODO: move this to filectxorabsent | |
509 | fca = self._repo.filectx(afile, fileid=anode, changeid=actx) |
|
509 | fca = self._repo.filectx(afile, fileid=anode, changeid=actx) | |
510 | # "premerge" x flags |
|
510 | # "premerge" x flags | |
511 | flo = fco.flags() |
|
511 | flo = fco.flags() | |
512 | fla = fca.flags() |
|
512 | fla = fca.flags() | |
513 | if 'x' in flags + flo + fla and 'l' not in flags + flo + fla: |
|
513 | if 'x' in flags + flo + fla and 'l' not in flags + flo + fla: | |
514 | if fca.node() == nullid and flags != flo: |
|
514 | if fca.node() == nullid and flags != flo: | |
515 | if preresolve: |
|
515 | if preresolve: | |
516 | self._repo.ui.warn( |
|
516 | self._repo.ui.warn( | |
517 | _('warning: cannot merge flags for %s ' |
|
517 | _('warning: cannot merge flags for %s ' | |
518 | 'without common ancestor - keeping local flags\n') |
|
518 | 'without common ancestor - keeping local flags\n') | |
519 | % afile) |
|
519 | % afile) | |
520 | elif flags == fla: |
|
520 | elif flags == fla: | |
521 | flags = flo |
|
521 | flags = flo | |
522 | if preresolve: |
|
522 | if preresolve: | |
523 | # restore local |
|
523 | # restore local | |
524 | if hash != nullhex: |
|
524 | if hash != nullhex: | |
525 | f = self._repo.vfs('merge/' + hash) |
|
525 | f = self._repo.vfs('merge/' + hash) | |
526 | wctx[dfile].write(f.read(), flags) |
|
526 | wctx[dfile].write(f.read(), flags) | |
527 | f.close() |
|
527 | f.close() | |
528 | else: |
|
528 | else: | |
529 | wctx[dfile].remove(ignoremissing=True) |
|
529 | wctx[dfile].remove(ignoremissing=True) | |
530 | complete, r, deleted = filemerge.premerge(self._repo, wctx, |
|
530 | complete, r, deleted = filemerge.premerge(self._repo, wctx, | |
531 | self._local, lfile, fcd, |
|
531 | self._local, lfile, fcd, | |
532 | fco, fca, |
|
532 | fco, fca, | |
533 | labels=self._labels) |
|
533 | labels=self._labels) | |
534 | else: |
|
534 | else: | |
535 | complete, r, deleted = filemerge.filemerge(self._repo, wctx, |
|
535 | complete, r, deleted = filemerge.filemerge(self._repo, wctx, | |
536 | self._local, lfile, fcd, |
|
536 | self._local, lfile, fcd, | |
537 | fco, fca, |
|
537 | fco, fca, | |
538 | labels=self._labels) |
|
538 | labels=self._labels) | |
539 | if r is None: |
|
539 | if r is None: | |
540 | # no real conflict |
|
540 | # no real conflict | |
541 | del self._state[dfile] |
|
541 | del self._state[dfile] | |
542 | self._stateextras.pop(dfile, None) |
|
542 | self._stateextras.pop(dfile, None) | |
543 | self._dirty = True |
|
543 | self._dirty = True | |
544 | elif not r: |
|
544 | elif not r: | |
545 | self.mark(dfile, 'r') |
|
545 | self.mark(dfile, 'r') | |
546 |
|
546 | |||
547 | if complete: |
|
547 | if complete: | |
548 | action = None |
|
548 | action = None | |
549 | if deleted: |
|
549 | if deleted: | |
550 | if fcd.isabsent(): |
|
550 | if fcd.isabsent(): | |
551 | # dc: local picked. Need to drop if present, which may |
|
551 | # dc: local picked. Need to drop if present, which may | |
552 | # happen on re-resolves. |
|
552 | # happen on re-resolves. | |
553 | action = 'f' |
|
553 | action = 'f' | |
554 | else: |
|
554 | else: | |
555 | # cd: remote picked (or otherwise deleted) |
|
555 | # cd: remote picked (or otherwise deleted) | |
556 | action = 'r' |
|
556 | action = 'r' | |
557 | else: |
|
557 | else: | |
558 | if fcd.isabsent(): # dc: remote picked |
|
558 | if fcd.isabsent(): # dc: remote picked | |
559 | action = 'g' |
|
559 | action = 'g' | |
560 | elif fco.isabsent(): # cd: local picked |
|
560 | elif fco.isabsent(): # cd: local picked | |
561 | if dfile in self.localctx: |
|
561 | if dfile in self.localctx: | |
562 | action = 'am' |
|
562 | action = 'am' | |
563 | else: |
|
563 | else: | |
564 | action = 'a' |
|
564 | action = 'a' | |
565 | # else: regular merges (no action necessary) |
|
565 | # else: regular merges (no action necessary) | |
566 | self._results[dfile] = r, action |
|
566 | self._results[dfile] = r, action | |
567 |
|
567 | |||
568 | return complete, r |
|
568 | return complete, r | |
569 |
|
569 | |||
570 | def _filectxorabsent(self, hexnode, ctx, f): |
|
570 | def _filectxorabsent(self, hexnode, ctx, f): | |
571 | if hexnode == nullhex: |
|
571 | if hexnode == nullhex: | |
572 | return filemerge.absentfilectx(ctx, f) |
|
572 | return filemerge.absentfilectx(ctx, f) | |
573 | else: |
|
573 | else: | |
574 | return ctx[f] |
|
574 | return ctx[f] | |
575 |
|
575 | |||
576 | def preresolve(self, dfile, wctx): |
|
576 | def preresolve(self, dfile, wctx): | |
577 | """run premerge process for dfile |
|
577 | """run premerge process for dfile | |
578 |
|
578 | |||
579 | Returns whether the merge is complete, and the exit code.""" |
|
579 | Returns whether the merge is complete, and the exit code.""" | |
580 | return self._resolve(True, dfile, wctx) |
|
580 | return self._resolve(True, dfile, wctx) | |
581 |
|
581 | |||
582 | def resolve(self, dfile, wctx): |
|
582 | def resolve(self, dfile, wctx): | |
583 | """run merge process (assuming premerge was run) for dfile |
|
583 | """run merge process (assuming premerge was run) for dfile | |
584 |
|
584 | |||
585 | Returns the exit code of the merge.""" |
|
585 | Returns the exit code of the merge.""" | |
586 | return self._resolve(False, dfile, wctx)[1] |
|
586 | return self._resolve(False, dfile, wctx)[1] | |
587 |
|
587 | |||
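Putting `preresolve` and `resolve` together, a caller-side sketch of the two-phase protocol (assumed usage, loosely modelled on the resolve command, not taken verbatim from it):

    ms = mergestate.read(repo)
    wctx = repo[None]
    for f in list(ms.unresolved()):   # copy first: preresolve may drop entries
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            r = ms.resolve(f, wctx)   # run the actual merge tool
    ms.commit()                       # persist the state if it changed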
588 | def counts(self): |
|
588 | def counts(self): | |
589 | """return counts for updated, merged and removed files in this |
|
589 | """return counts for updated, merged and removed files in this | |
590 | session""" |
|
590 | session""" | |
591 | updated, merged, removed = 0, 0, 0 |
|
591 | updated, merged, removed = 0, 0, 0 | |
592 | for r, action in self._results.itervalues(): |
|
592 | for r, action in self._results.itervalues(): | |
593 | if r is None: |
|
593 | if r is None: | |
594 | updated += 1 |
|
594 | updated += 1 | |
595 | elif r == 0: |
|
595 | elif r == 0: | |
596 | if action == 'r': |
|
596 | if action == 'r': | |
597 | removed += 1 |
|
597 | removed += 1 | |
598 | else: |
|
598 | else: | |
599 | merged += 1 |
|
599 | merged += 1 | |
600 | return updated, merged, removed |
|
600 | return updated, merged, removed | |
601 |
|
601 | |||
602 | def unresolvedcount(self): |
|
602 | def unresolvedcount(self): | |
603 | """get unresolved count for this merge (persistent)""" |
|
603 | """get unresolved count for this merge (persistent)""" | |
604 | return len(list(self.unresolved())) |
|
604 | return len(list(self.unresolved())) | |
605 |
|
605 | |||
606 | def actions(self): |
|
606 | def actions(self): | |
607 | """return lists of actions to perform on the dirstate""" |
|
607 | """return lists of actions to perform on the dirstate""" | |
608 | actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []} |
|
608 | actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []} | |
609 | for f, (r, action) in self._results.iteritems(): |
|
609 | for f, (r, action) in self._results.iteritems(): | |
610 | if action is not None: |
|
610 | if action is not None: | |
611 | actions[action].append((f, None, "merge result")) |
|
611 | actions[action].append((f, None, "merge result")) | |
612 | return actions |
|
612 | return actions | |
613 |
|
613 | |||
614 | def recordactions(self): |
|
614 | def recordactions(self): | |
615 | """record remove/add/get actions in the dirstate""" |
|
615 | """record remove/add/get actions in the dirstate""" | |
616 | branchmerge = self._repo.dirstate.p2() != nullid |
|
616 | branchmerge = self._repo.dirstate.p2() != nullid | |
617 | recordupdates(self._repo, self.actions(), branchmerge) |
|
617 | recordupdates(self._repo, self.actions(), branchmerge) | |
618 |
|
618 | |||
619 | def queueremove(self, f): |
|
619 | def queueremove(self, f): | |
620 | """queues a file to be removed from the dirstate |
|
620 | """queues a file to be removed from the dirstate | |
621 |
|
621 | |||
622 | Meant for use by custom merge drivers.""" |
|
622 | Meant for use by custom merge drivers.""" | |
623 | self._results[f] = 0, 'r' |
|
623 | self._results[f] = 0, 'r' | |
624 |
|
624 | |||
625 | def queueadd(self, f): |
|
625 | def queueadd(self, f): | |
626 | """queues a file to be added to the dirstate |
|
626 | """queues a file to be added to the dirstate | |
627 |
|
627 | |||
628 | Meant for use by custom merge drivers.""" |
|
628 | Meant for use by custom merge drivers.""" | |
629 | self._results[f] = 0, 'a' |
|
629 | self._results[f] = 0, 'a' | |
630 |
|
630 | |||
631 | def queueget(self, f): |
|
631 | def queueget(self, f): | |
632 | """queues a file to be marked modified in the dirstate |
|
632 | """queues a file to be marked modified in the dirstate | |
633 |
|
633 | |||
634 | Meant for use by custom merge drivers.""" |
|
634 | Meant for use by custom merge drivers.""" | |
635 | self._results[f] = 0, 'g' |
|
635 | self._results[f] = 0, 'g' | |
636 |
|
636 | |||
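The three `queue*` helpers above let a custom merge driver feed its results into the same dirstate-action machinery as ordinary file merges. A hypothetical driver step, with invented file names:

    def driverstep(ms):
        ms.queueremove('renamed/away.py')   # to be dropped from the dirstate
        ms.queueadd('renamed/here.py')      # to be marked as added
        ms.queueget('patched/file.py')      # to be marked as modified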
637 | def _getcheckunknownconfig(repo, section, name): |
|
637 | def _getcheckunknownconfig(repo, section, name): | |
638 | config = repo.ui.config(section, name) |
|
638 | config = repo.ui.config(section, name) | |
639 | valid = ['abort', 'ignore', 'warn'] |
|
639 | valid = ['abort', 'ignore', 'warn'] | |
640 | if config not in valid: |
|
640 | if config not in valid: | |
641 | validstr = ', '.join(["'" + v + "'" for v in valid]) |
|
641 | validstr = ', '.join(["'" + v + "'" for v in valid]) | |
642 | raise error.ConfigError(_("%s.%s not valid " |
|
642 | raise error.ConfigError(_("%s.%s not valid " | |
643 | "('%s' is none of %s)") |
|
643 | "('%s' is none of %s)") | |
644 | % (section, name, config, validstr)) |
|
644 | % (section, name, config, validstr)) | |
645 | return config |
|
645 | return config | |
646 |
|
646 | |||
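The callers of this helper read `merge.checkunknown` and `merge.checkignored` (see `_checkunknownfiles` below), each of which must be one of 'abort', 'ignore' or 'warn' per the validation above. An illustrative hgrc snippet:

    [merge]
    checkunknown = warn
    checkignored = abort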
647 | def _checkunknownfile(repo, wctx, mctx, f, f2=None): |
|
647 | def _checkunknownfile(repo, wctx, mctx, f, f2=None): | |
648 | if f2 is None: |
|
648 | if f2 is None: | |
649 | f2 = f |
|
649 | f2 = f | |
650 | return (repo.wvfs.audit.check(f) |
|
650 | return (repo.wvfs.audit.check(f) | |
651 | and repo.wvfs.isfileorlink(f) |
|
651 | and repo.wvfs.isfileorlink(f) | |
652 | and repo.dirstate.normalize(f) not in repo.dirstate |
|
652 | and repo.dirstate.normalize(f) not in repo.dirstate | |
653 | and mctx[f2].cmp(wctx[f])) |
|
653 | and mctx[f2].cmp(wctx[f])) | |
654 |
|
654 | |||
def _checkunknowndirs(repo, f):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """

    # Check for path prefixes that exist as unknown files.
    for p in reversed(list(util.finddirs(f))):
        if (repo.wvfs.audit.check(p)
                and repo.wvfs.isfileorlink(p)
                and repo.dirstate.normalize(p) not in repo.dirstate):
            return p

    # Check if the file conflicts with a directory containing unknown files.
    if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
        # Does the directory contain any files that are not in the dirstate?
        for p, dirs, files in repo.wvfs.walk(f):
            for fn in files:
                relf = repo.dirstate.normalize(repo.wvfs.reljoin(p, fn))
                if relf not in repo.dirstate:
                    return f
    return None

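Reviewer note: the "shortest conflicting prefix" contract above follows from reversing util.finddirs, which yields ancestor directories deepest-first. A minimal standalone sketch over plain sets, with a hypothetical finddirs helper standing in for util.finddirs (not Mercurial API):

def finddirs(path):
    # Stand-in for util.finddirs: yield ancestor dirs, deepest first
    # ('a/b/c' -> 'a/b', 'a').
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

def conflictingprefix(path, untracked):
    # reversed() visits the shortest prefix first, so the first hit is
    # the shortest conflicting path, matching the docstring above.
    for p in reversed(list(finddirs(path))):
        if p in untracked:
            return p
    return None

assert conflictingprefix('a/b/c', {'a/b'}) == 'a/b'
assert conflictingprefix('a/b/c', {'a', 'a/b'}) == 'a'
assert conflictingprefix('a/b/c', set()) is None
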
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif f not in wctx:
                    path = _checkunknowndirs(repo, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = set([c for c in allconflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                elif config == 'abort':
                    # Note: this branch is unreachable -- 'abort' is already
                    # caught by the 'mergeforce or config == abort' branch
                    # above, per row (1) of the table.
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_("%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_("%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_("%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            backup = (f in fileconflicts or f in pathconflicts or
                      any(p in pathconflicts for p in util.finddirs(f)))
            flags, = args
            actions[f] = ('g', (flags, backup), msg)

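Reviewer note: the force=True decision table above is easy to test in isolation. A pure-function sketch returning the chosen action name (names illustrative, not Mercurial API):

def forcedaction(config, different, mergeforce):
    if not different:
        return 'get'
    if mergeforce or config == 'abort':
        return 'merge'
    if config == 'warn':
        return 'warn + get'
    return 'get'          # config == 'ignore'

assert forcedaction('ignore', different=False, mergeforce=False) == 'get'
assert forcedaction('warn', different=True, mergeforce=True) == 'merge'
assert forcedaction('abort', different=True, mergeforce=False) == 'merge'
assert forcedaction('warn', different=True, mergeforce=False) == 'warn + get'
assert forcedaction('ignore', different=True, mergeforce=False) == 'get'
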
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    m = 'f'
    if branchmerge:
        m = 'r'
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = 'f', None, "forget removed"

    return actions

def _checkcollision(repo, wmf, actions):
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f

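Reviewer note: the file-level collision check above reduces to folding each path and watching for duplicates. A minimal standalone sketch on a plain list of paths, with str.lower() standing in for util.normcase:

def casecollision(paths):
    foldmap = {}
    for f in paths:
        fold = f.lower()
        if fold in foldmap:
            return (foldmap[fold], f)   # the colliding pair
        foldmap[fold] = f
    return None

assert casecollision(['README', 'readme']) == ('README', 'readme')
assert casecollision(['a/b', 'a/c']) is None
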
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for f in manifest:
        for p in util.finddirs(f):
            if p in dirs:
                yield f, p
                break

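Reviewer note: since util.finddirs yields the deepest ancestor first, each file is paired with the deepest listed directory containing it. A standalone sketch of that contract (finddirs here is a hypothetical stand-in):

def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

def filesindirs(manifest, dirs):
    for f in manifest:
        for p in finddirs(f):      # deepest prefix first
            if p in dirs:
                yield f, p
                break

files = ['a/b/c', 'a/d', 'e']
assert list(filesindirs(files, {'a', 'a/b'})) == [('a/b/c', 'a/b'),
                                                  ('a/d', 'a')]
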
def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in ('c', 'dc', 'm', 'cm'):
            # This action may create a new local file.
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
            for p in util.finddirs(f):
                if p in mf:
                    if p in mctx:
                        # The file is in a directory which aliases both a local
                        # and a remote file. This is an internal inconsistency
                        # within the remote manifest.
                        invalidconflicts.add(p)
                    else:
                        # The file is in a directory which aliases a local file.
                        # We will need to rename the local file.
                        localconflicts.add(p)
                if p in actions and actions[p][0] in ('c', 'dc', 'm', 'cm'):
                    # The file is in a directory which aliases a remote file.
                    # This is an internal inconsistency within the remote
                    # manifest.
                    invalidconflicts.add(p)

        # Track the names of all deleted files.
        if m == 'r':
            deletedfiles.add(f)
        if m == 'm':
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == 'dm':
            f2, flags = args
            deletedfiles.add(f2)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = str(wctx).rstrip('+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            actions[pnew] = ('pr', (p,), "local path conflict")
            actions[p] = ('p', (pnew, 'l'), "path conflict")

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = str(mctx).rstrip('+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in ('dc', 'm'):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = ('dg', (p, fl), "remote path conflict")
                actions[p] = ('p', (pnew, 'r'), "path conflict")
                remoteconflicts.remove(p)
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_("%s: is both a file and a directory\n") % p)
        raise error.Abort(_("destination manifest contains path conflicts"))

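Reviewer note: both conflict-handling branches above lean on util.safename to pick an unused replacement name. A minimal sketch of that kind of renaming scheme; the '~tag~n' suffix format and the plain 'taken' set are illustrative assumptions, not the real util.safename signature:

def safename(f, tag, taken):
    # Illustrative only: the real helper also consults the working context.
    for n in range(1, 100):
        candidate = '%s~%s~%d' % (f, tag, n)
        if candidate not in taken:
            return candidate
    return None

assert safename('dir', 'local', set()) == 'dir~local~1'
assert safename('dir', 'local', {'dir~local~1'}) == 'dir~local~2'
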
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies, forcefulldiff=False):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (boolbm, boolf, boolm))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in copy.iteritems():
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    if matcher is None:
        matcher = matchmod.always('', '')

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k', (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |  create
                #   y         n           *      |  create
                #   y         y           n      |  create
                #   y         y           y      |  merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                                   "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    # If we are merging, look for path conflicts.
    checkpathconflicts(repo, wctx, p2, actions)

    return actions, diverge, renamedelete

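Reviewer note: the "file exists on both sides and in the ancestor" branch above is a classic three-way decision on (nodeid, flags) triples. A pure-function sketch of just that branch, returning the single-letter action codes used in this file (inputs are plain strings, not manifest entries):

def trivialaction(n1, fl1, n2, fl2, a, fla):
    nol = 'l' not in fl1 + fl2 + fla        # no symlink flag anywhere
    if n2 == a and fl2 == fla:
        return 'k'                          # remote unchanged: keep local
    if n1 == a and fl1 == fla:
        return 'e' if n1 == n2 else 'g'     # local unchanged: exec-bit or get
    if nol and n2 == a:
        return 'e'                          # remote only changed 'x'
    if nol and n1 == a:
        return 'g'                          # local only changed 'x'
    return 'm'                              # both changed: merge

assert trivialaction('n1', '', 'old', '', 'old', '') == 'k'
assert trivialaction('old', '', 'n2', '', 'old', '') == 'g'
assert trivialaction('n1', '', 'n2', '', 'old', '') == 'm'
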
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""

    for f, (m, args, msg) in actions.items():
        if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
            # local did change but ended up with same content
            actions[f] = 'r', None, "prompt same"
        elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
            # remote did change but ended up with same content
            del actions[f] # don't get = keep local deleted

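Reviewer note: a standalone sketch of the same false-conflict filtering on plain dicts, where file "contents" are strings and equality stands in for the fctx.cmp() calls above:

def resolvetrivial(actions, local, remote, ancestor):
    # 'local'/'remote'/'ancestor' map filename -> content.
    for f, (m, args, msg) in list(actions.items()):
        if m == 'cd' and f in ancestor and local.get(f) == ancestor[f]:
            actions[f] = ('r', None, 'prompt same')
        elif m == 'dc' and f in ancestor and remote.get(f) == ancestor[f]:
            del actions[f]  # keep the local deletion

actions = {'x': ('cd', None, ''), 'y': ('dc', None, '')}
resolvetrivial(actions, {'x': 'v1'}, {'y': 'v1'}, {'x': 'v1', 'y': 'v1'})
assert actions == {'x': ('r', None, 'prompt same')}
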
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    """Calculate the actions needed to merge mctx into wctx using ancestors"""
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
                                         for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list of actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warnings on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete1) < len(renamedelete):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        dms = [] # filenames that have dm actions
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    if m == 'dm':
                        dms.append(f)
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            if m == 'dm':
                dms.append(f)
            continue
        # Work around 'dm' that can cause multiple actions for the same file
        for f in dms:
            dm, (f0, flags), msg = actions[f]
            assert dm == 'dm', dm
            if f0 in actions and actions[f0][0] == 'r':
                # We have one bid for removing a file and another for moving it.
                # These two could be merged as first move and then delete ...
                # but instead drop moving and just delete.
                del actions[f]
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
                                                actions)

    return prunedactions, diverge, renamedelete

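Reviewer note: the auction's preference order (unanimous bid, then 'keep', then an agreed 'get', else an arbitrary pick) is easy to exercise on plain dicts. A simplified standalone sketch, without the ui logging or 'dm' bookkeeping:

def auction(fbids):
    actions = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:
            m, l = list(bids.items())[0]
            actions[f] = l[0]
        elif 'k' in bids:
            actions[f] = bids['k'][0]
        elif 'g' in bids and all(a == bids['g'][0] for a in bids['g'][1:]):
            actions[f] = bids['g'][0]
        else:
            m, l = list(bids.items())[0]    # ambiguous: arbitrary pick
            actions[f] = l[0]
    return actions

fbids = {'a': {'g': [('g', ('', False), 'remote is newer')]},
         'b': {'k': [('k', (), 'keep')], 'g': [('g', ('', False), 'get')]}}
assert auction(fbids) == {'a': ('g', ('', False), 'remote is newer'),
                          'b': ('k', (), 'keep')}
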
def _getcwd():
    try:
        return pycompat.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None
        raise

def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(_("current directory was removed\n"
                       "(consider changing to repo root: %s)\n") % repo.root)

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()

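Reviewer note: the i-counter idiom above yields a progress tuple roughly every 100 items plus once for the remainder, so callers can batch UI updates. A minimal standalone sketch of just the counting pattern:

def batched(items, chunk=100):
    i = 0
    for it in items:
        if i == chunk:
            yield i        # a full batch has been processed
            i = 0
        i += 1
    if i > 0:
        yield i            # the final partial batch

assert list(batched(range(250))) == [100, 100, 50]
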
def batchget(repo, mctx, wctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                absf = repo.wjoin(f)
                if not repo.wvfs.lexists(f):
                    for p in util.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            absf = repo.wjoin(p)
                            break
                orig = scmutil.origpath(ui, repo, absf)
                if repo.wvfs.lexists(absf):
                    util.rename(absf, orig)
            wctx[f].clearunknown()
            wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()

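Reviewer note: the backup branch above picks what to rename aside: the path itself if something already exists there, otherwise the deepest file-or-link prefix of it. A standalone sketch with a plain set standing in for the wvfs existence checks:

def backuptarget(f, existing):
    # 'existing' is the set of paths currently present as files or links.
    if f in existing:
        return f
    pos = f.rfind('/')
    while pos != -1:            # deepest prefix first, like util.finddirs
        p = f[:pos]
        if p in existing:
            return p
        pos = f.rfind('/', 0, pos)
    return None

assert backuptarget('a/b/c', {'a/b/c'}) == 'a/b/c'
assert backuptarget('a/b/c', {'a/b'}) == 'a/b'
assert backuptarget('a/b/c', {'a'}) == 'a'
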
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
    z = 0

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions['p']:
        f1, fo = args
        s = repo.ui.status
        s(_("%s: path conflict - a file or link has the same name as a "
            "directory\n") % f)
        if fo == 'l':
            s(_("the local file has been renamed to %s\n") % f1)
        else:
            s(_("the remote file has been renamed to %s\n") % f1)
        s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpath(f, f1, fo)
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # resolve path conflicts (must come before getting)
    for f, args, msg in actions['pr']:
        repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
        f0, = args
        if wctx[f0].lexists():
            repo.ui.note(_("moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # We should flush before forking into worker processes, since those workers
    # flush when they complete, and we don't want to duplicate work.
    wctx.flushall()

    # get in parallel
    prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                 overwrite, labels)
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
            z += 1
            progress(_updating, z, item=f, total=numupdates, unit=_files)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

    ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
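# A rough sketch of the 'actions' mapping that applyupdates consumes, using
# the action codes handled above. The file names and payloads are made-up
# examples, not produced by this code; each list holds (file, args, msg)
# tuples, and the empty keys are shown only for shape.
_exampleactions = {
    'r': [('stale.txt', None, 'other deleted')],        # remove from disk
    'g': [('new.txt', ('', False), 'remote created')],  # get: args=(flags, backup)
    'm': [],   # merge entries carry (f1, f2, fa, move, anc) in args
    'cd': [], 'dc': [], 'k': [], 'e': [], 'a': [], 'am': [],
    'f': [], 'dm': [], 'dg': [], 'p': [], 'pr': [],
}
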
def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get('pr', []):
        f0, = args
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
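# Condensed summary of recordupdates (informational only, derived from the
# branches above): the dirstate call made for a file depends on whether the
# operation was a branch merge.
#
#   action   branchmerge=False (update)   branchmerge=True (merge)
#   'r'      dirstate.drop(f)             dirstate.remove(f)
#   'g'      dirstate.normal(f)           dirstate.otherparent(f)
#   'am'     dirstate.add(f)              dirstate.normallookup(f)
#   'm'      dirstate.normallookup(f)     dirstate.merge(f) + copy records
#
# so the next commit records either a plain checkout or a two-parent merge.
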
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c, -C, -n and -m options (or none of them), whether the working directory
    is dirty, whether a revision is specified, and the relationship of the
    parent rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *    |    (1)
     y   *   y   *    *     *     *    |    (1)
     y   *   *   y    *     *     *    |    (1)
     *   y   y   *    *     *     *    |    (1)
     *   y   *   y    *     *     *    |    (1)
     *   *   y   y    *     *     *    |    (1)
     *   *   *   *    *     n     n    |     x
     *   *   *   *    n     *     *    |    ok
     n   n   n   n    y     *     y    |  merge
     n   n   n   n    y     y     n    |    (2)
     n   n   n   y    y     *     *    |  merge
     n   n   y   n    y     *     *    |  merge if no conflict
     n   y   n   n    y     *     *    |  discard
     y   n   n   n    y     *     *    |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # The branching is a bit strange here, to keep the number
                    # of calls to obsutil.foreground minimal.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r', 'pr'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, [])
                       for m in 'a am f g cd dc r dm dg m e k p pr'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
        wc.flushall()

        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

                if not branchmerge:
                    repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
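# Hedged illustration of how callers typically drive update(); the command
# mappings below are inferred from the docstring and table above, not from
# this changeset:
#
#   'hg update REV'         -> update(repo, rev, branchmerge=False, force=False)
#   'hg update --clean REV' -> update(repo, rev, branchmerge=False, force=True)
#   'hg merge REV'          -> update(repo, rev, branchmerge=True, force=False)
#
# The return value has the same shape as applyupdates():
#   updated, merged, removed, unresolved = update(repo, node, False, False)
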
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels, e.g. ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
-       copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
+       copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
    return stats
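# Illustrative graft()-style call, assuming 'repo' and a revision 'rev' are
# already in hand (variable names are hypothetical):
#
#   ctx = repo[rev]
#   stats = graft(repo, ctx, ctx.p1(), ['local', 'graft'])
#   if stats[3] > 0:
#       # there are unresolved files; a caller like 'hg graft' stops here
#       # and asks the user to run 'hg resolve' before continuing
#       ...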