spelling: fix some minor issues found by spell checker
Mads Kiilerich - r18644:3e92772d default
@@ -1,366 +1,366 @@
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 from mercurial import cmdutil, scmutil, util, match, commands, obsolete
5 5 from mercurial import repoview, branchmap
6 6 import time, os, sys
7 7
8 8 cmdtable = {}
9 9 command = cmdutil.command(cmdtable)
10 10
11 11 def timer(func, title=None):
12 12 results = []
13 13 begin = time.time()
14 14 count = 0
15 15 while True:
16 16 ostart = os.times()
17 17 cstart = time.time()
18 18 r = func()
19 19 cstop = time.time()
20 20 ostop = os.times()
21 21 count += 1
22 22 a, b = ostart, ostop
23 23 results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
24 24 if cstop - begin > 3 and count >= 100:
25 25 break
26 26 if cstop - begin > 10 and count >= 3:
27 27 break
28 28 if title:
29 29 sys.stderr.write("! %s\n" % title)
30 30 if r:
31 31 sys.stderr.write("! result: %s\n" % r)
32 32 m = min(results)
33 33 sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
34 34 % (m[0], m[1] + m[2], m[1], m[2], count))
35 35
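The harness above keeps re-running the callable until it has enough samples (at least 100 runs once 3 seconds have passed, or at least 3 runs once 10 seconds have passed) and then reports the minimum, which filters out scheduler noise better than an average would. A hypothetical direct use of `timer`, outside any perf command (the workload and title here are made up)::

    def work():
        # arbitrary CPU-bound workload, purely illustrative
        return sum(i * i for i in xrange(10000))

    timer(work, title='sum of squares')
    # prints something like:
    # ! sum of squares
    # ! result: 333283335000
    # ! wall 0.000900 comb 0.000000 user 0.000000 sys 0.000000 (best of 2843)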
36 36 @command('perfwalk')
37 37 def perfwalk(ui, repo, *pats):
38 38 try:
39 39 m = scmutil.match(repo[None], pats, {})
40 40 timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
41 41 except Exception:
42 42 try:
43 43 m = scmutil.match(repo[None], pats, {})
44 44 timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
45 45 except Exception:
46 46 timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
47 47
48 48 @command('perfstatus',
49 49 [('u', 'unknown', False,
50 50 'ask status to look for unknown files')])
51 51 def perfstatus(ui, repo, **opts):
52 52 #m = match.always(repo.root, repo.getcwd())
53 53 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
54 54 # False))))
55 55 timer(lambda: sum(map(len, repo.status(**opts))))
56 56
57 57 def clearcaches(cl):
58 58 # behave somewhat consistently across internal API changes
59 59 if util.safehasattr(cl, 'clearcaches'):
60 60 cl.clearcaches()
61 61 elif util.safehasattr(cl, '_nodecache'):
62 62 from mercurial.node import nullid, nullrev
63 63 cl._nodecache = {nullid: nullrev}
64 64 cl._nodepos = None
65 65
66 66 @command('perfheads')
67 67 def perfheads(ui, repo):
68 68 cl = repo.changelog
69 69 def d():
70 70 len(cl.headrevs())
71 71 clearcaches(cl)
72 72 timer(d)
73 73
74 74 @command('perftags')
75 75 def perftags(ui, repo):
76 76 import mercurial.changelog, mercurial.manifest
77 77 def t():
78 78 repo.changelog = mercurial.changelog.changelog(repo.sopener)
79 79 repo.manifest = mercurial.manifest.manifest(repo.sopener)
80 80 repo._tags = None
81 81 return len(repo.tags())
82 82 timer(t)
83 83
84 84 @command('perfancestors')
85 85 def perfancestors(ui, repo):
86 86 heads = repo.changelog.headrevs()
87 87 def d():
88 88 for a in repo.changelog.ancestors(heads):
89 89 pass
90 90 timer(d)
91 91
92 92 @command('perfancestorset')
93 93 def perfancestorset(ui, repo, revset):
94 94 revs = repo.revs(revset)
95 95 heads = repo.changelog.headrevs()
96 96 def d():
97 97 s = repo.changelog.ancestors(heads)
98 98 for rev in revs:
99 99 rev in s
100 100 timer(d)
101 101
102 102 @command('perfdirstate')
103 103 def perfdirstate(ui, repo):
104 104 "a" in repo.dirstate
105 105 def d():
106 106 repo.dirstate.invalidate()
107 107 "a" in repo.dirstate
108 108 timer(d)
109 109
110 110 @command('perfdirstatedirs')
111 111 def perfdirstatedirs(ui, repo):
112 112 "a" in repo.dirstate
113 113 def d():
114 114 "a" in repo.dirstate._dirs
115 115 del repo.dirstate._dirs
116 116 timer(d)
117 117
118 118 @command('perfdirstatewrite')
119 119 def perfdirstatewrite(ui, repo):
120 120 ds = repo.dirstate
121 121 "a" in ds
122 122 def d():
123 123 ds._dirty = True
124 124 ds.write()
125 125 timer(d)
126 126
127 127 @command('perfmanifest')
128 128 def perfmanifest(ui, repo):
129 129 def d():
130 130 t = repo.manifest.tip()
131 131 m = repo.manifest.read(t)
132 132 repo.manifest.mapcache = None
133 133 repo.manifest._cache = None
134 134 timer(d)
135 135
136 136 @command('perfchangeset')
137 137 def perfchangeset(ui, repo, rev):
138 138 n = repo[rev].node()
139 139 def d():
140 140 c = repo.changelog.read(n)
141 141 #repo.changelog._cache = None
142 142 timer(d)
143 143
144 144 @command('perfindex')
145 145 def perfindex(ui, repo):
146 146 import mercurial.revlog
147 147 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
148 148 n = repo["tip"].node()
149 149 def d():
150 150 cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i")
151 151 cl.rev(n)
152 152 timer(d)
153 153
154 154 @command('perfstartup')
155 155 def perfstartup(ui, repo):
156 156 cmd = sys.argv[0]
157 157 def d():
158 158 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
159 159 timer(d)
160 160
161 161 @command('perfparents')
162 162 def perfparents(ui, repo):
163 163 nl = [repo.changelog.node(i) for i in xrange(1000)]
164 164 def d():
165 165 for n in nl:
166 166 repo.changelog.parents(n)
167 167 timer(d)
168 168
169 169 @command('perflookup')
170 170 def perflookup(ui, repo, rev):
171 171 timer(lambda: len(repo.lookup(rev)))
172 172
173 173 @command('perfrevrange')
174 174 def perfrevrange(ui, repo, *specs):
175 175 revrange = scmutil.revrange
176 176 timer(lambda: len(revrange(repo, specs)))
177 177
178 178 @command('perfnodelookup')
179 179 def perfnodelookup(ui, repo, rev):
180 180 import mercurial.revlog
181 181 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
182 182 n = repo[rev].node()
183 183 cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i")
184 184 def d():
185 185 cl.rev(n)
186 186 clearcaches(cl)
187 187 timer(d)
188 188
189 189 @command('perflog',
190 190 [('', 'rename', False, 'ask log to follow renames')])
191 191 def perflog(ui, repo, **opts):
192 192 ui.pushbuffer()
193 193 timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
194 194 copies=opts.get('rename')))
195 195 ui.popbuffer()
196 196
197 197 @command('perftemplating')
198 198 def perftemplating(ui, repo):
199 199 ui.pushbuffer()
200 200 timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
201 201 template='{date|shortdate} [{rev}:{node|short}]'
202 202 ' {author|person}: {desc|firstline}\n'))
203 203 ui.popbuffer()
204 204
205 205 @command('perfcca')
206 206 def perfcca(ui, repo):
207 207 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
208 208
209 209 @command('perffncacheload')
210 210 def perffncacheload(ui, repo):
211 211 s = repo.store
212 212 def d():
213 213 s.fncache._load()
214 214 timer(d)
215 215
216 216 @command('perffncachewrite')
217 217 def perffncachewrite(ui, repo):
218 218 s = repo.store
219 219 s.fncache._load()
220 220 def d():
221 221 s.fncache._dirty = True
222 222 s.fncache.write()
223 223 timer(d)
224 224
225 225 @command('perffncacheencode')
226 226 def perffncacheencode(ui, repo):
227 227 s = repo.store
228 228 s.fncache._load()
229 229 def d():
230 230 for p in s.fncache.entries:
231 231 s.encode(p)
232 232 timer(d)
233 233
234 234 @command('perfdiffwd')
235 235 def perfdiffwd(ui, repo):
236 236 """Profile diff of working directory changes"""
237 237 options = {
238 238 'w': 'ignore_all_space',
239 239 'b': 'ignore_space_change',
240 240 'B': 'ignore_blank_lines',
241 241 }
242 242
243 243 for diffopt in ('', 'w', 'b', 'B', 'wB'):
244 244 opts = dict((options[c], '1') for c in diffopt)
245 245 def d():
246 246 ui.pushbuffer()
247 247 commands.diff(ui, repo, **opts)
248 248 ui.popbuffer()
249 249 title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
250 250 timer(d, title)
251 251
252 252 @command('perfrevlog',
253 253 [('d', 'dist', 100, 'distance between the revisions')],
254 254 "[INDEXFILE]")
255 255 def perfrevlog(ui, repo, file_, **opts):
256 256 from mercurial import revlog
257 257 dist = opts['dist']
258 258 def d():
259 259 r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)
260 260 for x in xrange(0, len(r), dist):
261 261 r.revision(r.node(x))
262 262
263 263 timer(d)
264 264
265 265 @command('perfrevset',
266 266 [('C', 'clear', False, 'clear volatile cache between each call.')],
267 267 "REVSET")
268 268 def perfrevset(ui, repo, expr, clear=False):
269 269 """benchmark the execution time of a revset
270 270
271 Use the --clean option if need to evaluate the impact of build volative
271 Use the --clean option if need to evaluate the impact of build volatile
272 272 revision set caches on the revset execution. The volatile caches hold filtering-
273 273 and obsolescence-related data."""
274 274 def d():
275 275 if clear:
276 276 repo.invalidatevolatilesets()
277 277 repo.revs(expr)
278 278 timer(d)
279 279
280 280 @command('perfvolatilesets')
281 281 def perfvolatilesets(ui, repo, *names):
282 282 """benchmark the computation of various volatile set
283 283
284 284 Volatile set computes element related to filtering and obsolescence."""
285 285 repo = repo.unfiltered()
286 286
287 287 def getobs(name):
288 288 def d():
289 289 repo.invalidatevolatilesets()
290 290 obsolete.getrevs(repo, name)
291 291 return d
292 292
293 293 allobs = sorted(obsolete.cachefuncs)
294 294 if names:
295 295 allobs = [n for n in allobs if n in names]
296 296
297 297 for name in allobs:
298 298 timer(getobs(name), title=name)
299 299
300 300 def getfiltered(name):
301 301 def d():
302 302 repo.invalidatevolatilesets()
303 303 repoview.filteredrevs(repo, name)
304 304 return d
305 305
306 306 allfilter = sorted(repoview.filtertable)
307 307 if names:
308 308 allfilter = [n for n in allfilter if n in names]
309 309
310 310 for name in allfilter:
311 311 timer(getfiltered(name), title=name)
312 312
313 313 @command('perfbranchmap',
314 314 [('f', 'full', False,
315 315 'Includes build time of subset'),
316 316 ])
317 317 def perfbranchmap(ui, repo, full=False):
318 318 """benchmark the update of a branchmap
319 319
320 320 This benchmarks the full repo.branchmap() call with read and write disabled
321 321 """
322 322 def getbranchmap(filtername):
323 323 """generate a benchmark function for the filtername"""
324 324 if filtername is None:
325 325 view = repo
326 326 else:
327 327 view = repo.filtered(filtername)
328 328 def d():
329 329 if full:
330 330 view._branchcaches.clear()
331 331 else:
332 332 view._branchcaches.pop(filtername, None)
333 333 view.branchmap()
334 334 return d
335 335 # add filters in order, from smaller subset to bigger subset
336 336 possiblefilters = set(repoview.filtertable)
337 337 allfilters = []
338 338 while possiblefilters:
339 339 for name in possiblefilters:
340 340 subset = repoview.subsettable.get(name)
341 341 if subset not in possiblefilters:
342 342 break
343 343 else:
344 344 assert False, 'subset cycle %s!' % possiblefilters
345 345 allfilters.append(name)
346 346 possiblefilters.remove(name)
347 347
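The `while`/`for`/`else` loop above is a small topological sort: each pass emits a filter whose subset has already been emitted, and the `else` clause only fires when a full pass finds nothing, i.e. when the subset relation contains a cycle. The same pattern on a toy dependency table (names are illustrative)::

    deps = {'c': 'b', 'b': 'a', 'a': None}   # each name depends on its subset
    pending = set(deps)
    ordered = []
    while pending:
        for name in pending:
            if deps[name] not in pending:
                break                # its dependency is already emitted
        else:
            raise RuntimeError('dependency cycle: %r' % pending)
        ordered.append(name)
        pending.remove(name)
    print(ordered)                   # -> ['a', 'b', 'c']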
348 348 # warm the cache
349 349 if not full:
350 350 for name in allfilters:
351 351 repo.filtered(name).branchmap()
352 352 # add unfiltered
353 353 allfilters.append(None)
354 354 oldread = branchmap.read
355 355 oldwrite = branchmap.branchcache.write
356 356 try:
357 357 branchmap.read = lambda repo: None
358 358 branchmap.write = lambda repo: None
359 359 for name in allfilters:
360 360 timer(getbranchmap(name), title=str(name))
361 361 finally:
362 362 branchmap.read = oldread
363 363 branchmap.branchcache.write = oldwrite
364 364
365 365
366 366
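`perfbranchmap` stubs out branchmap reading and writing for the duration of the runs and restores the real functions in a `finally` block, so an aborted benchmark cannot leave the module patched. The same save/patch/restore idiom in miniature, with `json.loads` standing in for the patched attribute (the benchmark body is a placeholder)::

    import json

    def run_benchmark():
        pass                          # stand-in for the timed work

    oldloads = json.loads
    try:
        json.loads = lambda s: None   # stub out the real function for the run
        run_benchmark()
    finally:
        json.loads = oldloads         # always restore, even if the run fails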
@@ -1,815 +1,815 @@
1 1 # histedit.py - interactive history editing for mercurial
2 2 #
3 3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """interactive history editing
8 8
9 9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 10 is as follows, assuming the following history::
11 11
12 12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 13 | Add delta
14 14 |
15 15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 16 | Add gamma
17 17 |
18 18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 19 | Add beta
20 20 |
21 21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 22 Add alpha
23 23
24 24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 25 file open in your editor::
26 26
27 27 pick c561b4e977df Add beta
28 28 pick 030b686bedc4 Add gamma
29 29 pick 7c2fd3b9020c Add delta
30 30
31 31 # Edit history between c561b4e977df and 7c2fd3b9020c
32 32 #
33 33 # Commands:
34 34 # p, pick = use commit
35 35 # e, edit = use commit, but stop for amending
36 36 # f, fold = use commit, but fold into previous commit (combines N and N-1)
37 37 # d, drop = remove commit from history
38 38 # m, mess = edit message without changing commit content
39 39 #
40 40
41 41 In this file, lines beginning with ``#`` are ignored. You must specify a rule
42 42 for each revision in your history. For example, if you had meant to add gamma
43 43 before beta, and then wanted to add delta in the same revision as beta, you
44 44 would reorganize the file to look like this::
45 45
46 46 pick 030b686bedc4 Add gamma
47 47 pick c561b4e977df Add beta
48 48 fold 7c2fd3b9020c Add delta
49 49
50 50 # Edit history between c561b4e977df and 7c2fd3b9020c
51 51 #
52 52 # Commands:
53 53 # p, pick = use commit
54 54 # e, edit = use commit, but stop for amending
55 55 # f, fold = use commit, but fold into previous commit (combines N and N-1)
56 56 # d, drop = remove commit from history
57 57 # m, mess = edit message without changing commit content
58 58 #
59 59
60 60 At which point you close the editor and ``histedit`` starts working. When you
61 61 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
62 62 those revisions together, offering you a chance to clean up the commit message::
63 63
64 64 Add beta
65 65 ***
66 66 Add delta
67 67
68 68 Edit the commit message to your liking, then close the editor. For
69 69 this example, let's assume that the commit message was changed to
70 70 ``Add beta and delta.`` After histedit has run and had a chance to
71 71 remove any old or temporary revisions it needed, the history looks
72 72 like this::
73 73
74 74 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
75 75 | Add beta and delta.
76 76 |
77 77 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
78 78 | Add gamma
79 79 |
80 80 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
81 81 Add alpha
82 82
83 83 Note that ``histedit`` does *not* remove any revisions (even its own temporary
84 84 ones) until after it has completed all the editing operations, so it will
85 85 probably perform several strip operations when it's done. For the above example,
86 86 it had to run strip twice. Strip can be slow depending on a variety of factors,
87 87 so you might need to be a little patient. You can choose to keep the original
88 88 revisions by passing the ``--keep`` flag.
89 89
90 90 The ``edit`` operation will drop you back to a command prompt,
91 91 allowing you to edit files freely, or even use ``hg record`` to commit
92 92 some changes as a separate commit. When you're done, any remaining
93 93 uncommitted changes will be committed as well. When done, run ``hg
94 94 histedit --continue`` to finish this step. You'll be prompted for a
95 95 new commit message, but the default commit message will be the
96 96 original message for the ``edit`` ed revision.
97 97
98 98 The ``message`` operation will give you a chance to revise a commit
99 99 message without changing the contents. It's a shortcut for doing
100 100 ``edit`` immediately followed by `hg histedit --continue``.
101 101
102 102 If ``histedit`` encounters a conflict when moving a revision (while
103 103 handling ``pick`` or ``fold``), it'll stop in a similar manner to
104 104 ``edit`` with the difference that it won't prompt you for a commit
105 105 message when done. If you decide at this point that you don't like how
106 106 much work it will be to rearrange history, or that you made a mistake,
107 107 you can use ``hg histedit --abort`` to abandon the new changes you
108 108 have made and return to the state before you attempted to edit your
109 109 history.
110 110
111 111 If we clone the histedit-ed example repository above and add four more
112 112 changes, such that we have the following history::
113 113
114 114 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
115 115 | Add theta
116 116 |
117 117 o 5 140988835471 2009-04-27 18:04 -0500 stefan
118 118 | Add eta
119 119 |
120 120 o 4 122930637314 2009-04-27 18:04 -0500 stefan
121 121 | Add zeta
122 122 |
123 123 o 3 836302820282 2009-04-27 18:04 -0500 stefan
124 124 | Add epsilon
125 125 |
126 126 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
127 127 | Add beta and delta.
128 128 |
129 129 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
130 130 | Add gamma
131 131 |
132 132 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
133 133 Add alpha
134 134
135 135 If you run ``hg histedit --outgoing`` on the clone then it is the same
136 136 as running ``hg histedit 836302820282``. If you plan to push to a
137 137 repository that Mercurial does not detect to be related to the source
138 138 repo, you can add a ``--force`` option.
139 139 """
140 140
141 141 try:
142 142 import cPickle as pickle
143 143 except ImportError:
144 144 import pickle
145 145 import os
146 146
147 147 from mercurial import cmdutil
148 148 from mercurial import discovery
149 149 from mercurial import error
150 150 from mercurial import copies
151 151 from mercurial import context
152 152 from mercurial import hg
153 153 from mercurial import lock as lockmod
154 154 from mercurial import node
155 155 from mercurial import repair
156 156 from mercurial import scmutil
157 157 from mercurial import util
158 158 from mercurial import obsolete
159 159 from mercurial import merge as mergemod
160 160 from mercurial.i18n import _
161 161
162 162 cmdtable = {}
163 163 command = cmdutil.command(cmdtable)
164 164
165 165 testedwith = 'internal'
166 166
167 167 # i18n: command names and abbreviations must remain untranslated
168 168 editcomment = _("""# Edit history between %s and %s
169 169 #
170 170 # Commands:
171 171 # p, pick = use commit
172 172 # e, edit = use commit, but stop for amending
173 173 # f, fold = use commit, but fold into previous commit (combines N and N-1)
174 174 # d, drop = remove commit from history
175 175 # m, mess = edit message without changing commit content
176 176 #
177 177 """)
178 178
179 179 def commitfuncfor(repo, src):
180 180 """Build a commit function for the replacement of <src>
181 181
182 This function ensure we apply the same treatement to all changesets.
182 This function ensure we apply the same treatment to all changesets.
183 183
184 184 - Add a 'histedit_source' entry in extra.
185 185
186 186 Note that fold has its own separate logic because its handling is a bit
187 187 different and not easily factored out of the fold method.
188 188 """
189 189 phasemin = src.phase()
190 190 def commitfunc(**kwargs):
191 191 phasebackup = repo.ui.backupconfig('phases', 'new-commit')
192 192 try:
193 193 repo.ui.setconfig('phases', 'new-commit', phasemin)
194 194 extra = kwargs.get('extra', {}).copy()
195 195 extra['histedit_source'] = src.hex()
196 196 kwargs['extra'] = extra
197 197 return repo.commit(**kwargs)
198 198 finally:
199 199 repo.ui.restoreconfig(phasebackup)
200 200 return commitfunc
201 201
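Because `commitfuncfor` returns a closure, the phase floor is read from `src` once and then applied identically to every replacement commit, and the configuration override is confined to a backup/restore pair. A toy version of that shape, with a plain dict standing in for `repo.ui` (all names here are illustrative)::

    def makecommitter(settings, phasemin):
        def commitfunc(**kwargs):
            backup = settings.get('new-commit')
            try:
                settings['new-commit'] = phasemin   # floor the phase
                return dict(kwargs, phase=settings['new-commit'])
            finally:
                settings['new-commit'] = backup     # never leak the override
        return commitfunc

    commit = makecommitter({'new-commit': 'draft'}, 'secret')
    print(commit(text='replacement'))
    # -> {'phase': 'secret', 'text': 'replacement'}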
202 202
203 203
204 204 def applychanges(ui, repo, ctx, opts):
205 205 """Merge changeset from ctx (only) in the current working directory"""
206 206 wcpar = repo.dirstate.parents()[0]
207 207 if ctx.p1().node() == wcpar:
208 208 # the edit is "in place", so we do not need to make any merge;
209 209 # just apply the changes onto the parent for editing
210 210 cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True)
211 211 stats = None
212 212 else:
213 213 try:
214 214 # ui.forcemerge is an internal variable, do not document
215 215 repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
216 216 stats = mergemod.update(repo, ctx.node(), True, True, False,
217 217 ctx.p1().node())
218 218 finally:
219 219 repo.ui.setconfig('ui', 'forcemerge', '')
220 220 repo.setparents(wcpar, node.nullid)
221 221 repo.dirstate.write()
222 222 # fix up dirstate for copies and renames
223 223 cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev())
224 224 return stats
225 225
226 226 def collapse(repo, first, last, commitopts):
227 227 """collapse the set of revisions from first to last as new one.
228 228
229 229 Expected commit options are:
230 230 - message
231 231 - date
232 232 - username
233 233 Commit message is edited in all cases.
234 234
235 235 This function works in memory."""
236 236 ctxs = list(repo.set('%d::%d', first, last))
237 237 if not ctxs:
238 238 return None
239 239 base = first.parents()[0]
240 240
241 241 # commit a new version of the old changeset, including the update
242 242 # collect all files which might be affected
243 243 files = set()
244 244 for ctx in ctxs:
245 245 files.update(ctx.files())
246 246
247 247 # Recompute copies (avoid recording a -> b -> a)
248 248 copied = copies.pathcopies(first, last)
249 249
250 250 # prune files which were reverted by the updates
251 251 def samefile(f):
252 252 if f in last.manifest():
253 253 a = last.filectx(f)
254 254 if f in base.manifest():
255 255 b = base.filectx(f)
256 256 return (a.data() == b.data()
257 257 and a.flags() == b.flags())
258 258 else:
259 259 return False
260 260 else:
261 261 return f not in base.manifest()
262 262 files = [f for f in files if not samefile(f)]
263 263 # commit version of these files as defined by head
264 264 headmf = last.manifest()
265 265 def filectxfn(repo, ctx, path):
266 266 if path in headmf:
267 267 fctx = last[path]
268 268 flags = fctx.flags()
269 269 mctx = context.memfilectx(fctx.path(), fctx.data(),
270 270 islink='l' in flags,
271 271 isexec='x' in flags,
272 272 copied=copied.get(path))
273 273 return mctx
274 274 raise IOError()
275 275
276 276 if commitopts.get('message'):
277 277 message = commitopts['message']
278 278 else:
279 279 message = first.description()
280 280 user = commitopts.get('user')
281 281 date = commitopts.get('date')
282 282 extra = commitopts.get('extra')
283 283
284 284 parents = (first.p1().node(), first.p2().node())
285 285 new = context.memctx(repo,
286 286 parents=parents,
287 287 text=message,
288 288 files=files,
289 289 filectxfn=filectxfn,
290 290 user=user,
291 291 date=date,
292 292 extra=extra)
293 293 new._text = cmdutil.commitforceeditor(repo, new, [])
294 294 return repo.commitctx(new)
295 295
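The `samefile` predicate above prunes files that ended up byte-for-byte identical (content and flags) to the base revision, so a change that was later reverted leaves no phantom entry in the collapsed commit. A toy version over plain dicts, ignoring flags (file names and contents are made up)::

    base = {'a.txt': 'one', 'b.txt': 'two'}
    last = {'a.txt': 'one', 'c.txt': 'three'}   # b.txt deleted, c.txt added
    touched = ['a.txt', 'b.txt', 'c.txt']

    def samefile(f):
        if f in last:
            # still present at the end: unchanged only if base agrees exactly
            return f in base and base[f] == last[f]
        # absent at the end: unchanged only if it was absent in base too
        return f not in base

    print([f for f in touched if not samefile(f)])
    # -> ['b.txt', 'c.txt']; a.txt is pruned, it matches base again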
296 296 def pick(ui, repo, ctx, ha, opts):
297 297 oldctx = repo[ha]
298 298 if oldctx.parents()[0] == ctx:
299 299 ui.debug('node %s unchanged\n' % ha)
300 300 return oldctx, []
301 301 hg.update(repo, ctx.node())
302 302 stats = applychanges(ui, repo, oldctx, opts)
303 303 if stats and stats[3] > 0:
304 304 raise util.Abort(_('Fix up the change and run '
305 305 'hg histedit --continue'))
306 306 # drop the second merge parent
307 307 commit = commitfuncfor(repo, oldctx)
308 308 n = commit(text=oldctx.description(), user=oldctx.user(),
309 309 date=oldctx.date(), extra=oldctx.extra())
310 310 if n is None:
311 311 ui.warn(_('%s: empty changeset\n')
312 312 % node.hex(ha))
313 313 return ctx, []
314 314 new = repo[n]
315 315 return new, [(oldctx.node(), (n,))]
316 316
317 317
318 318 def edit(ui, repo, ctx, ha, opts):
319 319 oldctx = repo[ha]
320 320 hg.update(repo, ctx.node())
321 321 applychanges(ui, repo, oldctx, opts)
322 322 raise util.Abort(_('Make changes as needed, you may commit or record as '
323 323 'needed now.\nWhen you are finished, run hg'
324 324 ' histedit --continue to resume.'))
325 325
326 326 def fold(ui, repo, ctx, ha, opts):
327 327 oldctx = repo[ha]
328 328 hg.update(repo, ctx.node())
329 329 stats = applychanges(ui, repo, oldctx, opts)
330 330 if stats and stats[3] > 0:
331 331 raise util.Abort(_('Fix up the change and run '
332 332 'hg histedit --continue'))
333 333 n = repo.commit(text='fold-temp-revision %s' % ha, user=oldctx.user(),
334 334 date=oldctx.date(), extra=oldctx.extra())
335 335 if n is None:
336 336 ui.warn(_('%s: empty changeset')
337 337 % node.hex(ha))
338 338 return ctx, []
339 339 return finishfold(ui, repo, ctx, oldctx, n, opts, [])
340 340
341 341 def finishfold(ui, repo, ctx, oldctx, newnode, opts, internalchanges):
342 342 parent = ctx.parents()[0].node()
343 343 hg.update(repo, parent)
344 344 ### prepare new commit data
345 345 commitopts = opts.copy()
346 346 # username
347 347 if ctx.user() == oldctx.user():
348 348 username = ctx.user()
349 349 else:
350 350 username = ui.username()
351 351 commitopts['user'] = username
352 352 # commit message
353 353 newmessage = '\n***\n'.join(
354 354 [ctx.description()] +
355 355 [repo[r].description() for r in internalchanges] +
356 356 [oldctx.description()]) + '\n'
357 357 commitopts['message'] = newmessage
358 358 # date
359 359 commitopts['date'] = max(ctx.date(), oldctx.date())
360 360 extra = ctx.extra().copy()
361 361 # histedit_source
362 362 # note: ctx is likely a temporary commit, but that is the best we can do here.
363 363 # This is sufficient to solve issue3681 anyway
364 364 extra['histedit_source'] = '%s,%s' % (ctx.hex(), oldctx.hex())
365 365 commitopts['extra'] = extra
366 366 phasebackup = repo.ui.backupconfig('phases', 'new-commit')
367 367 try:
368 368 phasemin = max(ctx.phase(), oldctx.phase())
369 369 repo.ui.setconfig('phases', 'new-commit', phasemin)
370 370 n = collapse(repo, ctx, repo[newnode], commitopts)
371 371 finally:
372 372 repo.ui.restoreconfig(phasebackup)
373 373 if n is None:
374 374 return ctx, []
375 375 hg.update(repo, n)
376 376 replacements = [(oldctx.node(), (newnode,)),
377 377 (ctx.node(), (n,)),
378 378 (newnode, (n,)),
379 379 ]
380 380 for ich in internalchanges:
381 381 replacements.append((ich, (n,)))
382 382 return repo[n], replacements
383 383
384 384 def drop(ui, repo, ctx, ha, opts):
385 385 return ctx, [(repo[ha].node(), ())]
386 386
387 387
388 388 def message(ui, repo, ctx, ha, opts):
389 389 oldctx = repo[ha]
390 390 hg.update(repo, ctx.node())
391 391 stats = applychanges(ui, repo, oldctx, opts)
392 392 if stats and stats[3] > 0:
393 393 raise util.Abort(_('Fix up the change and run '
394 394 'hg histedit --continue'))
395 395 message = oldctx.description() + '\n'
396 396 message = ui.edit(message, ui.username())
397 397 commit = commitfuncfor(repo, oldctx)
398 398 new = commit(text=message, user=oldctx.user(), date=oldctx.date(),
399 399 extra=oldctx.extra())
400 400 newctx = repo[new]
401 401 if oldctx.node() != newctx.node():
402 402 return newctx, [(oldctx.node(), (new,))]
403 403 # We didn't make an edit, so just indicate no replaced nodes
404 404 return newctx, []
405 405
406 406 actiontable = {'p': pick,
407 407 'pick': pick,
408 408 'e': edit,
409 409 'edit': edit,
410 410 'f': fold,
411 411 'fold': fold,
412 412 'd': drop,
413 413 'drop': drop,
414 414 'm': message,
415 415 'mess': message,
416 416 }
417 417
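Mapping both the one-letter abbreviation and the long rule name onto the same handler makes dispatch in the main histedit loop a single dict lookup. A condensed sketch of that dispatch (handlers and hashes are illustrative)::

    def pick(state, ha):
        return 'picked %s' % ha

    def drop(state, ha):
        return 'dropped %s' % ha

    actiontable = {'p': pick, 'pick': pick,
                   'd': drop, 'drop': drop}

    for action, ha in [('pick', 'c561b4e977df'), ('d', '030b686bedc4')]:
        print(actiontable[action](None, ha))
    # -> picked c561b4e977df
    # -> dropped 030b686bedc4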
418 418 @command('histedit',
419 419 [('', 'commands', '',
420 420 _('Read history edits from the specified file.')),
421 421 ('c', 'continue', False, _('continue an edit already in progress')),
422 422 ('k', 'keep', False,
423 423 _("don't strip old nodes after edit is complete")),
424 424 ('', 'abort', False, _('abort an edit in progress')),
425 425 ('o', 'outgoing', False, _('changesets not found in destination')),
426 426 ('f', 'force', False,
427 427 _('force outgoing even for unrelated repositories')),
428 428 ('r', 'rev', [], _('first revision to be edited'))],
429 429 _("[PARENT]"))
430 430 def histedit(ui, repo, *parent, **opts):
431 431 """interactively edit changeset history
432 432 """
433 433 # TODO only abort if we try and histedit mq patches, not just
434 434 # blanket if mq patches are applied somewhere
435 435 mq = getattr(repo, 'mq', None)
436 436 if mq and mq.applied:
437 437 raise util.Abort(_('source has mq patches applied'))
438 438
439 439 parent = list(parent) + opts.get('rev', [])
440 440 if opts.get('outgoing'):
441 441 if len(parent) > 1:
442 442 raise util.Abort(
443 443 _('only one repo argument allowed with --outgoing'))
444 444 elif parent:
445 445 parent = parent[0]
446 446
447 447 dest = ui.expandpath(parent or 'default-push', parent or 'default')
448 448 dest, revs = hg.parseurl(dest, None)[:2]
449 449 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
450 450
451 451 revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
452 452 other = hg.peer(repo, opts, dest)
453 453
454 454 if revs:
455 455 revs = [repo.lookup(rev) for rev in revs]
456 456
457 457 # hexlify nodes from outgoing, because we're going to parse
458 458 # parent[0] using revsingle below, and if the binary hash
459 459 # contains special revset characters like ":" the revset
460 460 # parser can choke.
461 461 parent = [node.hex(n) for n in discovery.findcommonoutgoing(
462 462 repo, other, [], force=opts.get('force')).missing[0:1]]
463 463 else:
464 464 if opts.get('force'):
465 465 raise util.Abort(_('--force only allowed with --outgoing'))
466 466
467 467 if opts.get('continue', False):
468 468 if len(parent) != 0:
469 469 raise util.Abort(_('no arguments allowed with --continue'))
470 470 (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
471 471 currentparent, wantnull = repo.dirstate.parents()
472 472 parentctx = repo[parentctxnode]
473 473 parentctx, repl = bootstrapcontinue(ui, repo, parentctx, rules, opts)
474 474 replacements.extend(repl)
475 475 elif opts.get('abort', False):
476 476 if len(parent) != 0:
477 477 raise util.Abort(_('no arguments allowed with --abort'))
478 478 (parentctxnode, rules, keep, topmost, replacements) = readstate(repo)
479 479 mapping, tmpnodes, leafs, _ntm = processreplacement(repo, replacements)
480 480 ui.debug('restore wc to old parent %s\n' % node.short(topmost))
481 481 hg.clean(repo, topmost)
482 482 cleanupnode(ui, repo, 'created', tmpnodes)
483 483 cleanupnode(ui, repo, 'temp', leafs)
484 484 os.unlink(os.path.join(repo.path, 'histedit-state'))
485 485 return
486 486 else:
487 487 cmdutil.bailifchanged(repo)
488 488 if os.path.exists(os.path.join(repo.path, 'histedit-state')):
489 489 raise util.Abort(_('history edit already in progress, try '
490 490 '--continue or --abort'))
491 491
492 492 topmost, empty = repo.dirstate.parents()
493 493
494 494 if len(parent) != 1:
495 495 raise util.Abort(_('histedit requires exactly one parent revision'))
496 496 parent = scmutil.revsingle(repo, parent[0]).node()
497 497
498 498 keep = opts.get('keep', False)
499 499 revs = between(repo, parent, topmost, keep)
500 500 if not revs:
501 501 raise util.Abort(_('%s is not an ancestor of working directory') %
502 502 node.short(parent))
503 503
504 504 ctxs = [repo[r] for r in revs]
505 505 rules = opts.get('commands', '')
506 506 if not rules:
507 507 rules = '\n'.join([makedesc(c) for c in ctxs])
508 508 rules += '\n\n'
509 509 rules += editcomment % (node.short(parent), node.short(topmost))
510 510 rules = ui.edit(rules, ui.username())
511 511 # Save edit rules in .hg/histedit-last-edit.txt in case
512 512 # the user needs to ask for help after something
513 513 # surprising happens.
514 514 f = open(repo.join('histedit-last-edit.txt'), 'w')
515 515 f.write(rules)
516 516 f.close()
517 517 else:
518 518 f = open(rules)
519 519 rules = f.read()
520 520 f.close()
521 521 rules = [l for l in (r.strip() for r in rules.splitlines())
522 522 if l and not l[0] == '#']
523 523 rules = verifyrules(rules, repo, ctxs)
524 524
525 525 parentctx = repo[parent].parents()[0]
526 526 keep = opts.get('keep', False)
527 527 replacements = []
528 528
529 529
530 530 while rules:
531 531 writestate(repo, parentctx.node(), rules, keep, topmost, replacements)
532 532 action, ha = rules.pop(0)
533 533 ui.debug('histedit: processing %s %s\n' % (action, ha))
534 534 actfunc = actiontable[action]
535 535 parentctx, replacement_ = actfunc(ui, repo, parentctx, ha, opts)
536 536 replacements.extend(replacement_)
537 537
538 538 hg.update(repo, parentctx.node())
539 539
540 540 mapping, tmpnodes, created, ntm = processreplacement(repo, replacements)
541 541 if mapping:
542 542 for prec, succs in mapping.iteritems():
543 543 if not succs:
544 544 ui.debug('histedit: %s is dropped\n' % node.short(prec))
545 545 else:
546 546 ui.debug('histedit: %s is replaced by %s\n' % (
547 547 node.short(prec), node.short(succs[0])))
548 548 if len(succs) > 1:
549 549 m = 'histedit: %s'
550 550 for n in succs[1:]:
551 551 ui.debug(m % node.short(n))
552 552
553 553 if not keep:
554 554 if mapping:
555 555 movebookmarks(ui, repo, mapping, topmost, ntm)
556 556 # TODO update mq state
557 557 if obsolete._enabled:
558 558 markers = []
559 559 # sort by revision number because it sounds "right"
560 560 for prec in sorted(mapping, key=repo.changelog.rev):
561 561 succs = mapping[prec]
562 562 markers.append((repo[prec],
563 563 tuple(repo[s] for s in succs)))
564 564 if markers:
565 565 obsolete.createmarkers(repo, markers)
566 566 else:
567 567 cleanupnode(ui, repo, 'replaced', mapping)
568 568
569 569 cleanupnode(ui, repo, 'temp', tmpnodes)
570 570 os.unlink(os.path.join(repo.path, 'histedit-state'))
571 571 if os.path.exists(repo.sjoin('undo')):
572 572 os.unlink(repo.sjoin('undo'))
573 573
574 574
575 575 def bootstrapcontinue(ui, repo, parentctx, rules, opts):
576 576 action, currentnode = rules.pop(0)
577 577 ctx = repo[currentnode]
578 578 # are there any new commits between the expected parent and "."?
579 579 #
580 580 # note: does not take non-linear new changes into account (but the previous
581 581 # implementation didn't use them anyway) (issue3655)
582 582 newchildren = [c.node() for c in repo.set('(%d::.)', parentctx)]
583 583 if not newchildren:
584 584 # `parentctxnode` should match but no result. This means that
585 585 # currentnode is not a descendant from parentctxnode.
586 586 msg = _('%s is not an ancestor of working directory')
587 587 hint = _('update to %s or descendant and run "hg histedit '
588 588 '--continue" again') % parentctx
589 589 raise util.Abort(msg % parentctx, hint=hint)
590 590 newchildren.pop(0) # remove parentctxnode
591 591 # Commit dirty working directory if necessary
592 592 new = None
593 593 m, a, r, d = repo.status()[:4]
594 594 if m or a or r or d:
595 595 # prepare the message for the commit to come
596 596 if action in ('f', 'fold'):
597 597 message = 'fold-temp-revision %s' % currentnode
598 598 else:
599 599 message = ctx.description() + '\n'
600 600 if action in ('e', 'edit', 'm', 'mess'):
601 601 editor = cmdutil.commitforceeditor
602 602 else:
603 603 editor = False
604 604 commit = commitfuncfor(repo, ctx)
605 605 new = commit(text=message, user=ctx.user(),
606 606 date=ctx.date(), extra=ctx.extra(),
607 607 editor=editor)
608 608 if new is not None:
609 609 newchildren.append(new)
610 610
611 611 replacements = []
612 612 # track replacements
613 613 if ctx.node() not in newchildren:
614 614 # note: new children may be empty when the changeset is dropped.
615 615 # this happens e.g. during a conflicting pick where we revert content
616 616 # to the parent.
617 617 replacements.append((ctx.node(), tuple(newchildren)))
618 618
619 619 if action in ('f', 'fold'):
620 620 # finalize fold operation if applicable
621 621 if new is None:
622 622 new = newchildren[-1]
623 623 else:
624 624 newchildren.pop() # remove new from internal changes
625 625 parentctx, repl = finishfold(ui, repo, parentctx, ctx, new, opts,
626 626 newchildren)
627 627 replacements.extend(repl)
628 628 elif newchildren:
629 # otherwize update "parentctx" before proceding to further operation
629 # otherwise update "parentctx" before proceeding to further operation
630 630 parentctx = repo[newchildren[-1]]
631 631 return parentctx, replacements
632 632
633 633
634 634 def between(repo, old, new, keep):
635 635 """select and validate the set of revision to edit
636 636
637 637 When keep is false, the specified set can't have children."""
638 638 ctxs = list(repo.set('%n::%n', old, new))
639 639 if ctxs and not keep:
640 640 if (not obsolete._enabled and
641 641 repo.revs('(%ld::) - (%ld)', ctxs, ctxs)):
642 642 raise util.Abort(_('cannot edit history that would orphan nodes'))
643 643 root = ctxs[0] # list is already sorted by repo.set
644 644 if not root.phase():
645 645 raise util.Abort(_('cannot edit immutable changeset: %s') % root)
646 646 return [c.node() for c in ctxs]
647 647
648 648
649 649 def writestate(repo, parentnode, rules, keep, topmost, replacements):
650 650 fp = open(os.path.join(repo.path, 'histedit-state'), 'w')
651 651 pickle.dump((parentnode, rules, keep, topmost, replacements), fp)
652 652 fp.close()
653 653
654 654 def readstate(repo):
655 655 """Returns a tuple of (parentnode, rules, keep, topmost, replacements).
656 656 """
657 657 fp = open(os.path.join(repo.path, 'histedit-state'))
658 658 return pickle.load(fp)
659 659
660 660
661 661 def makedesc(c):
662 662 """build a initial action line for a ctx `c`
663 663
664 664 line are in the form:
665 665
666 666 pick <hash> <rev> <summary>
667 667 """
668 668 summary = ''
669 669 if c.description():
670 670 summary = c.description().splitlines()[0]
671 671 line = 'pick %s %d %s' % (c, c.rev(), summary)
672 672 return line[:80] # trim to 80 chars so it's not stupidly wide in my editor
673 673
674 674 def verifyrules(rules, repo, ctxs):
675 675 """Verify that there exists exactly one edit rule per given changeset.
676 676
677 677 Will abort if there are too many or too few rules, a malformed rule,
678 678 or a rule on a changeset outside of the user-given range.
679 679 """
680 680 parsed = []
681 681 if len(rules) != len(ctxs):
682 682 raise util.Abort(_('must specify a rule for each changeset once'))
683 683 for r in rules:
684 684 if ' ' not in r:
685 685 raise util.Abort(_('malformed line "%s"') % r)
686 686 action, rest = r.split(' ', 1)
687 687 if ' ' in rest.strip():
688 688 ha, rest = rest.split(' ', 1)
689 689 else:
690 690 ha = r.strip()
691 691 try:
692 692 if repo[ha] not in ctxs:
693 693 raise util.Abort(
694 694 _('may not use changesets other than the ones listed'))
695 695 except error.RepoError:
696 696 raise util.Abort(_('unknown changeset %s listed') % ha)
697 697 if action not in actiontable:
698 698 raise util.Abort(_('unknown action "%s"') % action)
699 699 parsed.append([action, ha])
700 700 return parsed
701 701
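Each rule line is an action word followed by a hash and an optional summary; `verifyrules` checks the action against `actiontable` and the hash against the changesets being edited. A condensed sketch of the parsing step alone, without the repository checks (the helper name is made up)::

    actions = set(['p', 'pick', 'e', 'edit', 'f', 'fold',
                   'd', 'drop', 'm', 'mess'])

    def parserule(r):
        if ' ' not in r:
            raise ValueError('malformed line %r' % r)
        action, rest = r.split(' ', 1)
        ha = rest.strip().split(' ', 1)[0]   # summary text is ignored
        if action not in actions:
            raise ValueError('unknown action %r' % action)
        return [action, ha]

    print(parserule('pick c561b4e977df 1 Add beta'))
    # -> ['pick', 'c561b4e977df']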
702 702 def processreplacement(repo, replacements):
703 703 """process the list of replacements to return
704 704
705 705 1) the final mapping between original and created nodes
706 706 2) the list of temporary nodes created by histedit
707 707 3) the list of new commits created by histedit"""
708 708 allsuccs = set()
709 709 replaced = set()
710 710 fullmapping = {}
711 711 # initialise basic set
712 712 # fullmapping records all operations recorded in the replacements
713 713 for rep in replacements:
714 714 allsuccs.update(rep[1])
715 715 replaced.add(rep[0])
716 716 fullmapping.setdefault(rep[0], set()).update(rep[1])
717 717 new = allsuccs - replaced
718 718 tmpnodes = allsuccs & replaced
719 719 # Reduce fullmapping into direct relations between original nodes
720 720 # and the final nodes created during history editing.
721 721 # Dropped changesets are replaced by an empty list.
722 722 toproceed = set(fullmapping)
723 723 final = {}
724 724 while toproceed:
725 725 for x in list(toproceed):
726 726 succs = fullmapping[x]
727 727 for s in list(succs):
728 728 if s in toproceed:
729 729 # non final node with unknown closure
730 730 # We can't process this now
731 731 break
732 732 elif s in final:
733 733 # non final node, replace with closure
734 734 succs.remove(s)
735 735 succs.update(final[s])
736 736 else:
737 737 final[x] = succs
738 738 toproceed.remove(x)
739 739 # remove tmpnodes from final mapping
740 740 for n in tmpnodes:
741 741 del final[n]
742 742 # we expect all changes involved in final to exist in the repo
743 743 # turn `final` into list (topologically sorted)
744 744 nm = repo.changelog.nodemap
745 745 for prec, succs in final.items():
746 746 final[prec] = sorted(succs, key=nm.get)
747 747
748 748 # compute the topmost element (necessary for bookmark handling)
749 749 if new:
750 750 newtopmost = sorted(new, key=repo.changelog.rev)[-1]
751 751 elif not final:
752 752 # Nothing was rewritten at all; we won't need `newtopmost`.
753 753 # It is the same as `oldtopmost` and `processreplacement` knows it.
754 754 newtopmost = None
755 755 else:
756 756 # everybody died. The newtopmost is the parent of the root.
757 757 newtopmost = repo[sorted(final, key=repo.changelog.rev)[0]].p1().node()
758 758
759 759 return final, tmpnodes, new, newtopmost
760 760
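The reduction loop above repeatedly splices already-resolved successors into each pending entry, deferring any entry whose successor is still pending, until only direct original-to-final edges remain; temporary nodes are then deleted from the mapping. A toy run with strings in place of node ids::

    # replacements said: a -> t, then t -> x (t is temporary, x is final)
    fullmapping = {'a': set(['t']), 't': set(['x'])}
    toproceed = set(fullmapping)
    final = {}
    while toproceed:
        for x in list(toproceed):
            succs = fullmapping[x]
            for s in list(succs):
                if s in toproceed:
                    break               # successor not resolved yet
                elif s in final:
                    succs.remove(s)     # splice in the resolved closure
                    succs.update(final[s])
            else:
                final[x] = succs        # every successor is final
                toproceed.remove(x)
    del final['t']                      # drop the temporary node
    print(final)                        # -> {'a': set(['x'])}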
761 761 def movebookmarks(ui, repo, mapping, oldtopmost, newtopmost):
762 762 """Move bookmark from old to newly created node"""
763 763 if not mapping:
764 764 # if nothing got rewritten there is no purpose for this function
765 765 return
766 766 moves = []
767 767 for bk, old in sorted(repo._bookmarks.iteritems()):
768 768 if old == oldtopmost:
769 769 # special case: ensure the bookmark stays on tip.
770 770 #
771 771 # This is arguably a feature and we may only want that for the
772 772 # active bookmark. But the behavior is kept compatible with the old
773 773 # version for now.
774 774 moves.append((bk, newtopmost))
775 775 continue
776 776 base = old
777 777 new = mapping.get(base, None)
778 778 if new is None:
779 779 continue
780 780 while not new:
781 781 # base is killed, trying with parent
782 782 base = repo[base].p1().node()
783 783 new = mapping.get(base, (base,))
784 784 # nothing to move
785 785 moves.append((bk, new[-1]))
786 786 if moves:
787 787 marks = repo._bookmarks
788 788 for mark, new in moves:
789 789 old = marks[mark]
790 790 ui.note(_('histedit: moving bookmarks %s from %s to %s\n')
791 791 % (mark, node.short(old), node.short(new)))
792 792 marks[mark] = new
793 793 marks.write()
794 794
795 795 def cleanupnode(ui, repo, name, nodes):
796 796 """strip a group of nodes from the repository
797 797
798 798 The set of nodes to strip may contain unknown nodes."""
799 799 ui.debug('should strip %s nodes %s\n' %
800 800 (name, ', '.join([node.short(n) for n in nodes])))
801 801 lock = None
802 802 try:
803 803 lock = repo.lock()
804 804 # Find all nodes that need to be stripped
805 805 # (we use %lr instead of %ln to silently ignore unknown items)
806 806 nm = repo.changelog.nodemap
807 807 nodes = [n for n in nodes if n in nm]
808 808 roots = [c.node() for c in repo.set("roots(%ln)", nodes)]
809 809 for c in roots:
810 810 # We should process nodes in reverse order to strip the tipmost first,
811 811 # but that triggers a bug in the changegroup hook.
812 812 # Processing in reverse would also reduce bundle overhead.
813 813 repair.strip(ui, repo, c)
814 814 finally:
815 815 lockmod.release(lock)
@@ -1,3581 +1,3581 @@
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help command` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 file creations or deletions. This behaviour can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 It may be desirable for mq changesets to be kept in the secret phase (see
42 42 :hg:`help phases`), which can be enabled with the following setting::
43 43
44 44 [mq]
45 45 secret = True
46 46
47 47 You will by default be managing a patch queue named "patches". You can
48 48 create other, independent patch queues with the :hg:`qqueue` command.
49 49
50 50 If the working directory contains uncommitted files, qpush, qpop and
51 51 qgoto abort immediately. If -f/--force is used, the changes are
52 52 discarded. Setting::
53 53
54 54 [mq]
55 55 keepchanges = True
56 56
57 57 makes them behave as if --keep-changes were passed, and non-conflicting
58 58 local changes will be tolerated and preserved. If incompatible options
59 59 such as -f/--force or --exact are passed, this setting is ignored.
60 60 '''
61 61
62 62 from mercurial.i18n import _
63 63 from mercurial.node import bin, hex, short, nullid, nullrev
64 64 from mercurial.lock import release
65 65 from mercurial import commands, cmdutil, hg, scmutil, util, revset
66 66 from mercurial import repair, extensions, error, phases
67 67 from mercurial import patch as patchmod
68 68 import os, re, errno, shutil
69 69
70 70 commands.norepo += " qclone"
71 71
72 72 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
73 73
74 74 cmdtable = {}
75 75 command = cmdutil.command(cmdtable)
76 76 testedwith = 'internal'
77 77
78 78 # Patch names look like unix file names.
79 79 # They must be joinable with queue directory and result in the patch path.
80 80 normname = util.normpath
81 81
82 82 class statusentry(object):
83 83 def __init__(self, node, name):
84 84 self.node, self.name = node, name
85 85 def __repr__(self):
86 86 return hex(self.node) + ':' + self.name
87 87
88 88 class patchheader(object):
89 89 def __init__(self, pf, plainmode=False):
90 90 def eatdiff(lines):
91 91 while lines:
92 92 l = lines[-1]
93 93 if (l.startswith("diff -") or
94 94 l.startswith("Index:") or
95 95 l.startswith("===========")):
96 96 del lines[-1]
97 97 else:
98 98 break
99 99 def eatempty(lines):
100 100 while lines:
101 101 if not lines[-1].strip():
102 102 del lines[-1]
103 103 else:
104 104 break
105 105
106 106 message = []
107 107 comments = []
108 108 user = None
109 109 date = None
110 110 parent = None
111 111 format = None
112 112 subject = None
113 113 branch = None
114 114 nodeid = None
115 115 diffstart = 0
116 116
117 117 for line in file(pf):
118 118 line = line.rstrip()
119 119 if (line.startswith('diff --git')
120 120 or (diffstart and line.startswith('+++ '))):
121 121 diffstart = 2
122 122 break
123 123 diffstart = 0 # reset
124 124 if line.startswith("--- "):
125 125 diffstart = 1
126 126 continue
127 127 elif format == "hgpatch":
128 128 # parse values when importing the result of an hg export
129 129 if line.startswith("# User "):
130 130 user = line[7:]
131 131 elif line.startswith("# Date "):
132 132 date = line[7:]
133 133 elif line.startswith("# Parent "):
134 134 parent = line[9:].lstrip()
135 135 elif line.startswith("# Branch "):
136 136 branch = line[9:]
137 137 elif line.startswith("# Node ID "):
138 138 nodeid = line[10:]
139 139 elif not line.startswith("# ") and line:
140 140 message.append(line)
141 141 format = None
142 142 elif line == '# HG changeset patch':
143 143 message = []
144 144 format = "hgpatch"
145 145 elif (format != "tagdone" and (line.startswith("Subject: ") or
146 146 line.startswith("subject: "))):
147 147 subject = line[9:]
148 148 format = "tag"
149 149 elif (format != "tagdone" and (line.startswith("From: ") or
150 150 line.startswith("from: "))):
151 151 user = line[6:]
152 152 format = "tag"
153 153 elif (format != "tagdone" and (line.startswith("Date: ") or
154 154 line.startswith("date: "))):
155 155 date = line[6:]
156 156 format = "tag"
157 157 elif format == "tag" and line == "":
158 158 # when looking for tags (subject: from: etc) they
159 159 # end once you find a blank line in the source
160 160 format = "tagdone"
161 161 elif message or line:
162 162 message.append(line)
163 163 comments.append(line)
164 164
165 165 eatdiff(message)
166 166 eatdiff(comments)
167 167 # Remember the exact starting line of the patch diffs before consuming
168 168 # empty lines, for external use by TortoiseHg and others
169 169 self.diffstartline = len(comments)
170 170 eatempty(message)
171 171 eatempty(comments)
172 172
173 173 # make sure message isn't empty
174 174 if format and format.startswith("tag") and subject:
175 175 message.insert(0, "")
176 176 message.insert(0, subject)
177 177
178 178 self.message = message
179 179 self.comments = comments
180 180 self.user = user
181 181 self.date = date
182 182 self.parent = parent
183 183 # nodeid and branch are for external use by TortoiseHg and others
184 184 self.nodeid = nodeid
185 185 self.branch = branch
186 186 self.haspatch = diffstart > 1
187 187 self.plainmode = plainmode
188 188
189 189 def setuser(self, user):
190 190 if not self.updateheader(['From: ', '# User '], user):
191 191 try:
192 192 patchheaderat = self.comments.index('# HG changeset patch')
193 193 self.comments.insert(patchheaderat + 1, '# User ' + user)
194 194 except ValueError:
195 195 if self.plainmode or self._hasheader(['Date: ']):
196 196 self.comments = ['From: ' + user] + self.comments
197 197 else:
198 198 tmp = ['# HG changeset patch', '# User ' + user, '']
199 199 self.comments = tmp + self.comments
200 200 self.user = user
201 201
202 202 def setdate(self, date):
203 203 if not self.updateheader(['Date: ', '# Date '], date):
204 204 try:
205 205 patchheaderat = self.comments.index('# HG changeset patch')
206 206 self.comments.insert(patchheaderat + 1, '# Date ' + date)
207 207 except ValueError:
208 208 if self.plainmode or self._hasheader(['From: ']):
209 209 self.comments = ['Date: ' + date] + self.comments
210 210 else:
211 211 tmp = ['# HG changeset patch', '# Date ' + date, '']
212 212 self.comments = tmp + self.comments
213 213 self.date = date
214 214
215 215 def setparent(self, parent):
216 216 if not self.updateheader(['# Parent '], parent):
217 217 try:
218 218 patchheaderat = self.comments.index('# HG changeset patch')
219 219 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
220 220 except ValueError:
221 221 pass
222 222 self.parent = parent
223 223
224 224 def setmessage(self, message):
225 225 if self.comments:
226 226 self._delmsg()
227 227 self.message = [message]
228 228 self.comments += self.message
229 229
230 230 def updateheader(self, prefixes, new):
231 231 '''Update all references to a field in the patch header.
232 232 Return whether the field is present.'''
233 233 res = False
234 234 for prefix in prefixes:
235 235 for i in xrange(len(self.comments)):
236 236 if self.comments[i].startswith(prefix):
237 237 self.comments[i] = prefix + new
238 238 res = True
239 239 break
240 240 return res
241 241
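`updateheader` rewrites the first line matching each prefix in place and reports whether anything matched; that return value is how `setuser` and `setdate` decide between rewriting an existing header and inserting a fresh one. A standalone sketch over a plain list (the header values are made up)::

    def updateheader(comments, prefixes, new):
        res = False
        for prefix in prefixes:
            for i in xrange(len(comments)):
                if comments[i].startswith(prefix):
                    comments[i] = prefix + new   # rewrite in place
                    res = True
                    break                        # first match per prefix only
        return res

    comments = ['# HG changeset patch', '# User alice']
    print(updateheader(comments, ['From: ', '# User '], 'bob'))   # -> True
    print(comments)   # -> ['# HG changeset patch', '# User bob']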
242 242 def _hasheader(self, prefixes):
243 243 '''Check if a header starts with any of the given prefixes.'''
244 244 for prefix in prefixes:
245 245 for comment in self.comments:
246 246 if comment.startswith(prefix):
247 247 return True
248 248 return False
249 249
250 250 def __str__(self):
251 251 if not self.comments:
252 252 return ''
253 253 return '\n'.join(self.comments) + '\n\n'
254 254
255 255 def _delmsg(self):
256 256 '''Remove existing message, keeping the rest of the comment fields.
257 257 If comments contains 'subject: ', message will prepend
258 258 the field and a blank line.'''
259 259 if self.message:
260 260 subj = 'subject: ' + self.message[0].lower()
261 261 for i in xrange(len(self.comments)):
262 262 if subj == self.comments[i].lower():
263 263 del self.comments[i]
264 264 self.message = self.message[2:]
265 265 break
266 266 ci = 0
267 267 for mi in self.message:
268 268 while mi != self.comments[ci]:
269 269 ci += 1
270 270 del self.comments[ci]
271 271
272 272 def newcommit(repo, phase, *args, **kwargs):
273 273 """helper dedicated to ensure a commit respect mq.secret setting
274 274
275 275 It should be used instead of repo.commit inside the mq source for operation
276 276 creating new changeset.
277 277 """
278 278 repo = repo.unfiltered()
279 279 if phase is None:
280 280 if repo.ui.configbool('mq', 'secret', False):
281 281 phase = phases.secret
282 282 if phase is not None:
283 283 backup = repo.ui.backupconfig('phases', 'new-commit')
284 284 # Marking the repository as committing an mq patch can be used
285 # to optimize operations like _branchtags().
285 # to optimize operations like branchtags().
286 286 repo._committingpatch = True
287 287 try:
288 288 if phase is not None:
289 289 repo.ui.setconfig('phases', 'new-commit', phase)
290 290 return repo.commit(*args, **kwargs)
291 291 finally:
292 292 repo._committingpatch = False
293 293 if phase is not None:
294 294 repo.ui.restoreconfig(backup)
295 295
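`newcommit` consults `mq.secret` only when the caller passed no explicit phase, and it only overrides `phases.new-commit` when some phase was actually chosen. That precedence, condensed into a pure function (the constant is an illustrative stand-in for `phases.secret`)::

    SECRET = 2   # hypothetical stand-in for phases.secret

    def choosephase(phase, secretconfigured):
        # explicit phase wins; otherwise mq.secret may force secret;
        # None means "leave phases.new-commit untouched"
        if phase is None and secretconfigured:
            return SECRET
        return phase

    print(choosephase(None, True))    # -> 2 (secret)
    print(choosephase(None, False))   # -> None
    print(choosephase(1, True))       # -> 1, the explicit phase wins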
296 296 class AbortNoCleanup(error.Abort):
297 297 pass
298 298
299 299 class queue(object):
300 300 def __init__(self, ui, path, patchdir=None):
301 301 self.basepath = path
302 302 try:
303 303 fh = open(os.path.join(path, 'patches.queue'))
304 304 cur = fh.read().rstrip()
305 305 fh.close()
306 306 if not cur:
307 307 curpath = os.path.join(path, 'patches')
308 308 else:
309 309 curpath = os.path.join(path, 'patches-' + cur)
310 310 except IOError:
311 311 curpath = os.path.join(path, 'patches')
312 312 self.path = patchdir or curpath
313 313 self.opener = scmutil.opener(self.path)
314 314 self.ui = ui
315 315 self.applieddirty = False
316 316 self.seriesdirty = False
317 317 self.added = []
318 318 self.seriespath = "series"
319 319 self.statuspath = "status"
320 320 self.guardspath = "guards"
321 321 self.activeguards = None
322 322 self.guardsdirty = False
323 323 # Handle mq.git as a bool with extended values
324 324 try:
325 325 gitmode = ui.configbool('mq', 'git', None)
326 326 if gitmode is None:
327 327 raise error.ConfigError
328 328 self.gitmode = gitmode and 'yes' or 'no'
329 329 except error.ConfigError:
330 330 self.gitmode = ui.config('mq', 'git', 'auto').lower()
331 331 self.plainmode = ui.configbool('mq', 'plain', False)
332 332
333 333 @util.propertycache
334 334 def applied(self):
335 335 def parselines(lines):
336 336 for l in lines:
337 337 entry = l.split(':', 1)
338 338 if len(entry) > 1:
339 339 n, name = entry
340 340 yield statusentry(bin(n), name)
341 341 elif l.strip():
342 342 self.ui.warn(_('malformed mq status line: %s\n') % entry)
343 343 # else we ignore empty lines
344 344 try:
345 345 lines = self.opener.read(self.statuspath).splitlines()
346 346 return list(parselines(lines))
347 347 except IOError, e:
348 348 if e.errno == errno.ENOENT:
349 349 return []
350 350 raise
351 351
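Each line of the status file is `<hex node>:<patch name>`, and `parselines` splits on the first colon only, so patch names that themselves contain colons survive. A toy round-trip with shortened hashes (real entries carry 40-digit hex nodes)::

    def parselines(lines):
        for l in lines:
            entry = l.split(':', 1)      # split on the first colon only
            if len(entry) > 1:
                yield tuple(entry)

    lines = ['a1b2c3:fix-build.patch', 'd4e5f6:feature:colons.patch']
    print(list(parselines(lines)))
    # -> [('a1b2c3', 'fix-build.patch'), ('d4e5f6', 'feature:colons.patch')]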
352 352 @util.propertycache
353 353 def fullseries(self):
354 354 try:
355 355 return self.opener.read(self.seriespath).splitlines()
356 356 except IOError, e:
357 357 if e.errno == errno.ENOENT:
358 358 return []
359 359 raise
360 360
361 361 @util.propertycache
362 362 def series(self):
363 363 self.parseseries()
364 364 return self.series
365 365
366 366 @util.propertycache
367 367 def seriesguards(self):
368 368 self.parseseries()
369 369 return self.seriesguards
370 370
371 371 def invalidate(self):
372 372 for a in 'applied fullseries series seriesguards'.split():
373 373 if a in self.__dict__:
374 374 delattr(self, a)
375 375 self.applieddirty = False
376 376 self.seriesdirty = False
377 377 self.guardsdirty = False
378 378 self.activeguards = None
379 379
380 380 def diffopts(self, opts={}, patchfn=None):
381 381 diffopts = patchmod.diffopts(self.ui, opts)
382 382 if self.gitmode == 'auto':
383 383 diffopts.upgrade = True
384 384 elif self.gitmode == 'keep':
385 385 pass
386 386 elif self.gitmode in ('yes', 'no'):
387 387 diffopts.git = self.gitmode == 'yes'
388 388 else:
389 389 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
390 390 ' got %s') % self.gitmode)
391 391 if patchfn:
392 392 diffopts = self.patchopts(diffopts, patchfn)
393 393 return diffopts
394 394
395 395 def patchopts(self, diffopts, *patches):
396 396 """Return a copy of input diff options with git set to true if
397 397 referenced patch is a git patch and should be preserved as such.
398 398 """
399 399 diffopts = diffopts.copy()
400 400 if not diffopts.git and self.gitmode == 'keep':
401 401 for patchfn in patches:
402 402 patchf = self.opener(patchfn, 'r')
403 403 # if the patch was a git patch, refresh it as a git patch
404 404 for line in patchf:
405 405 if line.startswith('diff --git'):
406 406 diffopts.git = True
407 407 break
408 408 patchf.close()
409 409 return diffopts
410 410
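    # Illustrative: with gitmode == 'keep', a patch file beginning with a
    # "diff --git a/f b/f" header is detected by patchopts() above and
    # refreshed as a git patch, preserving any rename/copy or mode-change
    # information it records.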
411 411 def join(self, *p):
412 412 return os.path.join(self.path, *p)
413 413
414 414 def findseries(self, patch):
415 415 def matchpatch(l):
416 416 l = l.split('#', 1)[0]
417 417 return l.strip() == patch
418 418 for index, l in enumerate(self.fullseries):
419 419 if matchpatch(l):
420 420 return index
421 421 return None
422 422
423 423 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
424 424
425 425 def parseseries(self):
426 426 self.series = []
427 427 self.seriesguards = []
428 428 for l in self.fullseries:
429 429 h = l.find('#')
430 430 if h == -1:
431 431 patch = l
432 432 comment = ''
433 433 elif h == 0:
434 434 continue
435 435 else:
436 436 patch = l[:h]
437 437 comment = l[h:]
438 438 patch = patch.strip()
439 439 if patch:
440 440 if patch in self.series:
441 441 raise util.Abort(_('%s appears more than once in %s') %
442 442 (patch, self.join(self.seriespath)))
443 443 self.series.append(patch)
444 444 self.seriesguards.append(self.guard_re.findall(comment))
445 445
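    # Illustrative series file lines and how parseseries() handles them
    # (patch names hypothetical):
    #
    #   fix-build.patch                  -> series entry, no guards
    #   feature-x.patch #+experimental   -> guards ['+experimental']
    #   old-hack.patch #-stable          -> guards ['-stable']
    #   # a line starting with '#' is skipped entirely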
446 446 def checkguard(self, guard):
447 447 if not guard:
448 448 return _('guard cannot be an empty string')
449 449 bad_chars = '# \t\r\n\f'
450 450 first = guard[0]
451 451 if first in '-+':
452 452 return (_('guard %r starts with invalid character: %r') %
453 453 (guard, first))
454 454 for c in bad_chars:
455 455 if c in guard:
456 456 return _('invalid character in guard %r: %r') % (guard, c)
457 457
458 458 def setactive(self, guards):
459 459 for guard in guards:
460 460 bad = self.checkguard(guard)
461 461 if bad:
462 462 raise util.Abort(bad)
463 463 guards = sorted(set(guards))
464 464 self.ui.debug('active guards: %s\n' % ' '.join(guards))
465 465 self.activeguards = guards
466 466 self.guardsdirty = True
467 467
468 468 def active(self):
469 469 if self.activeguards is None:
470 470 self.activeguards = []
471 471 try:
472 472 guards = self.opener.read(self.guardspath).split()
473 473 except IOError, err:
474 474 if err.errno != errno.ENOENT:
475 475 raise
476 476 guards = []
477 477 for i, guard in enumerate(guards):
478 478 bad = self.checkguard(guard)
479 479 if bad:
480 480 self.ui.warn('%s:%d: %s\n' %
481 481 (self.join(self.guardspath), i + 1, bad))
482 482 else:
483 483 self.activeguards.append(guard)
484 484 return self.activeguards
485 485
486 486 def setguards(self, idx, guards):
487 487 for g in guards:
488 488 if len(g) < 2:
489 489 raise util.Abort(_('guard %r too short') % g)
490 490 if g[0] not in '-+':
491 491 raise util.Abort(_('guard %r starts with invalid char') % g)
492 492 bad = self.checkguard(g[1:])
493 493 if bad:
494 494 raise util.Abort(bad)
495 495 drop = self.guard_re.sub('', self.fullseries[idx])
496 496 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
497 497 self.parseseries()
498 498 self.seriesdirty = True
499 499
500 500 def pushable(self, idx):
501 501 if isinstance(idx, str):
502 502 idx = self.series.index(idx)
503 503 patchguards = self.seriesguards[idx]
504 504 if not patchguards:
505 505 return True, None
506 506 guards = self.active()
507 507 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
508 508 if exactneg:
509 509 return False, repr(exactneg[0])
510 510 pos = [g for g in patchguards if g[0] == '+']
511 511 exactpos = [g for g in pos if g[1:] in guards]
512 512 if pos:
513 513 if exactpos:
514 514 return True, repr(exactpos[0])
515 515 return False, ' '.join(map(repr, pos))
516 516 return True, ''
517 517
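    # Sketch of the guard semantics implemented by pushable() above,
    # assuming the active guard set is {'exp'} (hypothetical):
    #
    #   patch guards   pushable?  reason
    #   (none)         yes        no guards apply (None)
    #   ['-exp']       no         exact negative match
    #   ['+exp']       yes        exact positive match
    #   ['+stable']    no         positive guards exist but none are active
    #   ['-stable']    yes        negative guard is not active ('' reason)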
518 518 def explainpushable(self, idx, all_patches=False):
519 519 write = all_patches and self.ui.write or self.ui.warn
520 520 if all_patches or self.ui.verbose:
521 521 if isinstance(idx, str):
522 522 idx = self.series.index(idx)
523 523 pushable, why = self.pushable(idx)
524 524 if all_patches and pushable:
525 525 if why is None:
526 526 write(_('allowing %s - no guards in effect\n') %
527 527 self.series[idx])
528 528 else:
529 529 if not why:
530 530 write(_('allowing %s - no matching negative guards\n') %
531 531 self.series[idx])
532 532 else:
533 533 write(_('allowing %s - guarded by %s\n') %
534 534 (self.series[idx], why))
535 535 if not pushable:
536 536 if why:
537 537 write(_('skipping %s - guarded by %s\n') %
538 538 (self.series[idx], why))
539 539 else:
540 540 write(_('skipping %s - no matching guards\n') %
541 541 self.series[idx])
542 542
543 543 def savedirty(self):
544 544 def writelist(items, path):
545 545 fp = self.opener(path, 'w')
546 546 for i in items:
547 547 fp.write("%s\n" % i)
548 548 fp.close()
549 549 if self.applieddirty:
550 550 writelist(map(str, self.applied), self.statuspath)
551 551 self.applieddirty = False
552 552 if self.seriesdirty:
553 553 writelist(self.fullseries, self.seriespath)
554 554 self.seriesdirty = False
555 555 if self.guardsdirty:
556 556 writelist(self.activeguards, self.guardspath)
557 557 self.guardsdirty = False
558 558 if self.added:
559 559 qrepo = self.qrepo()
560 560 if qrepo:
561 561 qrepo[None].add(f for f in self.added if f not in qrepo[None])
562 562 self.added = []
563 563
564 564 def removeundo(self, repo):
565 565 undo = repo.sjoin('undo')
566 566 if not os.path.exists(undo):
567 567 return
568 568 try:
569 569 os.unlink(undo)
570 570 except OSError, inst:
571 571 self.ui.warn(_('error removing undo: %s\n') % str(inst))
572 572
573 573 def backup(self, repo, files, copy=False):
574 574 # backup local changes in --force case
575 575 for f in sorted(files):
576 576 absf = repo.wjoin(f)
577 577 if os.path.lexists(absf):
578 578 self.ui.note(_('saving current version of %s as %s\n') %
579 579 (f, f + '.orig'))
580 580 if copy:
581 581 util.copyfile(absf, absf + '.orig')
582 582 else:
583 583 util.rename(absf, absf + '.orig')
584 584
585 585 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
586 586 fp=None, changes=None, opts={}):
587 587 stat = opts.get('stat')
588 588 m = scmutil.match(repo[node1], files, opts)
589 589 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
590 590 changes, stat, fp)
591 591
592 592 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
593 593 # first try just applying the patch
594 594 (err, n) = self.apply(repo, [patch], update_status=False,
595 595 strict=True, merge=rev)
596 596
597 597 if err == 0:
598 598 return (err, n)
599 599
600 600 if n is None:
601 601 raise util.Abort(_("apply failed for patch %s") % patch)
602 602
603 603 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
604 604
605 605 # apply failed, strip away that rev and merge.
606 606 hg.clean(repo, head)
607 607 self.strip(repo, [n], update=False, backup='strip')
608 608
609 609 ctx = repo[rev]
610 610 ret = hg.merge(repo, rev)
611 611 if ret:
612 612 raise util.Abort(_("update returned %d") % ret)
613 613 n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
614 614 if n is None:
615 615 raise util.Abort(_("repo commit failed"))
616 616 try:
617 617 ph = patchheader(mergeq.join(patch), self.plainmode)
618 618 except Exception:
619 619 raise util.Abort(_("unable to read %s") % patch)
620 620
621 621 diffopts = self.patchopts(diffopts, patch)
622 622 patchf = self.opener(patch, "w")
623 623 comments = str(ph)
624 624 if comments:
625 625 patchf.write(comments)
626 626 self.printdiff(repo, diffopts, head, n, fp=patchf)
627 627 patchf.close()
628 628 self.removeundo(repo)
629 629 return (0, n)
630 630
631 631 def qparents(self, repo, rev=None):
632 632 if rev is None:
633 633 (p1, p2) = repo.dirstate.parents()
634 634 if p2 == nullid:
635 635 return p1
636 636 if not self.applied:
637 637 return None
638 638 return self.applied[-1].node
639 639 p1, p2 = repo.changelog.parents(rev)
640 640 if p2 != nullid and p2 in [x.node for x in self.applied]:
641 641 return p2
642 642 return p1
643 643
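    # Illustrative behaviour of qparents(): with no rev and a non-merge
    # working directory it returns the first dirstate parent; on a merge it
    # returns the last applied patch's node (or None when nothing is
    # applied). Given a rev, it returns the second parent when that parent
    # is an applied mq patch, and the first parent otherwise.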
644 644 def mergepatch(self, repo, mergeq, series, diffopts):
645 645 if not self.applied:
646 646 # each of the patches merged in will have two parents. This
647 647 # can confuse the qrefresh, qdiff, and strip code because it
648 648 # needs to know which parent is actually in the patch queue.
649 649 # so, we insert a merge marker with only one parent. This way
650 650 # the first patch in the queue is never a merge patch
651 651 #
652 652 pname = ".hg.patches.merge.marker"
653 653 n = newcommit(repo, None, '[mq]: merge marker', force=True)
654 654 self.removeundo(repo)
655 655 self.applied.append(statusentry(n, pname))
656 656 self.applieddirty = True
657 657
658 658 head = self.qparents(repo)
659 659
660 660 for patch in series:
661 661 patch = mergeq.lookup(patch, strict=True)
662 662 if not patch:
663 663 self.ui.warn(_("patch %s does not exist\n") % patch)
664 664 return (1, None)
665 665 pushable, reason = self.pushable(patch)
666 666 if not pushable:
667 667 self.explainpushable(patch, all_patches=True)
668 668 continue
669 669 info = mergeq.isapplied(patch)
670 670 if not info:
671 671 self.ui.warn(_("patch %s is not applied\n") % patch)
672 672 return (1, None)
673 673 rev = info[1]
674 674 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
675 675 if head:
676 676 self.applied.append(statusentry(head, patch))
677 677 self.applieddirty = True
678 678 if err:
679 679 return (err, head)
680 680 self.savedirty()
681 681 return (0, head)
682 682
683 683 def patch(self, repo, patchfile):
684 684 '''Apply patchfile to the working directory.
685 685 patchfile: name of patch file'''
686 686 files = set()
687 687 try:
688 688 fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
689 689 files=files, eolmode=None)
690 690 return (True, list(files), fuzz)
691 691 except Exception, inst:
692 692 self.ui.note(str(inst) + '\n')
693 693 if not self.ui.verbose:
694 694 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
695 695 self.ui.traceback()
696 696 return (False, list(files), False)
697 697
698 698 def apply(self, repo, series, list=False, update_status=True,
699 699 strict=False, patchdir=None, merge=None, all_files=None,
700 700 tobackup=None, keepchanges=False):
701 701 wlock = lock = tr = None
702 702 try:
703 703 wlock = repo.wlock()
704 704 lock = repo.lock()
705 705 tr = repo.transaction("qpush")
706 706 try:
707 707 ret = self._apply(repo, series, list, update_status,
708 708 strict, patchdir, merge, all_files=all_files,
709 709 tobackup=tobackup, keepchanges=keepchanges)
710 710 tr.close()
711 711 self.savedirty()
712 712 return ret
713 713 except AbortNoCleanup:
714 714 tr.close()
715 715 self.savedirty()
716 716 return 2, repo.dirstate.p1()
717 717 except: # re-raises
718 718 try:
719 719 tr.abort()
720 720 finally:
721 721 repo.invalidate()
722 722 repo.dirstate.invalidate()
723 723 self.invalidate()
724 724 raise
725 725 finally:
726 726 release(tr, lock, wlock)
727 727 self.removeundo(repo)
728 728
729 729 def _apply(self, repo, series, list=False, update_status=True,
730 730 strict=False, patchdir=None, merge=None, all_files=None,
731 731 tobackup=None, keepchanges=False):
732 732 """returns (error, hash)
733 733
734 734 error = 1 for unable to read, 2 for patch failed, 3 for patch
735 735 fuzz. tobackup is None or a set of files to backup before they
736 736 are modified by a patch.
737 737 """
738 738 # TODO unify with commands.py
739 739 if not patchdir:
740 740 patchdir = self.path
741 741 err = 0
742 742 n = None
743 743 for patchname in series:
744 744 pushable, reason = self.pushable(patchname)
745 745 if not pushable:
746 746 self.explainpushable(patchname, all_patches=True)
747 747 continue
748 748 self.ui.status(_("applying %s\n") % patchname)
749 749 pf = os.path.join(patchdir, patchname)
750 750
751 751 try:
752 752 ph = patchheader(self.join(patchname), self.plainmode)
753 753 except IOError:
754 754 self.ui.warn(_("unable to read %s\n") % patchname)
755 755 err = 1
756 756 break
757 757
758 758 message = ph.message
759 759 if not message:
760 760 # The commit message should not be translated
761 761 message = "imported patch %s\n" % patchname
762 762 else:
763 763 if list:
764 764 # The commit message should not be translated
765 765 message.append("\nimported patch %s" % patchname)
766 766 message = '\n'.join(message)
767 767
768 768 if ph.haspatch:
769 769 if tobackup:
770 770 touched = patchmod.changedfiles(self.ui, repo, pf)
771 771 touched = set(touched) & tobackup
772 772 if touched and keepchanges:
773 773 raise AbortNoCleanup(
774 774 _("local changes found, refresh first"))
775 775 self.backup(repo, touched, copy=True)
776 776 tobackup = tobackup - touched
777 777 (patcherr, files, fuzz) = self.patch(repo, pf)
778 778 if all_files is not None:
779 779 all_files.update(files)
780 780 patcherr = not patcherr
781 781 else:
782 782 self.ui.warn(_("patch %s is empty\n") % patchname)
783 783 patcherr, files, fuzz = 0, [], 0
784 784
785 785 if merge and files:
786 786 # Mark as removed/merged and update dirstate parent info
787 787 removed = []
788 788 merged = []
789 789 for f in files:
790 790 if os.path.lexists(repo.wjoin(f)):
791 791 merged.append(f)
792 792 else:
793 793 removed.append(f)
794 794 for f in removed:
795 795 repo.dirstate.remove(f)
796 796 for f in merged:
797 797 repo.dirstate.merge(f)
798 798 p1, p2 = repo.dirstate.parents()
799 799 repo.setparents(p1, merge)
800 800
801 801 match = scmutil.matchfiles(repo, files or [])
802 802 oldtip = repo['tip']
803 803 n = newcommit(repo, None, message, ph.user, ph.date, match=match,
804 804 force=True)
805 805 if repo['tip'] == oldtip:
806 806 raise util.Abort(_("qpush exactly duplicates child changeset"))
807 807 if n is None:
808 808 raise util.Abort(_("repository commit failed"))
809 809
810 810 if update_status:
811 811 self.applied.append(statusentry(n, patchname))
812 812
813 813 if patcherr:
814 814 self.ui.warn(_("patch failed, rejects left in working dir\n"))
815 815 err = 2
816 816 break
817 817
818 818 if fuzz and strict:
819 819 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
820 820 err = 3
821 821 break
822 822 return (err, n)
823 823
824 824 def _cleanup(self, patches, numrevs, keep=False):
825 825 if not keep:
826 826 r = self.qrepo()
827 827 if r:
828 828 r[None].forget(patches)
829 829 for p in patches:
830 830 try:
831 831 os.unlink(self.join(p))
832 832 except OSError, inst:
833 833 if inst.errno != errno.ENOENT:
834 834 raise
835 835
836 836 qfinished = []
837 837 if numrevs:
838 838 qfinished = self.applied[:numrevs]
839 839 del self.applied[:numrevs]
840 840 self.applieddirty = True
841 841
842 842 unknown = []
843 843
844 844 for (i, p) in sorted([(self.findseries(p), p) for p in patches],
845 845 reverse=True):
846 846 if i is not None:
847 847 del self.fullseries[i]
848 848 else:
849 849 unknown.append(p)
850 850
851 851 if unknown:
852 852 if numrevs:
853 853 rev = dict((entry.name, entry.node) for entry in qfinished)
854 854 for p in unknown:
855 855 msg = _('revision %s refers to unknown patches: %s\n')
856 856 self.ui.warn(msg % (short(rev[p]), p))
857 857 else:
858 858 msg = _('unknown patches: %s\n')
859 859 raise util.Abort(''.join(msg % p for p in unknown))
860 860
861 861 self.parseseries()
862 862 self.seriesdirty = True
863 863 return [entry.node for entry in qfinished]
864 864
865 865 def _revpatches(self, repo, revs):
866 866 firstrev = repo[self.applied[0].node].rev()
867 867 patches = []
868 868 for i, rev in enumerate(revs):
869 869
870 870 if rev < firstrev:
871 871 raise util.Abort(_('revision %d is not managed') % rev)
872 872
873 873 ctx = repo[rev]
874 874 base = self.applied[i].node
875 875 if ctx.node() != base:
876 876 msg = _('cannot delete revision %d above applied patches')
877 877 raise util.Abort(msg % rev)
878 878
879 879 patch = self.applied[i].name
880 880 for fmt in ('[mq]: %s', 'imported patch %s'):
881 881 if ctx.description() == fmt % patch:
882 882 msg = _('patch %s finalized without changeset message\n')
883 883 repo.ui.status(msg % patch)
884 884 break
885 885
886 886 patches.append(patch)
887 887 return patches
888 888
889 889 def finish(self, repo, revs):
890 890 # Manually trigger phase computation to ensure phasedefaults is
891 891 # executed before we remove the patches.
892 892 repo._phasecache
893 893 patches = self._revpatches(repo, sorted(revs))
894 894 qfinished = self._cleanup(patches, len(patches))
895 895 if qfinished and repo.ui.configbool('mq', 'secret', False):
896 896 # only use this logic when the secret option is added
897 897 oldqbase = repo[qfinished[0]]
898 898 tphase = repo.ui.config('phases', 'new-commit', phases.draft)
899 899 if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
900 900 phases.advanceboundary(repo, tphase, qfinished)
901 901
902 902 def delete(self, repo, patches, opts):
903 903 if not patches and not opts.get('rev'):
904 904 raise util.Abort(_('qdelete requires at least one revision or '
905 905 'patch name'))
906 906
907 907 realpatches = []
908 908 for patch in patches:
909 909 patch = self.lookup(patch, strict=True)
910 910 info = self.isapplied(patch)
911 911 if info:
912 912 raise util.Abort(_("cannot delete applied patch %s") % patch)
913 913 if patch not in self.series:
914 914 raise util.Abort(_("patch %s not in series file") % patch)
915 915 if patch not in realpatches:
916 916 realpatches.append(patch)
917 917
918 918 numrevs = 0
919 919 if opts.get('rev'):
920 920 if not self.applied:
921 921 raise util.Abort(_('no patches applied'))
922 922 revs = scmutil.revrange(repo, opts.get('rev'))
923 923 if len(revs) > 1 and revs[0] > revs[1]:
924 924 revs.reverse()
925 925 revpatches = self._revpatches(repo, revs)
926 926 realpatches += revpatches
927 927 numrevs = len(revpatches)
928 928
929 929 self._cleanup(realpatches, numrevs, opts.get('keep'))
930 930
931 931 def checktoppatch(self, repo):
932 932 '''check that working directory is at qtip'''
933 933 if self.applied:
934 934 top = self.applied[-1].node
935 935 patch = self.applied[-1].name
936 936 if repo.dirstate.p1() != top:
937 937 raise util.Abort(_("working directory revision is not qtip"))
938 938 return top, patch
939 939 return None, None
940 940
941 941 def checksubstate(self, repo, baserev=None):
942 942 '''return list of subrepos at a different revision than substate.
943 943 Abort if any subrepos have uncommitted changes.'''
944 944 inclsubs = []
945 945 wctx = repo[None]
946 946 if baserev:
947 947 bctx = repo[baserev]
948 948 else:
949 949 bctx = wctx.parents()[0]
950 950 for s in sorted(wctx.substate):
951 951 if wctx.sub(s).dirty(True):
952 952 raise util.Abort(
953 953 _("uncommitted changes in subrepository %s") % s)
954 954 elif s not in bctx.substate or bctx.sub(s).dirty():
955 955 inclsubs.append(s)
956 956 return inclsubs
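    # Illustrative: checksubstate() aborts as soon as a subrepo has
    # uncommitted changes of its own, and otherwise returns the subrepos
    # whose revision differs from the one recorded in the comparison
    # context.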
957 957
958 958 def putsubstate2changes(self, substatestate, changes):
959 959 for files in changes[:3]:
960 960 if '.hgsubstate' in files:
961 961                 return # already listed
962 962         # not yet listed
963 963 if substatestate in 'a?':
964 964 changes[1].append('.hgsubstate')
965 965 elif substatestate in 'r':
966 966 changes[2].append('.hgsubstate')
967 967 else: # modified
968 968 changes[0].append('.hgsubstate')
969 969
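    # Illustrative mapping used by putsubstate2changes() above: dirstate
    # codes 'a' (added) and '?' (unknown) file .hgsubstate under added
    # (changes[1]), 'r' under removed (changes[2]), and anything else under
    # modified (changes[0]) -- unless it is already listed somewhere.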
970 970 def localchangesfound(self, refresh=True):
971 971 if refresh:
972 972 raise util.Abort(_("local changes found, refresh first"))
973 973 else:
974 974 raise util.Abort(_("local changes found"))
975 975
976 976 def checklocalchanges(self, repo, force=False, refresh=True):
977 977 m, a, r, d = repo.status()[:4]
978 978 if (m or a or r or d) and not force:
979 979 self.localchangesfound(refresh)
980 980 return m, a, r, d
981 981
982 982 _reserved = ('series', 'status', 'guards', '.', '..')
983 983 def checkreservedname(self, name):
984 984 if name in self._reserved:
985 985 raise util.Abort(_('"%s" cannot be used as the name of a patch')
986 986 % name)
987 987 for prefix in ('.hg', '.mq'):
988 988 if name.startswith(prefix):
989 989 raise util.Abort(_('patch name cannot begin with "%s"')
990 990 % prefix)
991 991 for c in ('#', ':'):
992 992 if c in name:
993 993 raise util.Abort(_('"%s" cannot be used in the name of a patch')
994 994 % c)
995 995
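    # Examples of names rejected by checkreservedname() (all hypothetical):
    # 'series', 'status', '..', '.hgignore', '.mq-backup', 'fix#2', 'a:b'.
    # An ordinary name such as 'fix-crash.patch' passes.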
996 996 def checkpatchname(self, name, force=False):
997 997 self.checkreservedname(name)
998 998 if not force and os.path.exists(self.join(name)):
999 999 if os.path.isdir(self.join(name)):
1000 1000 raise util.Abort(_('"%s" already exists as a directory')
1001 1001 % name)
1002 1002 else:
1003 1003 raise util.Abort(_('patch "%s" already exists') % name)
1004 1004
1005 1005 def checkkeepchanges(self, keepchanges, force):
1006 1006 if force and keepchanges:
1007 1007 raise util.Abort(_('cannot use both --force and --keep-changes'))
1008 1008
1009 1009 def new(self, repo, patchfn, *pats, **opts):
1010 1010 """options:
1011 1011 msg: a string or a no-argument function returning a string
1012 1012 """
1013 1013 msg = opts.get('msg')
1014 1014 user = opts.get('user')
1015 1015 date = opts.get('date')
1016 1016 if date:
1017 1017 date = util.parsedate(date)
1018 1018 diffopts = self.diffopts({'git': opts.get('git')})
1019 1019 if opts.get('checkname', True):
1020 1020 self.checkpatchname(patchfn)
1021 1021 inclsubs = self.checksubstate(repo)
1022 1022 if inclsubs:
1023 1023 inclsubs.append('.hgsubstate')
1024 1024 substatestate = repo.dirstate['.hgsubstate']
1025 1025 if opts.get('include') or opts.get('exclude') or pats:
1026 1026 if inclsubs:
1027 1027 pats = list(pats or []) + inclsubs
1028 1028 match = scmutil.match(repo[None], pats, opts)
1029 1029 # detect missing files in pats
1030 1030 def badfn(f, msg):
1031 1031 if f != '.hgsubstate': # .hgsubstate is auto-created
1032 1032 raise util.Abort('%s: %s' % (f, msg))
1033 1033 match.bad = badfn
1034 1034 changes = repo.status(match=match)
1035 1035 m, a, r, d = changes[:4]
1036 1036 else:
1037 1037 changes = self.checklocalchanges(repo, force=True)
1038 1038 m, a, r, d = changes
1039 1039 match = scmutil.matchfiles(repo, m + a + r + inclsubs)
1040 1040 if len(repo[None].parents()) > 1:
1041 1041 raise util.Abort(_('cannot manage merge changesets'))
1042 1042 commitfiles = m + a + r
1043 1043 self.checktoppatch(repo)
1044 1044 insert = self.fullseriesend()
1045 1045 wlock = repo.wlock()
1046 1046 try:
1047 1047 try:
1048 1048 # if patch file write fails, abort early
1049 1049 p = self.opener(patchfn, "w")
1050 1050 except IOError, e:
1051 1051 raise util.Abort(_('cannot write patch "%s": %s')
1052 1052 % (patchfn, e.strerror))
1053 1053 try:
1054 1054 if self.plainmode:
1055 1055 if user:
1056 1056 p.write("From: " + user + "\n")
1057 1057 if not date:
1058 1058 p.write("\n")
1059 1059 if date:
1060 1060 p.write("Date: %d %d\n\n" % date)
1061 1061 else:
1062 1062 p.write("# HG changeset patch\n")
1063 1063 p.write("# Parent "
1064 1064 + hex(repo[None].p1().node()) + "\n")
1065 1065 if user:
1066 1066 p.write("# User " + user + "\n")
1067 1067 if date:
1068 1068 p.write("# Date %s %s\n\n" % date)
1069 1069 if util.safehasattr(msg, '__call__'):
1070 1070 msg = msg()
1071 1071 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
1072 1072 n = newcommit(repo, None, commitmsg, user, date, match=match,
1073 1073 force=True)
1074 1074 if n is None:
1075 1075 raise util.Abort(_("repo commit failed"))
1076 1076 try:
1077 1077 self.fullseries[insert:insert] = [patchfn]
1078 1078 self.applied.append(statusentry(n, patchfn))
1079 1079 self.parseseries()
1080 1080 self.seriesdirty = True
1081 1081 self.applieddirty = True
1082 1082 if msg:
1083 1083 msg = msg + "\n\n"
1084 1084 p.write(msg)
1085 1085 if commitfiles:
1086 1086 parent = self.qparents(repo, n)
1087 1087 if inclsubs:
1088 1088 self.putsubstate2changes(substatestate, changes)
1089 1089 chunks = patchmod.diff(repo, node1=parent, node2=n,
1090 1090 changes=changes, opts=diffopts)
1091 1091 for chunk in chunks:
1092 1092 p.write(chunk)
1093 1093 p.close()
1094 1094 r = self.qrepo()
1095 1095 if r:
1096 1096 r[None].add([patchfn])
1097 1097 except: # re-raises
1098 1098 repo.rollback()
1099 1099 raise
1100 1100 except Exception:
1101 1101 patchpath = self.join(patchfn)
1102 1102 try:
1103 1103 os.unlink(patchpath)
1104 1104 except OSError:
1105 1105 self.ui.warn(_('error unlinking %s\n') % patchpath)
1106 1106 raise
1107 1107 self.removeundo(repo)
1108 1108 finally:
1109 1109 release(wlock)
1110 1110
1111 1111 def strip(self, repo, revs, update=True, backup="all", force=None):
1112 1112 wlock = lock = None
1113 1113 try:
1114 1114 wlock = repo.wlock()
1115 1115 lock = repo.lock()
1116 1116
1117 1117 if update:
1118 1118 self.checklocalchanges(repo, force=force, refresh=False)
1119 1119 urev = self.qparents(repo, revs[0])
1120 1120 hg.clean(repo, urev)
1121 1121 repo.dirstate.write()
1122 1122
1123 1123 repair.strip(self.ui, repo, revs, backup)
1124 1124 finally:
1125 1125 release(lock, wlock)
1126 1126
1127 1127 def isapplied(self, patch):
1128 1128 """returns (index, rev, patch)"""
1129 1129 for i, a in enumerate(self.applied):
1130 1130 if a.name == patch:
1131 1131 return (i, a.node, a.name)
1132 1132 return None
1133 1133
1134 1134 # if the exact patch name does not exist, we try a few
1135 1135 # variations. If strict is passed, we try only #1
1136 1136 #
1137 1137 # 1) a number (as string) to indicate an offset in the series file
1138 1138     # 2) a unique substring of a patch name
1139 1139 # 3) patchname[-+]num to indicate an offset in the series file
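    # Illustrative results of the lookup() below against a hypothetical
    # series ['a.patch', 'b.patch', 'c.patch'], all applied:
    #
    #   lookup('1')         -> 'b.patch'  (offset into the series file)
    #   lookup('b.p')       -> 'b.patch'  (unique substring)
    #   lookup('c.patch-1') -> 'b.patch'  (existing name minus an offset)
    #   lookup('b.patch+1') -> 'c.patch'  (existing name plus an offset)
    #   lookup('qtip')      -> 'c.patch'  (symbolic name, only while
    #                                      patches are applied)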
1140 1140 def lookup(self, patch, strict=False):
1141 1141 def partialname(s):
1142 1142 if s in self.series:
1143 1143 return s
1144 1144 matches = [x for x in self.series if s in x]
1145 1145 if len(matches) > 1:
1146 1146 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
1147 1147 for m in matches:
1148 1148 self.ui.warn(' %s\n' % m)
1149 1149 return None
1150 1150 if matches:
1151 1151 return matches[0]
1152 1152 if self.series and self.applied:
1153 1153 if s == 'qtip':
1154 1154 return self.series[self.seriesend(True) - 1]
1155 1155 if s == 'qbase':
1156 1156 return self.series[0]
1157 1157 return None
1158 1158
1159 1159 if patch in self.series:
1160 1160 return patch
1161 1161
1162 1162 if not os.path.isfile(self.join(patch)):
1163 1163 try:
1164 1164 sno = int(patch)
1165 1165 except (ValueError, OverflowError):
1166 1166 pass
1167 1167 else:
1168 1168 if -len(self.series) <= sno < len(self.series):
1169 1169 return self.series[sno]
1170 1170
1171 1171 if not strict:
1172 1172 res = partialname(patch)
1173 1173 if res:
1174 1174 return res
1175 1175 minus = patch.rfind('-')
1176 1176 if minus >= 0:
1177 1177 res = partialname(patch[:minus])
1178 1178 if res:
1179 1179 i = self.series.index(res)
1180 1180 try:
1181 1181 off = int(patch[minus + 1:] or 1)
1182 1182 except (ValueError, OverflowError):
1183 1183 pass
1184 1184 else:
1185 1185 if i - off >= 0:
1186 1186 return self.series[i - off]
1187 1187 plus = patch.rfind('+')
1188 1188 if plus >= 0:
1189 1189 res = partialname(patch[:plus])
1190 1190 if res:
1191 1191 i = self.series.index(res)
1192 1192 try:
1193 1193 off = int(patch[plus + 1:] or 1)
1194 1194 except (ValueError, OverflowError):
1195 1195 pass
1196 1196 else:
1197 1197 if i + off < len(self.series):
1198 1198 return self.series[i + off]
1199 1199 raise util.Abort(_("patch %s not in series") % patch)
1200 1200
1201 1201 def push(self, repo, patch=None, force=False, list=False, mergeq=None,
1202 1202 all=False, move=False, exact=False, nobackup=False,
1203 1203 keepchanges=False):
1204 1204 self.checkkeepchanges(keepchanges, force)
1205 1205 diffopts = self.diffopts()
1206 1206 wlock = repo.wlock()
1207 1207 try:
1208 1208 heads = []
1209 1209 for b, ls in repo.branchmap().iteritems():
1210 1210 heads += ls
1211 1211 if not heads:
1212 1212 heads = [nullid]
1213 1213 if repo.dirstate.p1() not in heads and not exact:
1214 1214 self.ui.status(_("(working directory not at a head)\n"))
1215 1215
1216 1216 if not self.series:
1217 1217 self.ui.warn(_('no patches in series\n'))
1218 1218 return 0
1219 1219
1220 1220             # Suppose our series file is: A B C and the current 'top'
1221 1221             # patch is B. qpush C should be performed (moving forward),
1222 1222             # qpush B is a NOP (no change), and qpush A is an error (can't
1223 1223             # go backwards with qpush).
1224 1224 if patch:
1225 1225 patch = self.lookup(patch)
1226 1226 info = self.isapplied(patch)
1227 1227 if info and info[0] >= len(self.applied) - 1:
1228 1228 self.ui.warn(
1229 1229 _('qpush: %s is already at the top\n') % patch)
1230 1230 return 0
1231 1231
1232 1232 pushable, reason = self.pushable(patch)
1233 1233 if pushable:
1234 1234 if self.series.index(patch) < self.seriesend():
1235 1235 raise util.Abort(
1236 1236 _("cannot push to a previous patch: %s") % patch)
1237 1237 else:
1238 1238 if reason:
1239 1239 reason = _('guarded by %s') % reason
1240 1240 else:
1241 1241 reason = _('no matching guards')
1242 1242 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1243 1243 return 1
1244 1244 elif all:
1245 1245 patch = self.series[-1]
1246 1246 if self.isapplied(patch):
1247 1247 self.ui.warn(_('all patches are currently applied\n'))
1248 1248 return 0
1249 1249
1250 1250 # Following the above example, starting at 'top' of B:
1251 1251 # qpush should be performed (pushes C), but a subsequent
1252 1252 # qpush without an argument is an error (nothing to
1253 1253 # apply). This allows a loop of "...while hg qpush..." to
1254 1254             # work as it detects an error when done.
1255 1255 start = self.seriesend()
1256 1256 if start == len(self.series):
1257 1257 self.ui.warn(_('patch series already fully applied\n'))
1258 1258 return 1
1259 1259 if not force and not keepchanges:
1260 1260 self.checklocalchanges(repo, refresh=self.applied)
1261 1261
1262 1262 if exact:
1263 1263 if keepchanges:
1264 1264 raise util.Abort(
1265 1265 _("cannot use --exact and --keep-changes together"))
1266 1266 if move:
1267 1267 raise util.Abort(_('cannot use --exact and --move '
1268 1268 'together'))
1269 1269 if self.applied:
1270 1270 raise util.Abort(_('cannot push --exact with applied '
1271 1271 'patches'))
1272 1272 root = self.series[start]
1273 1273 target = patchheader(self.join(root), self.plainmode).parent
1274 1274 if not target:
1275 1275 raise util.Abort(
1276 1276 _("%s does not have a parent recorded") % root)
1277 1277 if not repo[target] == repo['.']:
1278 1278 hg.update(repo, target)
1279 1279
1280 1280 if move:
1281 1281 if not patch:
1282 1282 raise util.Abort(_("please specify the patch to move"))
1283 1283 for fullstart, rpn in enumerate(self.fullseries):
1284 1284 # strip markers for patch guards
1285 1285 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1286 1286 break
1287 1287 for i, rpn in enumerate(self.fullseries[fullstart:]):
1288 1288 # strip markers for patch guards
1289 1289 if self.guard_re.split(rpn, 1)[0] == patch:
1290 1290 break
1291 1291 index = fullstart + i
1292 1292 assert index < len(self.fullseries)
1293 1293 fullpatch = self.fullseries[index]
1294 1294 del self.fullseries[index]
1295 1295 self.fullseries.insert(fullstart, fullpatch)
1296 1296 self.parseseries()
1297 1297 self.seriesdirty = True
1298 1298
1299 1299 self.applieddirty = True
1300 1300 if start > 0:
1301 1301 self.checktoppatch(repo)
1302 1302 if not patch:
1303 1303 patch = self.series[start]
1304 1304 end = start + 1
1305 1305 else:
1306 1306 end = self.series.index(patch, start) + 1
1307 1307
1308 1308 tobackup = set()
1309 1309 if (not nobackup and force) or keepchanges:
1310 1310 m, a, r, d = self.checklocalchanges(repo, force=True)
1311 1311 if keepchanges:
1312 1312 tobackup.update(m + a + r + d)
1313 1313 else:
1314 1314 tobackup.update(m + a)
1315 1315
1316 1316 s = self.series[start:end]
1317 1317 all_files = set()
1318 1318 try:
1319 1319 if mergeq:
1320 1320 ret = self.mergepatch(repo, mergeq, s, diffopts)
1321 1321 else:
1322 1322 ret = self.apply(repo, s, list, all_files=all_files,
1323 1323 tobackup=tobackup, keepchanges=keepchanges)
1324 1324 except: # re-raises
1325 1325 self.ui.warn(_('cleaning up working directory...'))
1326 1326 node = repo.dirstate.p1()
1327 1327 hg.revert(repo, node, None)
1328 1328 # only remove unknown files that we know we touched or
1329 1329 # created while patching
1330 1330 for f in all_files:
1331 1331 if f not in repo.dirstate:
1332 1332 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1333 1333 self.ui.warn(_('done\n'))
1334 1334 raise
1335 1335
1336 1336 if not self.applied:
1337 1337 return ret[0]
1338 1338 top = self.applied[-1].name
1339 1339 if ret[0] and ret[0] > 1:
1340 1340 msg = _("errors during apply, please fix and refresh %s\n")
1341 1341 self.ui.write(msg % top)
1342 1342 else:
1343 1343 self.ui.write(_("now at: %s\n") % top)
1344 1344 return ret[0]
1345 1345
1346 1346 finally:
1347 1347 wlock.release()
1348 1348
1349 1349 def pop(self, repo, patch=None, force=False, update=True, all=False,
1350 1350 nobackup=False, keepchanges=False):
1351 1351 self.checkkeepchanges(keepchanges, force)
1352 1352 wlock = repo.wlock()
1353 1353 try:
1354 1354 if patch:
1355 1355 # index, rev, patch
1356 1356 info = self.isapplied(patch)
1357 1357 if not info:
1358 1358 patch = self.lookup(patch)
1359 1359 info = self.isapplied(patch)
1360 1360 if not info:
1361 1361 raise util.Abort(_("patch %s is not applied") % patch)
1362 1362
1363 1363 if not self.applied:
1364 1364 # Allow qpop -a to work repeatedly,
1365 1365 # but not qpop without an argument
1366 1366 self.ui.warn(_("no patches applied\n"))
1367 1367 return not all
1368 1368
1369 1369 if all:
1370 1370 start = 0
1371 1371 elif patch:
1372 1372 start = info[0] + 1
1373 1373 else:
1374 1374 start = len(self.applied) - 1
1375 1375
1376 1376 if start >= len(self.applied):
1377 1377 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1378 1378 return
1379 1379
1380 1380 if not update:
1381 1381 parents = repo.dirstate.parents()
1382 1382 rr = [x.node for x in self.applied]
1383 1383 for p in parents:
1384 1384 if p in rr:
1385 1385 self.ui.warn(_("qpop: forcing dirstate update\n"))
1386 1386 update = True
1387 1387 else:
1388 1388 parents = [p.node() for p in repo[None].parents()]
1389 1389 needupdate = False
1390 1390 for entry in self.applied[start:]:
1391 1391 if entry.node in parents:
1392 1392 needupdate = True
1393 1393 break
1394 1394 update = needupdate
1395 1395
1396 1396 tobackup = set()
1397 1397 if update:
1398 1398 m, a, r, d = self.checklocalchanges(
1399 1399 repo, force=force or keepchanges)
1400 1400 if force:
1401 1401 if not nobackup:
1402 1402 tobackup.update(m + a)
1403 1403 elif keepchanges:
1404 1404 tobackup.update(m + a + r + d)
1405 1405
1406 1406 self.applieddirty = True
1407 1407 end = len(self.applied)
1408 1408 rev = self.applied[start].node
1409 1409
1410 1410 try:
1411 1411 heads = repo.changelog.heads(rev)
1412 1412 except error.LookupError:
1413 1413 node = short(rev)
1414 1414 raise util.Abort(_('trying to pop unknown node %s') % node)
1415 1415
1416 1416 if heads != [self.applied[-1].node]:
1417 1417 raise util.Abort(_("popping would remove a revision not "
1418 1418 "managed by this patch queue"))
1419 1419 if not repo[self.applied[-1].node].mutable():
1420 1420 raise util.Abort(
1421 1421 _("popping would remove an immutable revision"),
1422 1422 hint=_('see "hg help phases" for details'))
1423 1423
1424 1424 # we know there are no local changes, so we can make a simplified
1425 1425 # form of hg.update.
1426 1426 if update:
1427 1427 qp = self.qparents(repo, rev)
1428 1428 ctx = repo[qp]
1429 1429 m, a, r, d = repo.status(qp, '.')[:4]
1430 1430 if d:
1431 1431 raise util.Abort(_("deletions found between repo revs"))
1432 1432
1433 1433 tobackup = set(a + m + r) & tobackup
1434 1434 if keepchanges and tobackup:
1435 1435 self.localchangesfound()
1436 1436 self.backup(repo, tobackup)
1437 1437
1438 1438 for f in a:
1439 1439 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1440 1440 repo.dirstate.drop(f)
1441 1441 for f in m + r:
1442 1442 fctx = ctx[f]
1443 1443 repo.wwrite(f, fctx.data(), fctx.flags())
1444 1444 repo.dirstate.normal(f)
1445 1445 repo.setparents(qp, nullid)
1446 1446 for patch in reversed(self.applied[start:end]):
1447 1447 self.ui.status(_("popping %s\n") % patch.name)
1448 1448 del self.applied[start:end]
1449 1449 self.strip(repo, [rev], update=False, backup='strip')
1450 1450 if self.applied:
1451 1451 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1452 1452 else:
1453 1453 self.ui.write(_("patch queue now empty\n"))
1454 1454 finally:
1455 1455 wlock.release()
1456 1456
1457 1457 def diff(self, repo, pats, opts):
1458 1458 top, patch = self.checktoppatch(repo)
1459 1459 if not top:
1460 1460 self.ui.write(_("no patches applied\n"))
1461 1461 return
1462 1462 qp = self.qparents(repo, top)
1463 1463 if opts.get('reverse'):
1464 1464 node1, node2 = None, qp
1465 1465 else:
1466 1466 node1, node2 = qp, None
1467 1467 diffopts = self.diffopts(opts, patch)
1468 1468 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1469 1469
1470 1470 def refresh(self, repo, pats=None, **opts):
1471 1471 if not self.applied:
1472 1472 self.ui.write(_("no patches applied\n"))
1473 1473 return 1
1474 1474 msg = opts.get('msg', '').rstrip()
1475 1475 newuser = opts.get('user')
1476 1476 newdate = opts.get('date')
1477 1477 if newdate:
1478 1478 newdate = '%d %d' % util.parsedate(newdate)
1479 1479 wlock = repo.wlock()
1480 1480
1481 1481 try:
1482 1482 self.checktoppatch(repo)
1483 1483 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1484 1484 if repo.changelog.heads(top) != [top]:
1485 1485 raise util.Abort(_("cannot refresh a revision with children"))
1486 1486 if not repo[top].mutable():
1487 1487 raise util.Abort(_("cannot refresh immutable revision"),
1488 1488 hint=_('see "hg help phases" for details'))
1489 1489
1490 1490 cparents = repo.changelog.parents(top)
1491 1491 patchparent = self.qparents(repo, top)
1492 1492
1493 1493 inclsubs = self.checksubstate(repo, hex(patchparent))
1494 1494 if inclsubs:
1495 1495 inclsubs.append('.hgsubstate')
1496 1496 substatestate = repo.dirstate['.hgsubstate']
1497 1497
1498 1498 ph = patchheader(self.join(patchfn), self.plainmode)
1499 1499 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1500 1500 if msg:
1501 1501 ph.setmessage(msg)
1502 1502 if newuser:
1503 1503 ph.setuser(newuser)
1504 1504 if newdate:
1505 1505 ph.setdate(newdate)
1506 1506 ph.setparent(hex(patchparent))
1507 1507
1508 1508 # only commit new patch when write is complete
1509 1509 patchf = self.opener(patchfn, 'w', atomictemp=True)
1510 1510
1511 1511 comments = str(ph)
1512 1512 if comments:
1513 1513 patchf.write(comments)
1514 1514
1515 1515 # update the dirstate in place, strip off the qtip commit
1516 1516 # and then commit.
1517 1517 #
1518 1518 # this should really read:
1519 1519 # mm, dd, aa = repo.status(top, patchparent)[:3]
1520 1520 # but we do it backwards to take advantage of manifest/changelog
1521 1521 # caching against the next repo.status call
1522 1522 mm, aa, dd = repo.status(patchparent, top)[:3]
1523 1523 changes = repo.changelog.read(top)
1524 1524 man = repo.manifest.read(changes[0])
1525 1525 aaa = aa[:]
1526 1526 matchfn = scmutil.match(repo[None], pats, opts)
1527 1527 # in short mode, we only diff the files included in the
1528 1528 # patch already plus specified files
1529 1529 if opts.get('short'):
1530 1530 # if amending a patch, we start with existing
1531 1531 # files plus specified files - unfiltered
1532 1532 match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1533 1533 # filter with include/exclude options
1534 1534 matchfn = scmutil.match(repo[None], opts=opts)
1535 1535 else:
1536 1536 match = scmutil.matchall(repo)
1537 1537 m, a, r, d = repo.status(match=match)[:4]
1538 1538 mm = set(mm)
1539 1539 aa = set(aa)
1540 1540 dd = set(dd)
1541 1541
1542 1542 # we might end up with files that were added between
1543 1543 # qtip and the dirstate parent, but then changed in the
1544 1544 # local dirstate. in this case, we want them to only
1545 1545 # show up in the added section
1546 1546 for x in m:
1547 1547 if x not in aa:
1548 1548 mm.add(x)
1549 1549 # we might end up with files added by the local dirstate that
1550 1550 # were deleted by the patch. In this case, they should only
1551 1551 # show up in the changed section.
1552 1552 for x in a:
1553 1553 if x in dd:
1554 1554 dd.remove(x)
1555 1555 mm.add(x)
1556 1556 else:
1557 1557 aa.add(x)
1558 1558 # make sure any files deleted in the local dirstate
1559 1559 # are not in the add or change column of the patch
1560 1560 forget = []
1561 1561 for x in d + r:
1562 1562 if x in aa:
1563 1563 aa.remove(x)
1564 1564 forget.append(x)
1565 1565 continue
1566 1566 else:
1567 1567 mm.discard(x)
1568 1568 dd.add(x)
1569 1569
1570 1570 m = list(mm)
1571 1571 r = list(dd)
1572 1572 a = list(aa)
1573 1573
1574 # create 'match' that includes the files to be recommited.
1574 # create 'match' that includes the files to be recommitted.
1575 1575 # apply matchfn via repo.status to ensure correct case handling.
1576 1576 cm, ca, cr, cd = repo.status(patchparent, match=matchfn)[:4]
1577 1577 allmatches = set(cm + ca + cr + cd)
1578 1578 refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
1579 1579
1580 1580 files = set(inclsubs)
1581 1581 for x in refreshchanges:
1582 1582 files.update(x)
1583 1583 match = scmutil.matchfiles(repo, files)
1584 1584
1585 1585 bmlist = repo[top].bookmarks()
1586 1586
1587 1587 try:
1588 1588 if diffopts.git or diffopts.upgrade:
1589 1589 copies = {}
1590 1590 for dst in a:
1591 1591 src = repo.dirstate.copied(dst)
1592 1592 # during qfold, the source file for copies may
1593 1593 # be removed. Treat this as a simple add.
1594 1594 if src is not None and src in repo.dirstate:
1595 1595 copies.setdefault(src, []).append(dst)
1596 1596 repo.dirstate.add(dst)
1597 1597 # remember the copies between patchparent and qtip
1598 1598 for dst in aaa:
1599 1599 f = repo.file(dst)
1600 1600 src = f.renamed(man[dst])
1601 1601 if src:
1602 1602 copies.setdefault(src[0], []).extend(
1603 1603 copies.get(dst, []))
1604 1604 if dst in a:
1605 1605 copies[src[0]].append(dst)
1606 1606 # we can't copy a file created by the patch itself
1607 1607 if dst in copies:
1608 1608 del copies[dst]
1609 1609 for src, dsts in copies.iteritems():
1610 1610 for dst in dsts:
1611 1611 repo.dirstate.copy(src, dst)
1612 1612 else:
1613 1613 for dst in a:
1614 1614 repo.dirstate.add(dst)
1615 1615 # Drop useless copy information
1616 1616 for f in list(repo.dirstate.copies()):
1617 1617 repo.dirstate.copy(None, f)
1618 1618 for f in r:
1619 1619 repo.dirstate.remove(f)
1620 1620 # if the patch excludes a modified file, mark that
1621 1621 # file with mtime=0 so status can see it.
1622 1622 mm = []
1623 1623 for i in xrange(len(m) - 1, -1, -1):
1624 1624 if not matchfn(m[i]):
1625 1625 mm.append(m[i])
1626 1626 del m[i]
1627 1627 for f in m:
1628 1628 repo.dirstate.normal(f)
1629 1629 for f in mm:
1630 1630 repo.dirstate.normallookup(f)
1631 1631 for f in forget:
1632 1632 repo.dirstate.drop(f)
1633 1633
1634 1634 if not msg:
1635 1635 if not ph.message:
1636 1636 message = "[mq]: %s\n" % patchfn
1637 1637 else:
1638 1638 message = "\n".join(ph.message)
1639 1639 else:
1640 1640 message = msg
1641 1641
1642 1642 user = ph.user or changes[1]
1643 1643
1644 1644 oldphase = repo[top].phase()
1645 1645
1646 1646 # assumes strip can roll itself back if interrupted
1647 1647 repo.setparents(*cparents)
1648 1648 self.applied.pop()
1649 1649 self.applieddirty = True
1650 1650 self.strip(repo, [top], update=False,
1651 1651 backup='strip')
1652 1652 except: # re-raises
1653 1653 repo.dirstate.invalidate()
1654 1654 raise
1655 1655
1656 1656 try:
1657 1657 # might be nice to attempt to roll back strip after this
1658 1658
1659 1659                 # Ensure we create a new changeset in the same phase as
1660 1660                 # the old one.
1661 1661 n = newcommit(repo, oldphase, message, user, ph.date,
1662 1662 match=match, force=True)
1663 1663 # only write patch after a successful commit
1664 1664 c = [list(x) for x in refreshchanges]
1665 1665 if inclsubs:
1666 1666 self.putsubstate2changes(substatestate, c)
1667 1667 chunks = patchmod.diff(repo, patchparent,
1668 1668 changes=c, opts=diffopts)
1669 1669 for chunk in chunks:
1670 1670 patchf.write(chunk)
1671 1671 patchf.close()
1672 1672
1673 1673 marks = repo._bookmarks
1674 1674 for bm in bmlist:
1675 1675 marks[bm] = n
1676 1676 marks.write()
1677 1677
1678 1678 self.applied.append(statusentry(n, patchfn))
1679 1679 except: # re-raises
1680 1680 ctx = repo[cparents[0]]
1681 1681 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1682 1682 self.savedirty()
1683 1683 self.ui.warn(_('refresh interrupted while patch was popped! '
1684 1684 '(revert --all, qpush to recover)\n'))
1685 1685 raise
1686 1686 finally:
1687 1687 wlock.release()
1688 1688 self.removeundo(repo)
1689 1689
1690 1690 def init(self, repo, create=False):
1691 1691 if not create and os.path.isdir(self.path):
1692 1692 raise util.Abort(_("patch queue directory already exists"))
1693 1693 try:
1694 1694 os.mkdir(self.path)
1695 1695 except OSError, inst:
1696 1696 if inst.errno != errno.EEXIST or not create:
1697 1697 raise
1698 1698 if create:
1699 1699 return self.qrepo(create=True)
1700 1700
1701 1701 def unapplied(self, repo, patch=None):
1702 1702 if patch and patch not in self.series:
1703 1703 raise util.Abort(_("patch %s is not in series file") % patch)
1704 1704 if not patch:
1705 1705 start = self.seriesend()
1706 1706 else:
1707 1707 start = self.series.index(patch) + 1
1708 1708 unapplied = []
1709 1709 for i in xrange(start, len(self.series)):
1710 1710 pushable, reason = self.pushable(i)
1711 1711 if pushable:
1712 1712 unapplied.append((i, self.series[i]))
1713 1713 self.explainpushable(i)
1714 1714 return unapplied
1715 1715
1716 1716 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1717 1717 summary=False):
1718 1718 def displayname(pfx, patchname, state):
1719 1719 if pfx:
1720 1720 self.ui.write(pfx)
1721 1721 if summary:
1722 1722 ph = patchheader(self.join(patchname), self.plainmode)
1723 1723 msg = ph.message and ph.message[0] or ''
1724 1724 if self.ui.formatted():
1725 1725 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1726 1726 if width > 0:
1727 1727 msg = util.ellipsis(msg, width)
1728 1728 else:
1729 1729 msg = ''
1730 1730 self.ui.write(patchname, label='qseries.' + state)
1731 1731 self.ui.write(': ')
1732 1732 self.ui.write(msg, label='qseries.message.' + state)
1733 1733 else:
1734 1734 self.ui.write(patchname, label='qseries.' + state)
1735 1735 self.ui.write('\n')
1736 1736
1737 1737 applied = set([p.name for p in self.applied])
1738 1738 if length is None:
1739 1739 length = len(self.series) - start
1740 1740 if not missing:
1741 1741 if self.ui.verbose:
1742 1742 idxwidth = len(str(start + length - 1))
1743 1743 for i in xrange(start, start + length):
1744 1744 patch = self.series[i]
1745 1745 if patch in applied:
1746 1746 char, state = 'A', 'applied'
1747 1747 elif self.pushable(i)[0]:
1748 1748 char, state = 'U', 'unapplied'
1749 1749 else:
1750 1750 char, state = 'G', 'guarded'
1751 1751 pfx = ''
1752 1752 if self.ui.verbose:
1753 1753 pfx = '%*d %s ' % (idxwidth, i, char)
1754 1754 elif status and status != char:
1755 1755 continue
1756 1756 displayname(pfx, patch, state)
1757 1757 else:
1758 1758 msng_list = []
1759 1759 for root, dirs, files in os.walk(self.path):
1760 1760 d = root[len(self.path) + 1:]
1761 1761 for f in files:
1762 1762 fl = os.path.join(d, f)
1763 1763 if (fl not in self.series and
1764 1764 fl not in (self.statuspath, self.seriespath,
1765 1765 self.guardspath)
1766 1766 and not fl.startswith('.')):
1767 1767 msng_list.append(fl)
1768 1768 for x in sorted(msng_list):
1769 1769 pfx = self.ui.verbose and ('D ') or ''
1770 1770 displayname(pfx, x, 'missing')
1771 1771
1772 1772 def issaveline(self, l):
1773 1773 if l.name == '.hg.patches.save.line':
1774 1774 return True
1775 1775
1776 1776 def qrepo(self, create=False):
1777 1777 ui = self.ui.copy()
1778 1778 ui.setconfig('paths', 'default', '', overlay=False)
1779 1779 ui.setconfig('paths', 'default-push', '', overlay=False)
1780 1780 if create or os.path.isdir(self.join(".hg")):
1781 1781 return hg.repository(ui, path=self.path, create=create)
1782 1782
1783 1783 def restore(self, repo, rev, delete=None, qupdate=None):
1784 1784 desc = repo[rev].description().strip()
1785 1785 lines = desc.splitlines()
1786 1786 i = 0
1787 1787 datastart = None
1788 1788 series = []
1789 1789 applied = []
1790 1790 qpp = None
1791 1791 for i, line in enumerate(lines):
1792 1792 if line == 'Patch Data:':
1793 1793 datastart = i + 1
1794 1794 elif line.startswith('Dirstate:'):
1795 1795 l = line.rstrip()
1796 1796 l = l[10:].split(' ')
1797 1797 qpp = [bin(x) for x in l]
1798 1798 elif datastart is not None:
1799 1799 l = line.rstrip()
1800 1800 n, name = l.split(':', 1)
1801 1801 if n:
1802 1802 applied.append(statusentry(bin(n), name))
1803 1803 else:
1804 1804 series.append(l)
1805 1805 if datastart is None:
1806 1806 self.ui.warn(_("no saved patch data found\n"))
1807 1807 return 1
1808 1808 self.ui.warn(_("restoring status: %s\n") % lines[0])
1809 1809 self.fullseries = series
1810 1810 self.applied = applied
1811 1811 self.parseseries()
1812 1812 self.seriesdirty = True
1813 1813 self.applieddirty = True
1814 1814 heads = repo.changelog.heads()
1815 1815 if delete:
1816 1816 if rev not in heads:
1817 1817 self.ui.warn(_("save entry has children, leaving it alone\n"))
1818 1818 else:
1819 1819 self.ui.warn(_("removing save entry %s\n") % short(rev))
1820 1820 pp = repo.dirstate.parents()
1821 1821 if rev in pp:
1822 1822 update = True
1823 1823 else:
1824 1824 update = False
1825 1825 self.strip(repo, [rev], update=update, backup='strip')
1826 1826 if qpp:
1827 1827 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1828 1828 (short(qpp[0]), short(qpp[1])))
1829 1829 if qupdate:
1830 1830 self.ui.status(_("updating queue directory\n"))
1831 1831 r = self.qrepo()
1832 1832 if not r:
1833 1833 self.ui.warn(_("unable to load queue repository\n"))
1834 1834 return 1
1835 1835 hg.clean(r, qpp[0])
1836 1836
1837 1837 def save(self, repo, msg=None):
1838 1838 if not self.applied:
1839 1839 self.ui.warn(_("save: no patches applied, exiting\n"))
1840 1840 return 1
1841 1841 if self.issaveline(self.applied[-1]):
1842 1842 self.ui.warn(_("status is already saved\n"))
1843 1843 return 1
1844 1844
1845 1845 if not msg:
1846 1846 msg = _("hg patches saved state")
1847 1847 else:
1848 1848 msg = "hg patches: " + msg.rstrip('\r\n')
1849 1849 r = self.qrepo()
1850 1850 if r:
1851 1851 pp = r.dirstate.parents()
1852 1852 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1853 1853 msg += "\n\nPatch Data:\n"
1854 1854 msg += ''.join('%s\n' % x for x in self.applied)
1855 1855 msg += ''.join(':%s\n' % x for x in self.fullseries)
1856 1856 n = repo.commit(msg, force=True)
1857 1857 if not n:
1858 1858 self.ui.warn(_("repo commit failed\n"))
1859 1859 return 1
1860 1860 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1861 1861 self.applieddirty = True
1862 1862 self.removeundo(repo)
1863 1863
1864 1864 def fullseriesend(self):
1865 1865 if self.applied:
1866 1866 p = self.applied[-1].name
1867 1867 end = self.findseries(p)
1868 1868 if end is None:
1869 1869 return len(self.fullseries)
1870 1870 return end + 1
1871 1871 return 0
1872 1872
1873 1873 def seriesend(self, all_patches=False):
1874 1874 """If all_patches is False, return the index of the next pushable patch
1875 1875 in the series, or the series length. If all_patches is True, return the
1876 1876 index of the first patch past the last applied one.
1877 1877 """
1878 1878 end = 0
1879 1879 def next(start):
1880 1880 if all_patches or start >= len(self.series):
1881 1881 return start
1882 1882 for i in xrange(start, len(self.series)):
1883 1883 p, reason = self.pushable(i)
1884 1884 if p:
1885 1885 return i
1886 1886 self.explainpushable(i)
1887 1887 return len(self.series)
1888 1888 if self.applied:
1889 1889 p = self.applied[-1].name
1890 1890 try:
1891 1891 end = self.series.index(p)
1892 1892 except ValueError:
1893 1893 return 0
1894 1894 return next(end + 1)
1895 1895 return next(end)
1896 1896
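    # Illustrative behaviour, assuming series ['a', 'b', 'c'] with 'a'
    # applied and 'b' guarded off: seriesend() skips the unpushable 'b' and
    # returns 2 (the index of 'c'), while seriesend(all_patches=True)
    # returns 1, the slot just past the last applied patch.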
1897 1897 def appliedname(self, index):
1898 1898 pname = self.applied[index].name
1899 1899 if not self.ui.verbose:
1900 1900 p = pname
1901 1901 else:
1902 1902 p = str(self.series.index(pname)) + " " + pname
1903 1903 return p
1904 1904
1905 1905 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1906 1906 force=None, git=False):
1907 1907 def checkseries(patchname):
1908 1908 if patchname in self.series:
1909 1909 raise util.Abort(_('patch %s is already in the series file')
1910 1910 % patchname)
1911 1911
1912 1912 if rev:
1913 1913 if files:
1914 1914 raise util.Abort(_('option "-r" not valid when importing '
1915 1915 'files'))
1916 1916 rev = scmutil.revrange(repo, rev)
1917 1917 rev.sort(reverse=True)
1918 1918 elif not files:
1919 1919 raise util.Abort(_('no files or revisions specified'))
1920 1920 if (len(files) > 1 or len(rev) > 1) and patchname:
1921 1921 raise util.Abort(_('option "-n" not valid when importing multiple '
1922 1922 'patches'))
1923 1923 imported = []
1924 1924 if rev:
1925 1925 # If mq patches are applied, we can only import revisions
1926 1926 # that form a linear path to qbase.
1927 1927 # Otherwise, they should form a linear path to a head.
1928 1928 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1929 1929 if len(heads) > 1:
1930 1930 raise util.Abort(_('revision %d is the root of more than one '
1931 1931 'branch') % rev[-1])
1932 1932 if self.applied:
1933 1933 base = repo.changelog.node(rev[0])
1934 1934 if base in [n.node for n in self.applied]:
1935 1935 raise util.Abort(_('revision %d is already managed')
1936 1936 % rev[0])
1937 1937 if heads != [self.applied[-1].node]:
1938 1938 raise util.Abort(_('revision %d is not the parent of '
1939 1939 'the queue') % rev[0])
1940 1940 base = repo.changelog.rev(self.applied[0].node)
1941 1941 lastparent = repo.changelog.parentrevs(base)[0]
1942 1942 else:
1943 1943 if heads != [repo.changelog.node(rev[0])]:
1944 1944 raise util.Abort(_('revision %d has unmanaged children')
1945 1945 % rev[0])
1946 1946 lastparent = None
1947 1947
1948 1948 diffopts = self.diffopts({'git': git})
1949 1949 for r in rev:
1950 1950 if not repo[r].mutable():
1951 1951 raise util.Abort(_('revision %d is not mutable') % r,
1952 1952 hint=_('see "hg help phases" for details'))
1953 1953 p1, p2 = repo.changelog.parentrevs(r)
1954 1954 n = repo.changelog.node(r)
1955 1955 if p2 != nullrev:
1956 1956 raise util.Abort(_('cannot import merge revision %d') % r)
1957 1957 if lastparent and lastparent != r:
1958 1958 raise util.Abort(_('revision %d is not the parent of %d')
1959 1959 % (r, lastparent))
1960 1960 lastparent = p1
1961 1961
1962 1962 if not patchname:
1963 1963 patchname = normname('%d.diff' % r)
1964 1964 checkseries(patchname)
1965 1965 self.checkpatchname(patchname, force)
1966 1966 self.fullseries.insert(0, patchname)
1967 1967
1968 1968 patchf = self.opener(patchname, "w")
1969 1969 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1970 1970 patchf.close()
1971 1971
1972 1972 se = statusentry(n, patchname)
1973 1973 self.applied.insert(0, se)
1974 1974
1975 1975 self.added.append(patchname)
1976 1976 imported.append(patchname)
1977 1977 patchname = None
1978 1978 if rev and repo.ui.configbool('mq', 'secret', False):
1979 1979 # if we added anything with --rev, we must move the secret root
1980 1980 phases.retractboundary(repo, phases.secret, [n])
1981 1981 self.parseseries()
1982 1982 self.applieddirty = True
1983 1983 self.seriesdirty = True
1984 1984
1985 1985 for i, filename in enumerate(files):
1986 1986 if existing:
1987 1987 if filename == '-':
1988 1988 raise util.Abort(_('-e is incompatible with import from -'))
1989 1989 filename = normname(filename)
1990 1990 self.checkreservedname(filename)
1991 1991 originpath = self.join(filename)
1992 1992 if not os.path.isfile(originpath):
1993 1993 raise util.Abort(_("patch %s does not exist") % filename)
1994 1994
1995 1995 if patchname:
1996 1996 self.checkpatchname(patchname, force)
1997 1997
1998 1998 self.ui.write(_('renaming %s to %s\n')
1999 1999 % (filename, patchname))
2000 2000 util.rename(originpath, self.join(patchname))
2001 2001 else:
2002 2002 patchname = filename
2003 2003
2004 2004 else:
2005 2005 if filename == '-' and not patchname:
2006 2006 raise util.Abort(_('need --name to import a patch from -'))
2007 2007 elif not patchname:
2008 2008 patchname = normname(os.path.basename(filename.rstrip('/')))
2009 2009 self.checkpatchname(patchname, force)
2010 2010 try:
2011 2011 if filename == '-':
2012 2012 text = self.ui.fin.read()
2013 2013 else:
2014 2014 fp = hg.openpath(self.ui, filename)
2015 2015 text = fp.read()
2016 2016 fp.close()
2017 2017 except (OSError, IOError):
2018 2018 raise util.Abort(_("unable to read file %s") % filename)
2019 2019 patchf = self.opener(patchname, "w")
2020 2020 patchf.write(text)
2021 2021 patchf.close()
2022 2022 if not force:
2023 2023 checkseries(patchname)
2024 2024 if patchname not in self.series:
2025 2025 index = self.fullseriesend() + i
2026 2026 self.fullseries[index:index] = [patchname]
2027 2027 self.parseseries()
2028 2028 self.seriesdirty = True
2029 2029 self.ui.warn(_("adding %s to series file\n") % patchname)
2030 2030 self.added.append(patchname)
2031 2031 imported.append(patchname)
2032 2032 patchname = None
2033 2033
2034 2034 self.removeundo(repo)
2035 2035 return imported
2036 2036
2037 2037 def fixkeepchangesopts(ui, opts):
2038 2038 if (not ui.configbool('mq', 'keepchanges') or opts.get('force')
2039 2039 or opts.get('exact')):
2040 2040 return opts
2041 2041 opts = dict(opts)
2042 2042 opts['keep_changes'] = True
2043 2043 return opts
2044 2044
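# Illustration of the helper above (a sketch; values hypothetical): with
#
#   [mq]
#   keepchanges = True
#
# in hgrc, plain "hg qpush"/"hg qpop"/"hg qgoto" behave as if --keep-changes
# had been passed, while --force or --exact still bypasses the default.
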
2045 2045 @command("qdelete|qremove|qrm",
2046 2046 [('k', 'keep', None, _('keep patch file')),
2047 2047 ('r', 'rev', [],
2048 2048 _('stop managing a revision (DEPRECATED)'), _('REV'))],
2049 2049 _('hg qdelete [-k] [PATCH]...'))
2050 2050 def delete(ui, repo, *patches, **opts):
2051 2051 """remove patches from queue
2052 2052
2053 2053 The patches must not be applied, and at least one patch is required. Exact
2054 2054 patch identifiers must be given. With -k/--keep, the patch files are
2055 2055 preserved in the patch directory.
2056 2056
2057 2057 To stop managing a patch and move it into permanent history,
2058 2058 use the :hg:`qfinish` command."""
2059 2059 q = repo.mq
2060 2060 q.delete(repo, patches, opts)
2061 2061 q.savedirty()
2062 2062 return 0
2063 2063
2064 2064 @command("qapplied",
2065 2065 [('1', 'last', None, _('show only the preceding applied patch'))
2066 2066 ] + seriesopts,
2067 2067 _('hg qapplied [-1] [-s] [PATCH]'))
2068 2068 def applied(ui, repo, patch=None, **opts):
2069 2069 """print the patches already applied
2070 2070
2071 2071 Returns 0 on success."""
2072 2072
2073 2073 q = repo.mq
2074 2074
2075 2075 if patch:
2076 2076 if patch not in q.series:
2077 2077 raise util.Abort(_("patch %s is not in series file") % patch)
2078 2078 end = q.series.index(patch) + 1
2079 2079 else:
2080 2080 end = q.seriesend(True)
2081 2081
2082 2082 if opts.get('last') and not end:
2083 2083 ui.write(_("no patches applied\n"))
2084 2084 return 1
2085 2085 elif opts.get('last') and end == 1:
2086 2086 ui.write(_("only one patch applied\n"))
2087 2087 return 1
2088 2088 elif opts.get('last'):
2089 2089 start = end - 2
2090 2090 end = 1
2091 2091 else:
2092 2092 start = 0
2093 2093
2094 2094 q.qseries(repo, length=end, start=start, status='A',
2095 2095 summary=opts.get('summary'))
2096 2096
2097 2097
2098 2098 @command("qunapplied",
2099 2099 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2100 2100 _('hg qunapplied [-1] [-s] [PATCH]'))
2101 2101 def unapplied(ui, repo, patch=None, **opts):
2102 2102 """print the patches not yet applied
2103 2103
2104 2104 Returns 0 on success."""
2105 2105
2106 2106 q = repo.mq
2107 2107 if patch:
2108 2108 if patch not in q.series:
2109 2109 raise util.Abort(_("patch %s is not in series file") % patch)
2110 2110 start = q.series.index(patch) + 1
2111 2111 else:
2112 2112 start = q.seriesend(True)
2113 2113
2114 2114 if start == len(q.series) and opts.get('first'):
2115 2115 ui.write(_("all patches applied\n"))
2116 2116 return 1
2117 2117
2118 2118 length = opts.get('first') and 1 or None
2119 2119 q.qseries(repo, start=start, length=length, status='U',
2120 2120 summary=opts.get('summary'))
2121 2121
2122 2122 @command("qimport",
2123 2123 [('e', 'existing', None, _('import file in patch directory')),
2124 2124 ('n', 'name', '',
2125 2125 _('name of patch file'), _('NAME')),
2126 2126 ('f', 'force', None, _('overwrite existing files')),
2127 2127 ('r', 'rev', [],
2128 2128 _('place existing revisions under mq control'), _('REV')),
2129 2129 ('g', 'git', None, _('use git extended diff format')),
2130 2130 ('P', 'push', None, _('qpush after importing'))],
2131 2131 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
2132 2132 def qimport(ui, repo, *filename, **opts):
2133 2133 """import a patch or existing changeset
2134 2134
2135 2135 The patch is inserted into the series after the last applied
2136 2136 patch. If no patches have been applied, qimport prepends the patch
2137 2137 to the series.
2138 2138
2139 2139 The patch will have the same name as its source file unless you
2140 2140 give it a new one with -n/--name.
2141 2141
2142 2142 You can register an existing patch inside the patch directory with
2143 2143 the -e/--existing flag.
2144 2144
2145 2145 With -f/--force, an existing patch of the same name will be
2146 2146 overwritten.
2147 2147
2148 2148 An existing changeset may be placed under mq control with -r/--rev
2149 2149 (e.g. qimport --rev tip -n patch will place tip under mq control).
2150 2150 With -g/--git, patches imported with --rev will use the git diff
2151 2151 format. See the diffs help topic for information on why this is
2152 2152 important for preserving rename/copy information and permission
2153 2153 changes. Use :hg:`qfinish` to remove changesets from mq control.
2154 2154
2155 2155 To import a patch from standard input, pass - as the patch file.
2156 2156 When importing from standard input, a patch name must be specified
2157 2157 using the --name flag.
2158 2158
2159 2159 To import an existing patch while renaming it::
2160 2160
2161 2161 hg qimport -e existing-patch -n new-name
2162 2162
2163 2163 Returns 0 if import succeeded.
2164 2164 """
2165 2165 lock = repo.lock() # because this may move phases
2166 2166 try:
2167 2167 q = repo.mq
2168 2168 try:
2169 2169 imported = q.qimport(
2170 2170 repo, filename, patchname=opts.get('name'),
2171 2171 existing=opts.get('existing'), force=opts.get('force'),
2172 2172 rev=opts.get('rev'), git=opts.get('git'))
2173 2173 finally:
2174 2174 q.savedirty()
2175 2175 finally:
2176 2176 lock.release()
2177 2177
2178 2178 if imported and opts.get('push') and not opts.get('rev'):
2179 2179 return q.push(repo, imported[-1])
2180 2180 return 0
2181 2181
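# A short qimport session for illustration (file and patch names
# hypothetical):
#
#   $ hg qimport fix-encoding.diff        # insert the patch after the top
#   $ hg qimport --rev tip -n tip.patch   # place tip under mq control
#
# Note that -P/--push only takes effect for imported files, not for --rev
# imports (see the "not opts.get('rev')" check above).
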
2182 2182 def qinit(ui, repo, create):
2183 2183 """initialize a new queue repository
2184 2184
2185 2185 This command also creates a series file for ordering patches, and
2186 2186 an mq-specific .hgignore file in the queue repository, to exclude
2187 2187 the status and guards files (these contain mostly transient state).
2188 2188
2189 2189 Returns 0 if initialization succeeded."""
2190 2190 q = repo.mq
2191 2191 r = q.init(repo, create)
2192 2192 q.savedirty()
2193 2193 if r:
2194 2194 if not os.path.exists(r.wjoin('.hgignore')):
2195 2195 fp = r.wopener('.hgignore', 'w')
2196 2196 fp.write('^\\.hg\n')
2197 2197 fp.write('^\\.mq\n')
2198 2198 fp.write('syntax: glob\n')
2199 2199 fp.write('status\n')
2200 2200 fp.write('guards\n')
2201 2201 fp.close()
2202 2202 if not os.path.exists(r.wjoin('series')):
2203 2203 r.wopener('series', 'w').close()
2204 2204 r[None].add(['.hgignore', 'series'])
2205 2205 commands.add(ui, r)
2206 2206 return 0
2207 2207
2208 2208 @command("^qinit",
2209 2209 [('c', 'create-repo', None, _('create queue repository'))],
2210 2210 _('hg qinit [-c]'))
2211 2211 def init(ui, repo, **opts):
2212 2212 """init a new queue repository (DEPRECATED)
2213 2213
2214 2214 The queue repository is unversioned by default. If
2215 2215 -c/--create-repo is specified, qinit will create a separate nested
2216 2216 repository for patches (qinit -c may also be run later to convert
2217 2217 an unversioned patch repository into a versioned one). You can use
2218 2218 qcommit to commit changes to this queue repository.
2219 2219
2220 2220 This command is deprecated. Without -c, it's implied by other relevant
2221 2221 commands. With -c, use :hg:`init --mq` instead."""
2222 2222 return qinit(ui, repo, create=opts.get('create_repo'))
2223 2223
2224 2224 @command("qclone",
2225 2225 [('', 'pull', None, _('use pull protocol to copy metadata')),
2226 2226 ('U', 'noupdate', None,
2227 2227 _('do not update the new working directories')),
2228 2228 ('', 'uncompressed', None,
2229 2229 _('use uncompressed transfer (fast over LAN)')),
2230 2230 ('p', 'patches', '',
2231 2231 _('location of source patch repository'), _('REPO')),
2232 2232 ] + commands.remoteopts,
2233 2233 _('hg qclone [OPTION]... SOURCE [DEST]'))
2234 2234 def clone(ui, source, dest=None, **opts):
2235 2235 '''clone main and patch repository at same time
2236 2236
2237 2237 If the source is local, the destination will have no patches applied.
2238 2238 If the source is remote, this command cannot check whether patches
2239 2239 are applied in the source, so it cannot guarantee that no patches
2240 2240 are applied in the destination. If you clone a remote repository,
2241 2241 make sure it has no patches applied before cloning.
2242 2242 
2243 2243 The source patch repository is looked for in <src>/.hg/patches by
2244 2244 default. Use -p <url> to change it.
2245 2245
2246 2246 The patch directory must be a nested Mercurial repository, as
2247 2247 would be created by :hg:`init --mq`.
2248 2248
2249 2249 Return 0 on success.
2250 2250 '''
2251 2251 def patchdir(repo):
2252 2252 """compute a patch repo url from a repo object"""
2253 2253 url = repo.url()
2254 2254 if url.endswith('/'):
2255 2255 url = url[:-1]
2256 2256 return url + '/.hg/patches'
2257 2257
2258 2258 # main repo (destination and sources)
2259 2259 if dest is None:
2260 2260 dest = hg.defaultdest(source)
2261 2261 sr = hg.peer(ui, opts, ui.expandpath(source))
2262 2262
2263 2263 # patches repo (source only)
2264 2264 if opts.get('patches'):
2265 2265 patchespath = ui.expandpath(opts.get('patches'))
2266 2266 else:
2267 2267 patchespath = patchdir(sr)
2268 2268 try:
2269 2269 hg.peer(ui, opts, patchespath)
2270 2270 except error.RepoError:
2271 2271 raise util.Abort(_('versioned patch repository not found'
2272 2272 ' (see init --mq)'))
2273 2273 qbase, destrev = None, None
2274 2274 if sr.local():
2275 2275 repo = sr.local()
2276 2276 if repo.mq.applied and repo[repo.mq.applied[0].node].phase() != phases.secret:
2277 2277 qbase = repo.mq.applied[0].node
2278 2278 if not hg.islocal(dest):
2279 2279 heads = set(repo.heads())
2280 2280 destrev = list(heads.difference(repo.heads(qbase)))
2281 2281 destrev.append(repo.changelog.parents(qbase)[0])
2282 2282 elif sr.capable('lookup'):
2283 2283 try:
2284 2284 qbase = sr.lookup('qbase')
2285 2285 except error.RepoError:
2286 2286 pass
2287 2287
2288 2288 ui.note(_('cloning main repository\n'))
2289 2289 sr, dr = hg.clone(ui, opts, sr.url(), dest,
2290 2290 pull=opts.get('pull'),
2291 2291 rev=destrev,
2292 2292 update=False,
2293 2293 stream=opts.get('uncompressed'))
2294 2294
2295 2295 ui.note(_('cloning patch repository\n'))
2296 2296 hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
2297 2297 pull=opts.get('pull'), update=not opts.get('noupdate'),
2298 2298 stream=opts.get('uncompressed'))
2299 2299
2300 2300 if dr.local():
2301 2301 repo = dr.local()
2302 2302 if qbase:
2303 2303 ui.note(_('stripping applied patches from destination '
2304 2304 'repository\n'))
2305 2305 repo.mq.strip(repo, [qbase], update=False, backup=None)
2306 2306 if not opts.get('noupdate'):
2307 2307 ui.note(_('updating destination repository\n'))
2308 2308 hg.update(repo, repo.changelog.tip())
2309 2309
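# Example invocation (URL and destination hypothetical):
#
#   $ hg qclone http://example.com/repo work
#
# clones http://example.com/repo into "work" and its patch repository into
# "work/.hg/patches" (see patchdir above); for a local destination with
# applied patches, those patches are then stripped from the clone.
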
2310 2310 @command("qcommit|qci",
2311 2311 commands.table["^commit|ci"][1],
2312 2312 _('hg qcommit [OPTION]... [FILE]...'))
2313 2313 def commit(ui, repo, *pats, **opts):
2314 2314 """commit changes in the queue repository (DEPRECATED)
2315 2315
2316 2316 This command is deprecated; use :hg:`commit --mq` instead."""
2317 2317 q = repo.mq
2318 2318 r = q.qrepo()
2319 2319 if not r:
2320 2320 raise util.Abort(_('no queue repository'))
2321 2321 commands.commit(r.ui, r, *pats, **opts)
2322 2322
2323 2323 @command("qseries",
2324 2324 [('m', 'missing', None, _('print patches not in series')),
2325 2325 ] + seriesopts,
2326 2326 _('hg qseries [-ms]'))
2327 2327 def series(ui, repo, **opts):
2328 2328 """print the entire series file
2329 2329
2330 2330 Returns 0 on success."""
2331 2331 repo.mq.qseries(repo, missing=opts.get('missing'),
2332 2332 summary=opts.get('summary'))
2333 2333 return 0
2334 2334
2335 2335 @command("qtop", seriesopts, _('hg qtop [-s]'))
2336 2336 def top(ui, repo, **opts):
2337 2337 """print the name of the current patch
2338 2338
2339 2339 Returns 0 on success."""
2340 2340 q = repo.mq
2341 2341 t = q.applied and q.seriesend(True) or 0
2342 2342 if t:
2343 2343 q.qseries(repo, start=t - 1, length=1, status='A',
2344 2344 summary=opts.get('summary'))
2345 2345 else:
2346 2346 ui.write(_("no patches applied\n"))
2347 2347 return 1
2348 2348
2349 2349 @command("qnext", seriesopts, _('hg qnext [-s]'))
2350 2350 def next(ui, repo, **opts):
2351 2351 """print the name of the next pushable patch
2352 2352
2353 2353 Returns 0 on success."""
2354 2354 q = repo.mq
2355 2355 end = q.seriesend()
2356 2356 if end == len(q.series):
2357 2357 ui.write(_("all patches applied\n"))
2358 2358 return 1
2359 2359 q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
2360 2360
2361 2361 @command("qprev", seriesopts, _('hg qprev [-s]'))
2362 2362 def prev(ui, repo, **opts):
2363 2363 """print the name of the preceding applied patch
2364 2364
2365 2365 Returns 0 on success."""
2366 2366 q = repo.mq
2367 2367 l = len(q.applied)
2368 2368 if l == 1:
2369 2369 ui.write(_("only one patch applied\n"))
2370 2370 return 1
2371 2371 if not l:
2372 2372 ui.write(_("no patches applied\n"))
2373 2373 return 1
2374 2374 idx = q.series.index(q.applied[-2].name)
2375 2375 q.qseries(repo, start=idx, length=1, status='A',
2376 2376 summary=opts.get('summary'))
2377 2377
2378 2378 def setupheaderopts(ui, opts):
2379 2379 if not opts.get('user') and opts.get('currentuser'):
2380 2380 opts['user'] = ui.username()
2381 2381 if not opts.get('date') and opts.get('currentdate'):
2382 2382 opts['date'] = "%d %d" % util.makedate()
2383 2383
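# util.makedate() returns a (unixtime, offset) pair, so the "%d %d" above
# produces Mercurial's internal date format, e.g. "1361478024 -3600" for a
# UTC+1 timezone (values illustrative).
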
2384 2384 @command("^qnew",
2385 2385 [('e', 'edit', None, _('edit commit message')),
2386 2386 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2387 2387 ('g', 'git', None, _('use git extended diff format')),
2388 2388 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2389 2389 ('u', 'user', '',
2390 2390 _('add "From: <USER>" to patch'), _('USER')),
2391 2391 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2392 2392 ('d', 'date', '',
2393 2393 _('add "Date: <DATE>" to patch'), _('DATE'))
2394 2394 ] + commands.walkopts + commands.commitopts,
2395 2395 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'))
2396 2396 def new(ui, repo, patch, *args, **opts):
2397 2397 """create a new patch
2398 2398
2399 2399 qnew creates a new patch on top of the currently-applied patch (if
2400 2400 any). The patch will be initialized with any outstanding changes
2401 2401 in the working directory. You may also use -I/--include,
2402 2402 -X/--exclude, and/or a list of files after the patch name to add
2403 2403 only changes to matching files to the new patch, leaving the rest
2404 2404 as uncommitted modifications.
2405 2405
2406 2406 -u/--user and -d/--date can be used to set the (given) user and
2407 2407 date, respectively. -U/--currentuser and -D/--currentdate set user
2408 2408 to current user and date to current date.
2409 2409
2410 2410 -e/--edit, -m/--message or -l/--logfile set the patch header as
2411 2411 well as the commit message. If none is specified, the header is
2412 2412 empty and the commit message is '[mq]: PATCH'.
2413 2413
2414 2414 Use the -g/--git option to keep the patch in the git extended diff
2415 2415 format. Read the diffs help topic for more information on why this
2416 2416 is important for preserving permission changes and copy/rename
2417 2417 information.
2418 2418
2419 2419 Returns 0 on successful creation of a new patch.
2420 2420 """
2421 2421 msg = cmdutil.logmessage(ui, opts)
2422 2422 def getmsg():
2423 2423 return ui.edit(msg, opts.get('user') or ui.username())
2424 2424 q = repo.mq
2426 2426 if opts.get('edit'):
2427 2427 opts['msg'] = getmsg
2428 2428 else:
2429 2429 opts['msg'] = msg
2430 2430 setupheaderopts(ui, opts)
2431 2431 q.new(repo, patch, *args, **opts)
2432 2432 q.savedirty()
2433 2433 return 0
2434 2434
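# Typical qnew usage (patch name and message hypothetical):
#
#   $ hg qnew -U -m "fix off-by-one in walker" off-by-one.patch
#
# creates a new patch capturing any outstanding working directory changes,
# with a "From: <current user>" header and the given commit message.
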
2435 2435 @command("^qrefresh",
2436 2436 [('e', 'edit', None, _('edit commit message')),
2437 2437 ('g', 'git', None, _('use git extended diff format')),
2438 2438 ('s', 'short', None,
2439 2439 _('refresh only files already in the patch and specified files')),
2440 2440 ('U', 'currentuser', None,
2441 2441 _('add/update author field in patch with current user')),
2442 2442 ('u', 'user', '',
2443 2443 _('add/update author field in patch with given user'), _('USER')),
2444 2444 ('D', 'currentdate', None,
2445 2445 _('add/update date field in patch with current date')),
2446 2446 ('d', 'date', '',
2447 2447 _('add/update date field in patch with given date'), _('DATE'))
2448 2448 ] + commands.walkopts + commands.commitopts,
2449 2449 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'))
2450 2450 def refresh(ui, repo, *pats, **opts):
2451 2451 """update the current patch
2452 2452
2453 2453 If any file patterns are provided, the refreshed patch will
2454 2454 contain only the modifications that match those patterns; the
2455 2455 remaining modifications will remain in the working directory.
2456 2456
2457 2457 If -s/--short is specified, files currently included in the patch
2458 2458 will be refreshed just like matched files and remain in the patch.
2459 2459
2460 2460 If -e/--edit is specified, Mercurial will start your configured editor for
2461 2461 you to enter a message. In case qrefresh fails, you will find a backup of
2462 2462 your message in ``.hg/last-message.txt``.
2463 2463
2464 2464 hg add/remove/copy/rename work as usual, though you might want to
2465 2465 use git-style patches (-g/--git or [diff] git=1) to track copies
2466 2466 and renames. See the diffs help topic for more information on the
2467 2467 git diff format.
2468 2468
2469 2469 Returns 0 on success.
2470 2470 """
2471 2471 q = repo.mq
2472 2472 message = cmdutil.logmessage(ui, opts)
2473 2473 if opts.get('edit'):
2474 2474 if not q.applied:
2475 2475 ui.write(_("no patches applied\n"))
2476 2476 return 1
2477 2477 if message:
2478 2478 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2479 2479 patch = q.applied[-1].name
2480 2480 ph = patchheader(q.join(patch), q.plainmode)
2481 2481 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
2482 2482 # We don't want to lose the patch message if qrefresh fails (issue2062)
2483 2483 repo.savecommitmessage(message)
2484 2484 setupheaderopts(ui, opts)
2485 2485 wlock = repo.wlock()
2486 2486 try:
2487 2487 ret = q.refresh(repo, pats, msg=message, **opts)
2488 2488 q.savedirty()
2489 2489 return ret
2490 2490 finally:
2491 2491 wlock.release()
2492 2492
2493 2493 @command("^qdiff",
2494 2494 commands.diffopts + commands.diffopts2 + commands.walkopts,
2495 2495 _('hg qdiff [OPTION]... [FILE]...'))
2496 2496 def diff(ui, repo, *pats, **opts):
2497 2497 """diff of the current patch and subsequent modifications
2498 2498
2499 2499 Shows a diff which includes the current patch as well as any
2500 2500 changes which have been made in the working directory since the
2501 2501 last refresh (thus showing what the current patch would become
2502 2502 after a qrefresh).
2503 2503
2504 2504 Use :hg:`diff` if you only want to see the changes made since the
2505 2505 last qrefresh, or :hg:`export qtip` if you want to see changes
2506 2506 made by the current patch without including changes made since the
2507 2507 qrefresh.
2508 2508
2509 2509 Returns 0 on success.
2510 2510 """
2511 2511 repo.mq.diff(repo, pats, opts)
2512 2512 return 0
2513 2513
2514 2514 @command('qfold',
2515 2515 [('e', 'edit', None, _('edit patch header')),
2516 2516 ('k', 'keep', None, _('keep folded patch files')),
2517 2517 ] + commands.commitopts,
2518 2518 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
2519 2519 def fold(ui, repo, *files, **opts):
2520 2520 """fold the named patches into the current patch
2521 2521
2522 2522 Patches must not yet be applied. Each patch will be successively
2523 2523 applied to the current patch in the order given. If all the
2524 2524 patches apply successfully, the current patch will be refreshed
2525 2525 with the new cumulative patch, and the folded patches will be
2526 2526 deleted. With -k/--keep, the folded patch files will not be
2527 2527 removed afterwards.
2528 2528
2529 2529 The header for each folded patch will be concatenated with the
2530 2530 current patch header, separated by a line of ``* * *``.
2531 2531
2532 2532 Returns 0 on success."""
2533 2533 q = repo.mq
2534 2534 if not files:
2535 2535 raise util.Abort(_('qfold requires at least one patch name'))
2536 2536 if not q.checktoppatch(repo)[0]:
2537 2537 raise util.Abort(_('no patches applied'))
2538 2538 q.checklocalchanges(repo)
2539 2539
2540 2540 message = cmdutil.logmessage(ui, opts)
2541 2541 if opts.get('edit'):
2542 2542 if message:
2543 2543 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2544 2544
2545 2545 parent = q.lookup('qtip')
2546 2546 patches = []
2547 2547 messages = []
2548 2548 for f in files:
2549 2549 p = q.lookup(f)
2550 2550 if p in patches or p == parent:
2551 2551 ui.warn(_('skipping already folded patch %s\n') % p)
2551 2551 continue
2552 2552 if q.isapplied(p):
2553 2553 raise util.Abort(_('qfold cannot fold already applied patch %s')
2554 2554 % p)
2555 2555 patches.append(p)
2556 2556
2557 2557 for p in patches:
2558 2558 if not message:
2559 2559 ph = patchheader(q.join(p), q.plainmode)
2560 2560 if ph.message:
2561 2561 messages.append(ph.message)
2562 2562 pf = q.join(p)
2563 2563 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2564 2564 if not patchsuccess:
2565 2565 raise util.Abort(_('error folding patch %s') % p)
2566 2566
2567 2567 if not message:
2568 2568 ph = patchheader(q.join(parent), q.plainmode)
2569 2569 message, user = ph.message, ph.user
2570 2570 for msg in messages:
2571 2571 message.append('* * *')
2572 2572 message.extend(msg)
2573 2573 message = '\n'.join(message)
2574 2574
2575 2575 if opts.get('edit'):
2576 2576 message = ui.edit(message, user or ui.username())
2577 2577
2578 2578 diffopts = q.patchopts(q.diffopts(), *patches)
2579 2579 wlock = repo.wlock()
2580 2580 try:
2581 2581 q.refresh(repo, msg=message, git=diffopts.git)
2582 2582 q.delete(repo, patches, opts)
2583 2583 q.savedirty()
2584 2584 finally:
2585 2585 wlock.release()
2586 2586
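# Example fold (patch names hypothetical): with "base.patch" applied on top,
#
#   $ hg qfold feature-a.patch feature-b.patch
#
# applies both unapplied patches onto base.patch, refreshes base.patch with
# the cumulative diff, and deletes the folded patch files (unless -k/--keep).
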
2587 2587 @command("qgoto",
2588 2588 [('', 'keep-changes', None,
2589 2589 _('tolerate non-conflicting local changes')),
2590 2590 ('f', 'force', None, _('overwrite any local changes')),
2591 2591 ('', 'no-backup', None, _('do not save backup copies of files'))],
2592 2592 _('hg qgoto [OPTION]... PATCH'))
2593 2593 def goto(ui, repo, patch, **opts):
2594 2594 '''push or pop patches until named patch is at top of stack
2595 2595
2596 2596 Returns 0 on success.'''
2597 2597 opts = fixkeepchangesopts(ui, opts)
2598 2598 q = repo.mq
2599 2599 patch = q.lookup(patch)
2600 2600 nobackup = opts.get('no_backup')
2601 2601 keepchanges = opts.get('keep_changes')
2602 2602 if q.isapplied(patch):
2603 2603 ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
2604 2604 keepchanges=keepchanges)
2605 2605 else:
2606 2606 ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
2607 2607 keepchanges=keepchanges)
2608 2608 q.savedirty()
2609 2609 return ret
2610 2610
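# For instance (patch name hypothetical): "hg qgoto a.patch" pops down to
# a.patch if it is already applied, and pushes up to it otherwise (see the
# q.isapplied check above).
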
2611 2611 @command("qguard",
2612 2612 [('l', 'list', None, _('list all patches and guards')),
2613 2613 ('n', 'none', None, _('drop all guards'))],
2614 2614 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
2615 2615 def guard(ui, repo, *args, **opts):
2616 2616 '''set or print guards for a patch
2617 2617
2618 2618 Guards control whether a patch can be pushed. A patch with no
2619 2619 guards is always pushed. A patch with a positive guard ("+foo") is
2620 2620 pushed only if the :hg:`qselect` command has activated it. A patch with
2621 2621 a negative guard ("-foo") is never pushed if the :hg:`qselect` command
2622 2622 has activated it.
2623 2623
2624 2624 With no arguments, print the currently active guards.
2625 2625 With arguments, set guards for the named patch.
2626 2626
2627 2627 .. note::
2628 2628 Specifying negative guards now requires '--'.
2629 2629
2630 2630 To set guards on another patch::
2631 2631
2632 2632 hg qguard other.patch -- +2.6.17 -stable
2633 2633
2634 2634 Returns 0 on success.
2635 2635 '''
2636 2636 def status(idx):
2637 2637 guards = q.seriesguards[idx] or ['unguarded']
2638 2638 if q.series[idx] in applied:
2639 2639 state = 'applied'
2640 2640 elif q.pushable(idx)[0]:
2641 2641 state = 'unapplied'
2642 2642 else:
2643 2643 state = 'guarded'
2644 2644 label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
2645 2645 ui.write('%s: ' % ui.label(q.series[idx], label))
2646 2646
2647 2647 for i, guard in enumerate(guards):
2648 2648 if guard.startswith('+'):
2649 2649 ui.write(guard, label='qguard.positive')
2650 2650 elif guard.startswith('-'):
2651 2651 ui.write(guard, label='qguard.negative')
2652 2652 else:
2653 2653 ui.write(guard, label='qguard.unguarded')
2654 2654 if i != len(guards) - 1:
2655 2655 ui.write(' ')
2656 2656 ui.write('\n')
2657 2657 q = repo.mq
2658 2658 applied = set(p.name for p in q.applied)
2659 2659 patch = None
2660 2660 args = list(args)
2661 2661 if opts.get('list'):
2662 2662 if args or opts.get('none'):
2663 2663 raise util.Abort(_('cannot mix -l/--list with options or '
2664 2664 'arguments'))
2665 2665 for i in xrange(len(q.series)):
2666 2666 status(i)
2667 2667 return
2668 2668 if not args or args[0][0:1] in '-+':
2669 2669 if not q.applied:
2670 2670 raise util.Abort(_('no patches applied'))
2671 2671 patch = q.applied[-1].name
2672 2672 if patch is None and args[0][0:1] not in '-+':
2673 2673 patch = args.pop(0)
2674 2674 if patch is None:
2675 2675 raise util.Abort(_('no patch to work with'))
2676 2676 if args or opts.get('none'):
2677 2677 idx = q.findseries(patch)
2678 2678 if idx is None:
2679 2679 raise util.Abort(_('no patch named %s') % patch)
2680 2680 q.setguards(idx, args)
2681 2681 q.savedirty()
2682 2682 else:
2683 2683 status(q.series.index(q.lookup(patch)))
2684 2684
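# Guard semantics in brief (guard names illustrative): after
#
#   $ hg qguard net.patch -- +experimental -stable
#
# net.patch is pushed only while "hg qselect experimental" is active, and is
# always skipped while "hg qselect stable" is active.
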
2685 2685 @command("qheader", [], _('hg qheader [PATCH]'))
2686 2686 def header(ui, repo, patch=None):
2687 2687 """print the header of the topmost or specified patch
2688 2688
2689 2689 Returns 0 on success."""
2690 2690 q = repo.mq
2691 2691
2692 2692 if patch:
2693 2693 patch = q.lookup(patch)
2694 2694 else:
2695 2695 if not q.applied:
2696 2696 ui.write(_('no patches applied\n'))
2697 2697 return 1
2698 2698 patch = q.lookup('qtip')
2699 2699 ph = patchheader(q.join(patch), q.plainmode)
2700 2700
2701 2701 ui.write('\n'.join(ph.message) + '\n')
2702 2702
2703 2703 def lastsavename(path):
2704 2704 (directory, base) = os.path.split(path)
2705 2705 names = os.listdir(directory)
2706 2706 namere = re.compile(r"%s\.([0-9]+)" % re.escape(base))
2707 2707 maxindex = None
2708 2708 maxname = None
2709 2709 for f in names:
2710 2710 m = namere.match(f)
2711 2711 if m:
2712 2712 index = int(m.group(1))
2713 2713 if maxindex is None or index > maxindex:
2714 2714 maxindex = index
2715 2715 maxname = f
2716 2716 if maxname:
2717 2717 return (os.path.join(directory, maxname), maxindex)
2718 2718 return (None, None)
2719 2719
2720 2720 def savename(path):
2721 2721 (last, index) = lastsavename(path)
2722 2722 if last is None:
2723 2723 index = 0
2724 2724 newpath = path + ".%d" % (index + 1)
2725 2725 return newpath
2726 2726
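# Worked example (directory contents hypothetical): if ".hg/patches.1" and
# ".hg/patches.2" exist, lastsavename(".hg/patches") returns
# (".hg/patches.2", 2) and savename(".hg/patches") returns ".hg/patches.3".
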
2727 2727 @command("^qpush",
2728 2728 [('', 'keep-changes', None,
2729 2729 _('tolerate non-conflicting local changes')),
2730 2730 ('f', 'force', None, _('apply on top of local changes')),
2731 2731 ('e', 'exact', None,
2732 2732 _('apply the target patch to its recorded parent')),
2733 2733 ('l', 'list', None, _('list patch name in commit text')),
2734 2734 ('a', 'all', None, _('apply all patches')),
2735 2735 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2736 2736 ('n', 'name', '',
2737 2737 _('merge queue name (DEPRECATED)'), _('NAME')),
2738 2738 ('', 'move', None,
2739 2739 _('reorder patch series and apply only the patch')),
2740 2740 ('', 'no-backup', None, _('do not save backup copies of files'))],
2741 2741 _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
2742 2742 def push(ui, repo, patch=None, **opts):
2743 2743 """push the next patch onto the stack
2744 2744
2745 2745 By default, abort if the working directory contains uncommitted
2746 2746 changes. With --keep-changes, abort only if the uncommitted files
2747 2747 overlap with patched files. With -f/--force, backup and patch over
2748 2748 uncommitted changes.
2749 2749
2750 2750 Return 0 on success.
2751 2751 """
2752 2752 q = repo.mq
2753 2753 mergeq = None
2754 2754
2755 2755 opts = fixkeepchangesopts(ui, opts)
2756 2756 if opts.get('merge'):
2757 2757 if opts.get('name'):
2758 2758 newpath = repo.join(opts.get('name'))
2759 2759 else:
2760 2760 newpath, i = lastsavename(q.path)
2761 2761 if not newpath:
2762 2762 ui.warn(_("no saved queues found, please use -n\n"))
2763 2763 return 1
2764 2764 mergeq = queue(ui, repo.path, newpath)
2765 2765 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2766 2766 ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
2767 2767 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
2768 2768 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
2769 2769 keepchanges=opts.get('keep_changes'))
2770 2770 return ret
2771 2771
2772 2772 @command("^qpop",
2773 2773 [('a', 'all', None, _('pop all patches')),
2774 2774 ('n', 'name', '',
2775 2775 _('queue name to pop (DEPRECATED)'), _('NAME')),
2776 2776 ('', 'keep-changes', None,
2777 2777 _('tolerate non-conflicting local changes')),
2778 2778 ('f', 'force', None, _('forget any local changes to patched files')),
2779 2779 ('', 'no-backup', None, _('do not save backup copies of files'))],
2780 2780 _('hg qpop [-a] [-f] [PATCH | INDEX]'))
2781 2781 def pop(ui, repo, patch=None, **opts):
2782 2782 """pop the current patch off the stack
2783 2783
2784 2784 Without argument, pops off the top of the patch stack. If given a
2785 2785 patch name, keeps popping off patches until the named patch is at
2786 2786 the top of the stack.
2787 2787
2788 2788 By default, abort if the working directory contains uncommitted
2789 2789 changes. With --keep-changes, abort only if the uncommitted files
2790 2790 overlap with patched files. With -f/--force, backup and discard
2791 2791 changes made to such files.
2792 2792
2793 2793 Return 0 on success.
2794 2794 """
2795 2795 opts = fixkeepchangesopts(ui, opts)
2796 2796 localupdate = True
2797 2797 if opts.get('name'):
2798 2798 q = queue(ui, repo.path, repo.join(opts.get('name')))
2799 2799 ui.warn(_('using patch queue: %s\n') % q.path)
2800 2800 localupdate = False
2801 2801 else:
2802 2802 q = repo.mq
2803 2803 ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
2804 2804 all=opts.get('all'), nobackup=opts.get('no_backup'),
2805 2805 keepchanges=opts.get('keep_changes'))
2806 2806 q.savedirty()
2807 2807 return ret
2808 2808
2809 2809 @command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
2810 2810 def rename(ui, repo, patch, name=None, **opts):
2811 2811 """rename a patch
2812 2812
2813 2813 With one argument, renames the current patch to PATCH1.
2814 2814 With two arguments, renames PATCH1 to PATCH2.
2815 2815
2816 2816 Returns 0 on success."""
2817 2817 q = repo.mq
2818 2818 if not name:
2819 2819 name = patch
2820 2820 patch = None
2821 2821
2822 2822 if patch:
2823 2823 patch = q.lookup(patch)
2824 2824 else:
2825 2825 if not q.applied:
2826 2826 ui.write(_('no patches applied\n'))
2827 2827 return
2828 2828 patch = q.lookup('qtip')
2829 2829 absdest = q.join(name)
2830 2830 if os.path.isdir(absdest):
2831 2831 name = normname(os.path.join(name, os.path.basename(patch)))
2832 2832 absdest = q.join(name)
2833 2833 q.checkpatchname(name)
2834 2834
2835 2835 ui.note(_('renaming %s to %s\n') % (patch, name))
2836 2836 i = q.findseries(patch)
2837 2837 guards = q.guard_re.findall(q.fullseries[i])
2838 2838 q.fullseries[i] = name + ''.join([' #' + g for g in guards])
2839 2839 q.parseseries()
2840 2840 q.seriesdirty = True
2841 2841
2842 2842 info = q.isapplied(patch)
2843 2843 if info:
2844 2844 q.applied[info[0]] = statusentry(info[1], name)
2845 2845 q.applieddirty = True
2846 2846
2847 2847 destdir = os.path.dirname(absdest)
2848 2848 if not os.path.isdir(destdir):
2849 2849 os.makedirs(destdir)
2850 2850 util.rename(q.join(patch), absdest)
2851 2851 r = q.qrepo()
2852 2852 if r and patch in r.dirstate:
2853 2853 wctx = r[None]
2854 2854 wlock = r.wlock()
2855 2855 try:
2856 2856 if r.dirstate[patch] == 'a':
2857 2857 r.dirstate.drop(patch)
2858 2858 r.dirstate.add(name)
2859 2859 else:
2860 2860 wctx.copy(patch, name)
2861 2861 wctx.forget([patch])
2862 2862 finally:
2863 2863 wlock.release()
2864 2864
2865 2865 q.savedirty()
2866 2866
2867 2867 @command("qrestore",
2868 2868 [('d', 'delete', None, _('delete save entry')),
2869 2869 ('u', 'update', None, _('update queue working directory'))],
2870 2870 _('hg qrestore [-d] [-u] REV'))
2871 2871 def restore(ui, repo, rev, **opts):
2872 2872 """restore the queue state saved by a revision (DEPRECATED)
2873 2873
2874 2874 This command is deprecated, use :hg:`rebase` instead."""
2875 2875 rev = repo.lookup(rev)
2876 2876 q = repo.mq
2877 2877 q.restore(repo, rev, delete=opts.get('delete'),
2878 2878 qupdate=opts.get('update'))
2879 2879 q.savedirty()
2880 2880 return 0
2881 2881
2882 2882 @command("qsave",
2883 2883 [('c', 'copy', None, _('copy patch directory')),
2884 2884 ('n', 'name', '',
2885 2885 _('copy directory name'), _('NAME')),
2886 2886 ('e', 'empty', None, _('clear queue status file')),
2887 2887 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2888 2888 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
2889 2889 def save(ui, repo, **opts):
2890 2890 """save current queue state (DEPRECATED)
2891 2891
2892 2892 This command is deprecated, use :hg:`rebase` instead."""
2893 2893 q = repo.mq
2894 2894 message = cmdutil.logmessage(ui, opts)
2895 2895 ret = q.save(repo, msg=message)
2896 2896 if ret:
2897 2897 return ret
2898 2898 q.savedirty() # save to .hg/patches before copying
2899 2899 if opts.get('copy'):
2900 2900 path = q.path
2901 2901 if opts.get('name'):
2902 2902 newpath = os.path.join(q.basepath, opts.get('name'))
2903 2903 if os.path.exists(newpath):
2904 2904 if not os.path.isdir(newpath):
2905 2905 raise util.Abort(_('destination %s exists and is not '
2906 2906 'a directory') % newpath)
2907 2907 if not opts.get('force'):
2908 2908 raise util.Abort(_('destination %s exists, '
2909 2909 'use -f to force') % newpath)
2910 2910 else:
2911 2911 newpath = savename(path)
2912 2912 ui.warn(_("copy %s to %s\n") % (path, newpath))
2913 2913 util.copyfiles(path, newpath)
2914 2914 if opts.get('empty'):
2915 2915 del q.applied[:]
2916 2916 q.applieddirty = True
2917 2917 q.savedirty()
2918 2918 return 0
2919 2919
2920 2920 @command("strip",
2921 2921 [
2922 2922 ('r', 'rev', [], _('strip specified revision (optional, '
2923 2923 'can specify revisions without this '
2924 2924 'option)'), _('REV')),
2925 2925 ('f', 'force', None, _('force removal of changesets, discard '
2926 2926 'uncommitted changes (no backup)')),
2927 2927 ('b', 'backup', None, _('bundle only changesets with local revision'
2928 2928 ' number greater than REV which are not'
2929 2929 ' descendants of REV (DEPRECATED)')),
2930 2930 ('', 'no-backup', None, _('no backups')),
2931 2931 ('', 'nobackup', None, _('no backups (DEPRECATED)')),
2932 2932 ('n', '', None, _('ignored (DEPRECATED)')),
2933 2933 ('k', 'keep', None, _("do not modify working copy during strip")),
2934 2934 ('B', 'bookmark', '', _("remove revs only reachable from given"
2935 2935 " bookmark"))],
2936 2936 _('hg strip [-k] [-f] [-n] [-B bookmark] [-r] REV...'))
2937 2937 def strip(ui, repo, *revs, **opts):
2938 2938 """strip changesets and all their descendants from the repository
2939 2939
2940 2940 The strip command removes the specified changesets and all their
2941 2941 descendants. If the working directory has uncommitted changes, the
2942 2942 operation is aborted unless the --force flag is supplied, in which
2943 2943 case changes will be discarded.
2944 2944
2945 2945 If a parent of the working directory is stripped, then the working
2946 2946 directory will automatically be updated to the most recent
2947 2947 available ancestor of the stripped parent after the operation
2948 2948 completes.
2949 2949
2950 2950 Any stripped changesets are stored in ``.hg/strip-backup`` as a
2951 2951 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
2952 2952 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
2953 2953 where BUNDLE is the bundle file created by the strip. Note that
2954 2954 the local revision numbers will in general be different after the
2955 2955 restore.
2956 2956
2957 2957 Use the --no-backup option to discard the backup bundle once the
2958 2958 operation completes.
2959 2959
2960 2960 Strip is not a history-rewriting operation and can be used on
2961 2961 changesets in the public phase. But if the stripped changesets have
2962 2962 been pushed to a remote repository, you will likely pull them again.
2963 2963
2964 2964 Return 0 on success.
2965 2965 """
2966 2966 backup = 'all'
2967 2967 if opts.get('backup'):
2968 2968 backup = 'strip'
2969 2969 elif opts.get('no_backup') or opts.get('nobackup'):
2970 2970 backup = 'none'
2971 2971
2972 2972 cl = repo.changelog
2973 2973 revs = list(revs) + opts.get('rev')
2974 2974 revs = set(scmutil.revrange(repo, revs))
2975 2975
2976 2976 if opts.get('bookmark'):
2977 2977 mark = opts.get('bookmark')
2978 2978 marks = repo._bookmarks
2979 2979 if mark not in marks:
2980 2980 raise util.Abort(_("bookmark '%s' not found") % mark)
2981 2981
2982 2982 # If the requested bookmark is not the only one pointing to its
2983 2983 # revision, we have to delete only the bookmark and not strip
2984 2984 # anything. revsets cannot detect that case.
2985 2985 uniquebm = True
2986 2986 for m, n in marks.iteritems():
2987 2987 if m != mark and n == repo[mark].node():
2988 2988 uniquebm = False
2989 2989 break
2990 2990 if uniquebm:
2991 2991 rsrevs = repo.revs("ancestors(bookmark(%s)) - "
2992 2992 "ancestors(head() and not bookmark(%s)) - "
2993 2993 "ancestors(bookmark() and not bookmark(%s))",
2994 2994 mark, mark, mark)
2995 2995 revs.update(set(rsrevs))
2996 2996 if not revs:
2997 2997 del marks[mark]
2998 2998 marks.write()
2999 2999 ui.write(_("bookmark '%s' deleted\n") % mark)
3000 3000
3001 3001 if not revs:
3002 3002 raise util.Abort(_('empty revision set'))
3003 3003
3004 3004 descendants = set(cl.descendants(revs))
3005 3005 strippedrevs = revs.union(descendants)
3006 3006 roots = revs.difference(descendants)
3007 3007
3008 3008 update = False
3009 3009 # if one of the wdir parents is stripped we'll need
3010 3010 # to update away to an earlier revision
3011 3011 for p in repo.dirstate.parents():
3012 3012 if p != nullid and cl.rev(p) in strippedrevs:
3013 3013 update = True
3014 3014 break
3015 3015
3016 3016 rootnodes = set(cl.node(r) for r in roots)
3017 3017
3018 3018 q = repo.mq
3019 3019 if q.applied:
3020 3020 # refresh queue state if we're about to strip
3021 3021 # applied patches
3022 3022 if cl.rev(repo.lookup('qtip')) in strippedrevs:
3023 3023 q.applieddirty = True
3024 3024 start = 0
3025 3025 end = len(q.applied)
3026 3026 for i, statusentry in enumerate(q.applied):
3027 3027 if statusentry.node in rootnodes:
3028 3028 # if one of the stripped roots is an applied
3029 3029 # patch, only part of the queue is stripped
3030 3030 start = i
3031 3031 break
3032 3032 del q.applied[start:end]
3033 3033 q.savedirty()
3034 3034
3035 3035 revs = sorted(rootnodes)
3036 3036 if update and opts.get('keep'):
3037 3037 wlock = repo.wlock()
3038 3038 try:
3039 3039 urev = repo.mq.qparents(repo, revs[0])
3040 3040 repo.dirstate.rebuild(urev, repo[urev].manifest())
3041 3041 repo.dirstate.write()
3042 3042 update = False
3043 3043 finally:
3044 3044 wlock.release()
3045 3045
3046 3046 if opts.get('bookmark'):
3047 3047 del marks[mark]
3048 3048 marks.write()
3049 3049 ui.write(_("bookmark '%s' deleted\n") % mark)
3050 3050
3051 3051 repo.mq.strip(repo, revs, backup=backup, update=update,
3052 3052 force=opts.get('force'))
3053 3053
3054 3054 return 0
3055 3055
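# Recovery sketch (revision and bundle name illustrative): unless run with
# --no-backup, a strip leaves a bundle that can restore what was removed:
#
#   $ hg strip -r 42
#   saved backup bundle to .hg/strip-backup/1e9b0c3d87ab-backup.hg
#   $ hg unbundle .hg/strip-backup/1e9b0c3d87ab-backup.hg
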
3056 3056 @command("qselect",
3057 3057 [('n', 'none', None, _('disable all guards')),
3058 3058 ('s', 'series', None, _('list all guards in series file')),
3059 3059 ('', 'pop', None, _('pop to before first guarded applied patch')),
3060 3060 ('', 'reapply', None, _('pop, then reapply patches'))],
3061 3061 _('hg qselect [OPTION]... [GUARD]...'))
3062 3062 def select(ui, repo, *args, **opts):
3063 3063 '''set or print guarded patches to push
3064 3064
3065 3065 Use the :hg:`qguard` command to set or print guards on patch, then use
3066 3066 qselect to tell mq which guards to use. A patch will be pushed if
3067 3067 it has no guards or any positive guards match the currently
3068 3068 selected guard, but will not be pushed if any negative guards
3069 3069 match the current guard. For example::
3070 3070
3071 3071 qguard foo.patch -- -stable (negative guard)
3072 3072 qguard bar.patch +stable (positive guard)
3073 3073 qselect stable
3074 3074
3075 3075 This activates the "stable" guard. mq will skip foo.patch (because
3076 3076 it has a negative match) but push bar.patch (because it has a
3077 3077 positive match).
3078 3078
3079 3079 With no arguments, prints the currently active guards.
3080 3080 With one argument, sets the active guard.
3081 3081
3082 3082 Use -n/--none to deactivate guards (no other arguments needed).
3083 3083 When no guards are active, patches with positive guards are
3084 3084 skipped and patches with negative guards are pushed.
3085 3085
3086 3086 qselect can change the guards on applied patches. It does not pop
3087 3087 guarded patches by default. Use --pop to pop back to the last
3088 3088 applied patch that is not guarded. Use --reapply (which implies
3089 3089 --pop) to push back to the current patch afterwards, but skip
3090 3090 guarded patches.
3091 3091
3092 3092 Use -s/--series to print a list of all guards in the series file
3093 3093 (no other arguments needed). Use -v for more information.
3094 3094
3095 3095 Returns 0 on success.'''
3096 3096
3097 3097 q = repo.mq
3098 3098 guards = q.active()
3099 3099 if args or opts.get('none'):
3100 3100 old_unapplied = q.unapplied(repo)
3101 3101 old_guarded = [i for i in xrange(len(q.applied)) if
3102 3102 not q.pushable(i)[0]]
3103 3103 q.setactive(args)
3104 3104 q.savedirty()
3105 3105 if not args:
3106 3106 ui.status(_('guards deactivated\n'))
3107 3107 if not opts.get('pop') and not opts.get('reapply'):
3108 3108 unapplied = q.unapplied(repo)
3109 3109 guarded = [i for i in xrange(len(q.applied))
3110 3110 if not q.pushable(i)[0]]
3111 3111 if len(unapplied) != len(old_unapplied):
3112 3112 ui.status(_('number of unguarded, unapplied patches has '
3113 3113 'changed from %d to %d\n') %
3114 3114 (len(old_unapplied), len(unapplied)))
3115 3115 if len(guarded) != len(old_guarded):
3116 3116 ui.status(_('number of guarded, applied patches has changed '
3117 3117 'from %d to %d\n') %
3118 3118 (len(old_guarded), len(guarded)))
3119 3119 elif opts.get('series'):
3120 3120 guards = {}
3121 3121 noguards = 0
3122 3122 for gs in q.seriesguards:
3123 3123 if not gs:
3124 3124 noguards += 1
3125 3125 for g in gs:
3126 3126 guards.setdefault(g, 0)
3127 3127 guards[g] += 1
3128 3128 if ui.verbose:
3129 3129 guards['NONE'] = noguards
3130 3130 guards = guards.items()
3131 3131 guards.sort(key=lambda x: x[0][1:])
3132 3132 if guards:
3133 3133 ui.note(_('guards in series file:\n'))
3134 3134 for guard, count in guards:
3135 3135 ui.note('%2d ' % count)
3136 3136 ui.write(guard, '\n')
3137 3137 else:
3138 3138 ui.note(_('no guards in series file\n'))
3139 3139 else:
3140 3140 if guards:
3141 3141 ui.note(_('active guards:\n'))
3142 3142 for g in guards:
3143 3143 ui.write(g, '\n')
3144 3144 else:
3145 3145 ui.write(_('no active guards\n'))
3146 3146 reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
3147 3147 popped = False
3148 3148 if opts.get('pop') or opts.get('reapply'):
3149 3149 for i in xrange(len(q.applied)):
3150 3150 pushable, reason = q.pushable(i)
3151 3151 if not pushable:
3152 3152 ui.status(_('popping guarded patches\n'))
3153 3153 popped = True
3154 3154 if i == 0:
3155 3155 q.pop(repo, all=True)
3156 3156 else:
3157 3157 q.pop(repo, str(i - 1))
3158 3158 break
3159 3159 if popped:
3160 3160 try:
3161 3161 if reapply:
3162 3162 ui.status(_('reapplying unguarded patches\n'))
3163 3163 q.push(repo, reapply)
3164 3164 finally:
3165 3165 q.savedirty()
3166 3166
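# Example of --reapply (guard name hypothetical): "hg qselect --reapply
# stable" pops any applied patches that the new selection guards, then
# pushes back toward the previous top while skipping guarded patches.
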
3167 3167 @command("qfinish",
3168 3168 [('a', 'applied', None, _('finish all applied changesets'))],
3169 3169 _('hg qfinish [-a] [REV]...'))
3170 3170 def finish(ui, repo, *revrange, **opts):
3171 3171 """move applied patches into repository history
3172 3172
3173 3173 Finishes the specified revisions (corresponding to applied
3174 3174 patches) by moving them out of mq control into regular repository
3175 3175 history.
3176 3176
3177 3177 Accepts a revision range or the -a/--applied option. If --applied
3178 3178 is specified, all applied mq revisions are removed from mq
3179 3179 control. Otherwise, the given revisions must be at the base of the
3180 3180 stack of applied patches.
3181 3181
3182 3182 This can be especially useful if your changes have been applied to
3183 3183 an upstream repository, or if you are about to push your changes
3184 3184 to upstream.
3185 3185
3186 3186 Returns 0 on success.
3187 3187 """
3188 3188 if not opts.get('applied') and not revrange:
3189 3189 raise util.Abort(_('no revisions specified'))
3190 3190 elif opts.get('applied'):
3191 3191 revrange = ('qbase::qtip',) + revrange
3192 3192
3193 3193 q = repo.mq
3194 3194 if not q.applied:
3195 3195 ui.status(_('no patches applied\n'))
3196 3196 return 0
3197 3197
3198 3198 revs = scmutil.revrange(repo, revrange)
3199 3199 if repo['.'].rev() in revs and repo[None].files():
3200 3200 ui.warn(_('warning: uncommitted changes in the working directory\n'))
3201 3201 # queue.finish may change phases but leaves the responsibility of locking
3202 3202 # the repo to the caller to avoid deadlock with wlock. This command code is
3203 3203 # responsible for that locking.
3204 3204 lock = repo.lock()
3205 3205 try:
3206 3206 q.finish(repo, revs)
3207 3207 q.savedirty()
3208 3208 finally:
3209 3209 lock.release()
3210 3210 return 0
3211 3211
3212 3212 @command("qqueue",
3213 3213 [('l', 'list', False, _('list all available queues')),
3214 3214 ('', 'active', False, _('print name of active queue')),
3215 3215 ('c', 'create', False, _('create new queue')),
3216 3216 ('', 'rename', False, _('rename active queue')),
3217 3217 ('', 'delete', False, _('delete reference to queue')),
3218 3218 ('', 'purge', False, _('delete queue, and remove patch dir')),
3219 3219 ],
3220 3220 _('[OPTION] [QUEUE]'))
3221 3221 def qqueue(ui, repo, name=None, **opts):
3222 3222 '''manage multiple patch queues
3223 3223
3224 3224 Supports switching between different patch queues, as well as creating
3225 3225 new patch queues and deleting existing ones.
3226 3226
3227 3227 Omitting a queue name or specifying -l/--list will show you the registered
3228 3228 queues - by default the "normal" patches queue is registered. The currently
3229 3229 active queue will be marked with "(active)". Specifying --active will print
3230 3230 only the name of the active queue.
3231 3231
3232 3232 To create a new queue, use -c/--create. The queue is automatically made
3233 3233 active, except when there are applied patches from the currently
3234 3234 active queue in the repository. In that case the queue will only be
3235 3235 created, and switching to it will fail.
3236 3236
3237 3237 To delete an existing queue, use --delete. You cannot delete the currently
3238 3238 active queue.
3239 3239
3240 3240 Returns 0 on success.
3241 3241 '''
3242 3242 q = repo.mq
3243 3243 _defaultqueue = 'patches'
3244 3244 _allqueues = 'patches.queues'
3245 3245 _activequeue = 'patches.queue'
3246 3246
3247 3247 def _getcurrent():
3248 3248 cur = os.path.basename(q.path)
3249 3249 if cur.startswith('patches-'):
3250 3250 cur = cur[8:]
3251 3251 return cur
3252 3252
3253 3253 def _noqueues():
3254 3254 try:
3255 3255 fh = repo.opener(_allqueues, 'r')
3256 3256 fh.close()
3257 3257 except IOError:
3258 3258 return True
3259 3259
3260 3260 return False
3261 3261
3262 3262 def _getqueues():
3263 3263 current = _getcurrent()
3264 3264
3265 3265 try:
3266 3266 fh = repo.opener(_allqueues, 'r')
3267 3267 queues = [queue.strip() for queue in fh if queue.strip()]
3268 3268 fh.close()
3269 3269 if current not in queues:
3270 3270 queues.append(current)
3271 3271 except IOError:
3272 3272 queues = [_defaultqueue]
3273 3273
3274 3274 return sorted(queues)
3275 3275
3276 3276 def _setactive(name):
3277 3277 if q.applied:
3278 3278 raise util.Abort(_('new queue created, but cannot make active '
3279 3279 'as patches are applied'))
3280 3280 _setactivenocheck(name)
3281 3281
3282 3282 def _setactivenocheck(name):
3283 3283 fh = repo.opener(_activequeue, 'w')
3284 3284 if name != 'patches':
3285 3285 fh.write(name)
3286 3286 fh.close()
3287 3287
3288 3288 def _addqueue(name):
3289 3289 fh = repo.opener(_allqueues, 'a')
3290 3290 fh.write('%s\n' % (name,))
3291 3291 fh.close()
3292 3292
3293 3293 def _queuedir(name):
3294 3294 if name == 'patches':
3295 3295 return repo.join('patches')
3296 3296 else:
3297 3297 return repo.join('patches-' + name)
3298 3298
3299 3299 def _validname(name):
3300 3300 for n in name:
3301 3301 if n in ':\\/.':
3302 3302 return False
3303 3303 return True
3304 3304
3305 3305 def _delete(name):
3306 3306 if name not in existing:
3307 3307 raise util.Abort(_('cannot delete queue that does not exist'))
3308 3308
3309 3309 current = _getcurrent()
3310 3310
3311 3311 if name == current:
3312 3312 raise util.Abort(_('cannot delete currently active queue'))
3313 3313
3314 3314 fh = repo.opener('patches.queues.new', 'w')
3315 3315 for queue in existing:
3316 3316 if queue == name:
3317 3317 continue
3318 3318 fh.write('%s\n' % (queue,))
3319 3319 fh.close()
3320 3320 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3321 3321
3322 3322 if not name or opts.get('list') or opts.get('active'):
3323 3323 current = _getcurrent()
3324 3324 if opts.get('active'):
3325 3325 ui.write('%s\n' % (current,))
3326 3326 return
3327 3327 for queue in _getqueues():
3328 3328 ui.write('%s' % (queue,))
3329 3329 if queue == current and not ui.quiet:
3330 3330 ui.write(_(' (active)\n'))
3331 3331 else:
3332 3332 ui.write('\n')
3333 3333 return
3334 3334
3335 3335 if not _validname(name):
3336 3336 raise util.Abort(
3337 3337 _('invalid queue name, may not contain the characters ":\\/."'))
3338 3338
3339 3339 existing = _getqueues()
3340 3340
3341 3341 if opts.get('create'):
3342 3342 if name in existing:
3343 3343 raise util.Abort(_('queue "%s" already exists') % name)
3344 3344 if _noqueues():
3345 3345 _addqueue(_defaultqueue)
3346 3346 _addqueue(name)
3347 3347 _setactive(name)
3348 3348 elif opts.get('rename'):
3349 3349 current = _getcurrent()
3350 3350 if name == current:
3351 3351 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
3352 3352 if name in existing:
3353 3353 raise util.Abort(_('queue "%s" already exists') % name)
3354 3354
3355 3355 olddir = _queuedir(current)
3356 3356 newdir = _queuedir(name)
3357 3357
3358 3358 if os.path.exists(newdir):
3359 3359 raise util.Abort(_('non-queue directory "%s" already exists') %
3360 3360 newdir)
3361 3361
3362 3362 fh = repo.opener('patches.queues.new', 'w')
3363 3363 for queue in existing:
3364 3364 if queue == current:
3365 3365 fh.write('%s\n' % (name,))
3366 3366 if os.path.exists(olddir):
3367 3367 util.rename(olddir, newdir)
3368 3368 else:
3369 3369 fh.write('%s\n' % (queue,))
3370 3370 fh.close()
3371 3371 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3372 3372 _setactivenocheck(name)
3373 3373 elif opts.get('delete'):
3374 3374 _delete(name)
3375 3375 elif opts.get('purge'):
3376 3376 if name in existing:
3377 3377 _delete(name)
3378 3378 qdir = _queuedir(name)
3379 3379 if os.path.exists(qdir):
3380 3380 shutil.rmtree(qdir)
3381 3381 else:
3382 3382 if name not in existing:
3383 3383 raise util.Abort(_('use --create to create a new queue'))
3384 3384 _setactive(name)
3385 3385
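# A short qqueue session (queue name hypothetical):
#
#   $ hg qqueue --create experimental   # create and switch to a new queue
#   $ hg qqueue --list                  # shows "experimental (active)"
#   $ hg qqueue patches                 # switch back to the default queue
#   $ hg qqueue --delete experimental
#
# Queue bookkeeping lives in .hg/patches.queues and .hg/patches.queue, and a
# non-default queue stores its patches in .hg/patches-<name> (see _queuedir).
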
3386 3386 def mqphasedefaults(repo, roots):
3387 3387 """callback used to set mq changeset as secret when no phase data exists"""
3388 3388 if repo.mq.applied:
3389 3389 if repo.ui.configbool('mq', 'secret', False):
3390 3390 mqphase = phases.secret
3391 3391 else:
3392 3392 mqphase = phases.draft
3393 3393 qbase = repo[repo.mq.applied[0].node]
3394 3394 roots[mqphase].add(qbase.node())
3395 3395 return roots
3396 3396
3397 3397 def reposetup(ui, repo):
3398 3398 class mqrepo(repo.__class__):
3399 3399 @util.propertycache
3400 3400 def mq(self):
3401 3401 return queue(self.ui, self.path)
3402 3402
3403 3403 def abortifwdirpatched(self, errmsg, force=False):
3404 3404 if self.mq.applied and not force:
3405 3405 parents = self.dirstate.parents()
3406 3406 patches = [s.node for s in self.mq.applied]
3407 3407 if parents[0] in patches or parents[1] in patches:
3408 3408 raise util.Abort(errmsg)
3409 3409
3410 3410 def commit(self, text="", user=None, date=None, match=None,
3411 3411 force=False, editor=False, extra={}):
3412 3412 self.abortifwdirpatched(
3413 3413 _('cannot commit over an applied mq patch'),
3414 3414 force)
3415 3415
3416 3416 return super(mqrepo, self).commit(text, user, date, match, force,
3417 3417 editor, extra)
3418 3418
3419 3419 def checkpush(self, force, revs):
3420 3420 if self.mq.applied and not force:
3421 3421 outapplied = [e.node for e in self.mq.applied]
3422 3422 if revs:
3423 3423 # Assume applied patches have no non-patch descendants and
3424 3424 # are not on the remote already. Filter out any changeset not
3425 3425 # being pushed.
3426 3426 heads = set(revs)
3427 3427 for node in reversed(outapplied):
3428 3428 if node in heads:
3429 3429 break
3430 3430 else:
3431 3431 outapplied.pop()
3432 3432 # looking for pushed and shared changeset
3433 3433 for node in outapplied:
3434 3434 if self[node].phase() < phases.secret:
3435 3435 raise util.Abort(_('source has mq patches applied'))
3436 3436 # no non-secret patches pushed
3437 3437 super(mqrepo, self).checkpush(force, revs)
3438 3438
3439 3439 def _findtags(self):
3440 3440 '''augment tags from base class with patch tags'''
3441 3441 result = super(mqrepo, self)._findtags()
3442 3442
3443 3443 q = self.mq
3444 3444 if not q.applied:
3445 3445 return result
3446 3446
3447 3447 mqtags = [(patch.node, patch.name) for patch in q.applied]
3448 3448
3449 3449 try:
3450 3450 # for now ignore filtering business
3451 3451 self.unfiltered().changelog.rev(mqtags[-1][0])
3452 3452 except error.LookupError:
3453 3453 self.ui.warn(_('mq status file refers to unknown node %s\n')
3454 3454 % short(mqtags[-1][0]))
3455 3455 return result
3456 3456
3457 3457 mqtags.append((mqtags[-1][0], 'qtip'))
3458 3458 mqtags.append((mqtags[0][0], 'qbase'))
3459 3459 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
3460 3460 tags = result[0]
3461 3461 for patch in mqtags:
3462 3462 if patch[1] in tags:
3463 3463 self.ui.warn(_('tag %s overrides mq patch of the same '
3464 3464 'name\n') % patch[1])
3465 3465 else:
3466 3466 tags[patch[1]] = patch[0]
3467 3467
3468 3468 return result
3469 3469
3470 3470 if repo.local():
3471 3471 repo.__class__ = mqrepo
3472 3472
3473 3473 repo._phasedefaults.append(mqphasedefaults)
3474 3474
3475 3475 def mqimport(orig, ui, repo, *args, **kwargs):
3476 3476 if (util.safehasattr(repo, 'abortifwdirpatched')
3477 3477 and not kwargs.get('no_commit', False)):
3478 3478 repo.abortifwdirpatched(_('cannot import over an applied patch'),
3479 3479 kwargs.get('force'))
3480 3480 return orig(ui, repo, *args, **kwargs)
3481 3481
3482 3482 def mqinit(orig, ui, *args, **kwargs):
3483 3483 mq = kwargs.pop('mq', None)
3484 3484
3485 3485 if not mq:
3486 3486 return orig(ui, *args, **kwargs)
3487 3487
3488 3488 if args:
3489 3489 repopath = args[0]
3490 3490 if not hg.islocal(repopath):
3491 3491 raise util.Abort(_('only a local queue repository '
3492 3492 'may be initialized'))
3493 3493 else:
3494 3494 repopath = cmdutil.findrepo(os.getcwd())
3495 3495 if not repopath:
3496 3496 raise util.Abort(_('there is no Mercurial repository here '
3497 3497 '(.hg not found)'))
3498 3498 repo = hg.repository(ui, repopath)
3499 3499 return qinit(ui, repo, True)
3500 3500
3501 3501 def mqcommand(orig, ui, repo, *args, **kwargs):
3502 3502 """Add --mq option to operate on patch repository instead of main"""
3503 3503
3504 3504 # some commands do not like getting unknown options
3505 3505 mq = kwargs.pop('mq', None)
3506 3506
3507 3507 if not mq:
3508 3508 return orig(ui, repo, *args, **kwargs)
3509 3509
3510 3510 q = repo.mq
3511 3511 r = q.qrepo()
3512 3512 if not r:
3513 3513 raise util.Abort(_('no queue repository'))
3514 3514 return orig(r.ui, r, *args, **kwargs)
3515 3515
3516 3516 def summary(orig, ui, repo, *args, **kwargs):
3517 3517 r = orig(ui, repo, *args, **kwargs)
3518 3518 q = repo.mq
3519 3519 m = []
3520 3520 a, u = len(q.applied), len(q.unapplied(repo))
3521 3521 if a:
3522 3522 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3523 3523 if u:
3524 3524 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3525 3525 if m:
3526 3526 # i18n: column positioning for "hg summary"
3527 3527 ui.write(_("mq: %s\n") % ', '.join(m))
3528 3528 else:
3529 3529 # i18n: column positioning for "hg summary"
3530 3530 ui.note(_("mq: (empty queue)\n"))
3531 3531 return r
3532 3532
3533 3533 def revsetmq(repo, subset, x):
3534 3534 """``mq()``
3535 3535 Changesets managed by MQ.
3536 3536 """
3537 3537 revset.getargs(x, 0, 0, _("mq takes no arguments"))
3538 3538 applied = set([repo[r.node].rev() for r in repo.mq.applied])
3539 3539 return [r for r in subset if r in applied]
3540 3540
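A hedged usage sketch: once the symbol is registered (see extsetup below), the predicate composes with other revsets; `repo` is assumed to be an mq-enabled local repository.

# programmatic equivalent of "hg log -r 'mq() and draft()'"
applied_drafts = repo.revs('mq() and draft()')
for r in applied_drafts:
    repo.ui.write('%d:%s\n' % (r, repo[r]))   # rev:short-hash per line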
3541 3541 # tell hggettext to extract docstrings from these functions:
3542 3542 i18nfunctions = [revsetmq]
3543 3543
3544 3544 def extsetup(ui):
3545 3545 # Ensure mq wrappers are called first, regardless of extension load order,
3546 3546 # by NOT wrapping in uisetup() and instead deferring to init stage two here.
3547 3547 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3548 3548
3549 3549 extensions.wrapcommand(commands.table, 'import', mqimport)
3550 3550 extensions.wrapcommand(commands.table, 'summary', summary)
3551 3551
3552 3552 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3553 3553 entry[1].extend(mqopt)
3554 3554
3555 3555 nowrap = set(commands.norepo.split(" "))
3556 3556
3557 3557 def dotable(cmdtable):
3558 3558 for cmd in cmdtable.keys():
3559 3559 cmd = cmdutil.parsealiases(cmd)[0]
3560 3560 if cmd in nowrap:
3561 3561 continue
3562 3562 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3563 3563 entry[1].extend(mqopt)
3564 3564
3565 3565 dotable(commands.table)
3566 3566
3567 3567 for extname, extmodule in extensions.extensions():
3568 3568 if extmodule.__file__ != __file__:
3569 3569 dotable(getattr(extmodule, 'cmdtable', {}))
3570 3570
3571 3571 revset.symbols['mq'] = revsetmq
3572 3572
3573 3573 colortable = {'qguard.negative': 'red',
3574 3574 'qguard.positive': 'yellow',
3575 3575 'qguard.unguarded': 'green',
3576 3576 'qseries.applied': 'blue bold underline',
3577 3577 'qseries.guarded': 'black bold',
3578 3578 'qseries.missing': 'red bold',
3579 3579 'qseries.unapplied': 'black bold'}
3580 3580
3581 3581 commands.inferrepo += " qnew qrefresh qdiff qcommit"
@@ -1,780 +1,780 b''
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 http://mercurial.selenic.com/wiki/RebaseExtension
15 15 '''
16 16
17 17 from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks
18 18 from mercurial import extensions, patch, scmutil, phases, obsolete
19 19 from mercurial.commands import templateopts
20 20 from mercurial.node import nullrev
21 21 from mercurial.lock import release
22 22 from mercurial.i18n import _
23 23 import os, errno
24 24
25 25 nullmerge = -2
26 26 revignored = -3
27 27
28 28 cmdtable = {}
29 29 command = cmdutil.command(cmdtable)
30 30 testedwith = 'internal'
31 31
32 32 @command('rebase',
33 33 [('s', 'source', '',
34 34 _('rebase from the specified changeset'), _('REV')),
35 35 ('b', 'base', '',
36 36 _('rebase from the base of the specified changeset '
37 37 '(up to greatest common ancestor of base and dest)'),
38 38 _('REV')),
39 39 ('r', 'rev', [],
40 40 _('rebase these revisions'),
41 41 _('REV')),
42 42 ('d', 'dest', '',
43 43 _('rebase onto the specified changeset'), _('REV')),
44 44 ('', 'collapse', False, _('collapse the rebased changesets')),
45 45 ('m', 'message', '',
46 46 _('use text as collapse commit message'), _('TEXT')),
47 47 ('e', 'edit', False, _('invoke editor on commit messages')),
48 48 ('l', 'logfile', '',
49 49 _('read collapse commit message from file'), _('FILE')),
50 50 ('', 'keep', False, _('keep original changesets')),
51 51 ('', 'keepbranches', False, _('keep original branch names')),
52 52 ('D', 'detach', False, _('(DEPRECATED)')),
53 53 ('t', 'tool', '', _('specify merge tool')),
54 54 ('c', 'continue', False, _('continue an interrupted rebase')),
55 55 ('a', 'abort', False, _('abort an interrupted rebase'))] +
56 56 templateopts,
57 57 _('[-s REV | -b REV] [-d REV] [OPTION]'))
58 58 def rebase(ui, repo, **opts):
59 59 """move changeset (and descendants) to a different branch
60 60
61 61 Rebase uses repeated merging to graft changesets from one part of
62 62 history (the source) onto another (the destination). This can be
63 63 useful for linearizing *local* changes relative to a master
64 64 development tree.
65 65
66 66 You should not rebase changesets that have already been shared
67 67 with others. Doing so will force everybody else to perform the
68 68 same rebase or they will end up with duplicated changesets after
69 69 pulling in your rebased changesets.
70 70
71 71 In its default configuration, Mercurial will prevent you from
72 72 rebasing published changes. See :hg:`help phases` for details.
73 73
74 74 If you don't specify a destination changeset (``-d/--dest``),
75 75 rebase uses the tipmost head of the current named branch as the
76 76 destination. (The destination changeset is not modified by
77 77 rebasing, but new changesets are added as its descendants.)
78 78
79 79 You can specify which changesets to rebase in two ways: as a
80 80 "source" changeset or as a "base" changeset. Both are shorthand
81 81 for a topologically related set of changesets (the "source
82 82 branch"). If you specify source (``-s/--source``), rebase will
83 83 rebase that changeset and all of its descendants onto dest. If you
84 84 specify base (``-b/--base``), rebase will select ancestors of base
85 85 back to but not including the common ancestor with dest. Thus,
86 86 ``-b`` is less precise but more convenient than ``-s``: you can
87 87 specify any changeset in the source branch, and rebase will select
88 88 the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
89 89 uses the parent of the working directory as the base.
90 90
91 91 For advanced usage, a third way is available through the ``--rev``
92 92 option. It allows you to specify an arbitrary set of changesets to
93 93 rebase. Descendants of revs you specify with this option are not
94 94 automatically included in the rebase.
95 95
96 96 By default, rebase recreates the changesets in the source branch
97 97 as descendants of dest and then destroys the originals. Use
98 98 ``--keep`` to preserve the original source changesets. Some
99 99 changesets in the source branch (e.g. merges from the destination
100 100 branch) may be dropped if they no longer contribute any change.
101 101
102 102 One result of the rules for selecting the destination changeset
103 103 and source branch is that, unlike ``merge``, rebase will do
104 104 nothing if you are at the latest (tipmost) head of a named branch
105 105 with two heads. You need to explicitly specify source and/or
106 106 destination (or ``update`` to the other head, if it's the head of
107 107 the intended source branch).
108 108
109 109 If a rebase is interrupted to manually resolve a merge, it can be
110 110 continued with --continue/-c or aborted with --abort/-a.
111 111
112 112 Returns 0 on success, 1 if nothing to rebase.
113 113 """
114 114 originalwd = target = None
115 115 external = nullrev
116 116 state = {}
117 117 skipped = set()
118 118 targetancestors = set()
119 119
120 120 editor = None
121 121 if opts.get('edit'):
122 122 editor = cmdutil.commitforceeditor
123 123
124 124 lock = wlock = None
125 125 try:
126 126 wlock = repo.wlock()
127 127 lock = repo.lock()
128 128
129 129 # Validate input and define rebasing points
130 130 destf = opts.get('dest', None)
131 131 srcf = opts.get('source', None)
132 132 basef = opts.get('base', None)
133 133 revf = opts.get('rev', [])
134 134 contf = opts.get('continue')
135 135 abortf = opts.get('abort')
136 136 collapsef = opts.get('collapse', False)
137 137 collapsemsg = cmdutil.logmessage(ui, opts)
138 138 extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion
139 139 keepf = opts.get('keep', False)
140 140 keepbranchesf = opts.get('keepbranches', False)
141 141 # keepopen is not meant for use on the command line, but by
142 142 # other extensions
143 143 keepopen = opts.get('keepopen', False)
144 144
145 145 if collapsemsg and not collapsef:
146 146 raise util.Abort(
147 147 _('message can only be specified with collapse'))
148 148
149 149 if contf or abortf:
150 150 if contf and abortf:
151 151 raise util.Abort(_('cannot use both abort and continue'))
152 152 if collapsef:
153 153 raise util.Abort(
154 154 _('cannot use collapse with continue or abort'))
155 155 if srcf or basef or destf:
156 156 raise util.Abort(
157 157 _('abort and continue do not allow specifying revisions'))
158 158 if opts.get('tool', False):
159 159 ui.warn(_('tool option will be ignored\n'))
160 160
161 161 (originalwd, target, state, skipped, collapsef, keepf,
162 162 keepbranchesf, external) = restorestatus(repo)
163 163 if abortf:
164 164 return abort(repo, originalwd, target, state)
165 165 else:
166 166 if srcf and basef:
167 167 raise util.Abort(_('cannot specify both a '
168 168 'source and a base'))
169 169 if revf and basef:
170 170 raise util.Abort(_('cannot specify both a '
171 171 'revision and a base'))
172 172 if revf and srcf:
173 173 raise util.Abort(_('cannot specify both a '
174 174 'revision and a source'))
175 175
176 176 cmdutil.bailifchanged(repo)
177 177
178 178 if not destf:
179 179 # Destination defaults to the latest revision in the
180 180 # current branch
181 181 branch = repo[None].branch()
182 182 dest = repo[branch]
183 183 else:
184 184 dest = scmutil.revsingle(repo, destf)
185 185
186 186 if revf:
187 187 rebaseset = repo.revs('%lr', revf)
188 188 elif srcf:
189 189 src = scmutil.revrange(repo, [srcf])
190 190 rebaseset = repo.revs('(%ld)::', src)
191 191 else:
192 192 base = scmutil.revrange(repo, [basef or '.'])
193 193 rebaseset = repo.revs(
194 194 '(children(ancestor(%ld, %d)) and ::(%ld))::',
195 195 base, dest, base)
196 196 if rebaseset:
197 197 root = min(rebaseset)
198 198 else:
199 199 root = None
200 200
201 201 if not rebaseset:
202 202 repo.ui.debug('base is ancestor of destination\n')
203 203 result = None
204 204 elif (not (keepf or obsolete._enabled)
205 205 and repo.revs('first(children(%ld) - %ld)',
206 206 rebaseset, rebaseset)):
207 207 raise util.Abort(
208 208 _("can't remove original changesets with"
209 209 " unrebased descendants"),
210 210 hint=_('use --keep to keep original changesets'))
211 211 elif not keepf and not repo[root].mutable():
212 212 raise util.Abort(_("can't rebase immutable changeset %s")
213 213 % repo[root],
214 214 hint=_('see hg help phases for details'))
215 215 else:
216 216 result = buildstate(repo, dest, rebaseset, collapsef)
217 217
218 218 if not result:
219 219 # Empty state built, nothing to rebase
220 220 ui.status(_('nothing to rebase\n'))
221 221 return 1
222 222 else:
223 223 originalwd, target, state = result
224 224 if collapsef:
225 225 targetancestors = repo.changelog.ancestors([target],
226 226 inclusive=True)
227 227 external = checkexternal(repo, state, targetancestors)
228 228
229 229 if keepbranchesf:
230 230 assert not extrafn, 'cannot use both keepbranches and extrafn'
231 231 def extrafn(ctx, extra):
232 232 extra['branch'] = ctx.branch()
233 233 if collapsef:
234 234 branches = set()
235 235 for rev in state:
236 236 branches.add(repo[rev].branch())
237 237 if len(branches) > 1:
238 238 raise util.Abort(_('cannot collapse multiple named '
239 239 'branches'))
240 240
241 241
242 242 # Rebase
243 243 if not targetancestors:
244 244 targetancestors = repo.changelog.ancestors([target], inclusive=True)
245 245
246 246 # Keep track of the current bookmarks in order to reset them later
247 247 currentbookmarks = repo._bookmarks.copy()
248 248 activebookmark = repo._bookmarkcurrent
249 249 if activebookmark:
250 250 bookmarks.unsetcurrent(repo)
251 251
252 252 sortedstate = sorted(state)
253 253 total = len(sortedstate)
254 254 pos = 0
255 255 for rev in sortedstate:
256 256 pos += 1
257 257 if state[rev] == -1:
258 258 ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
259 259 _('changesets'), total)
260 260 storestatus(repo, originalwd, target, state, collapsef, keepf,
261 261 keepbranchesf, external)
262 262 p1, p2 = defineparents(repo, rev, target, state,
263 263 targetancestors)
264 264 if len(repo.parents()) == 2:
265 265 repo.ui.debug('resuming interrupted rebase\n')
266 266 else:
267 267 try:
268 268 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
269 269 stats = rebasenode(repo, rev, p1, state, collapsef)
270 270 if stats and stats[3] > 0:
271 271 raise util.Abort(_('unresolved conflicts (see hg '
272 272 'resolve, then hg rebase --continue)'))
273 273 finally:
274 274 ui.setconfig('ui', 'forcemerge', '')
275 275 cmdutil.duplicatecopies(repo, rev, target)
276 276 if not collapsef:
277 277 newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
278 278 editor=editor)
279 279 else:
280 280 # Skip commit if we are collapsing
281 281 repo.setparents(repo[p1].node())
282 282 newrev = None
283 283 # Update the state
284 284 if newrev is not None:
285 285 state[rev] = repo[newrev].rev()
286 286 else:
287 287 if not collapsef:
288 288 ui.note(_('no changes, revision %d skipped\n') % rev)
289 289 ui.debug('next revision set to %s\n' % p1)
290 290 skipped.add(rev)
291 291 state[rev] = p1
292 292
293 293 ui.progress(_('rebasing'), None)
294 294 ui.note(_('rebase merging completed\n'))
295 295
296 296 if collapsef and not keepopen:
297 297 p1, p2 = defineparents(repo, min(state), target,
298 298 state, targetancestors)
299 299 if collapsemsg:
300 300 commitmsg = collapsemsg
301 301 else:
302 302 commitmsg = 'Collapsed revision'
303 303 for rebased in state:
304 304 if rebased not in skipped and state[rebased] > nullmerge:
305 305 commitmsg += '\n* %s' % repo[rebased].description()
306 306 commitmsg = ui.edit(commitmsg, repo.ui.username())
307 307 newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
308 308 extrafn=extrafn, editor=editor)
309 309
310 310 if 'qtip' in repo.tags():
311 311 updatemq(repo, state, skipped, **opts)
312 312
313 313 if currentbookmarks:
314 314 # Nodeids are needed to reset bookmarks
315 315 nstate = {}
316 316 for k, v in state.iteritems():
317 317 if v > nullmerge:
318 318 nstate[repo[k].node()] = repo[v].node()
319 319 # XXX this is the same as dest.node() for the non-continue path --
320 320 # this should probably be cleaned up
321 321 targetnode = repo[target].node()
322 322
323 323 if not keepf:
324 324 collapsedas = None
325 325 if collapsef:
326 326 collapsedas = newrev
327 327 clearrebased(ui, repo, state, skipped, collapsedas)
328 328
329 329 if currentbookmarks:
330 330 updatebookmarks(repo, targetnode, nstate, currentbookmarks)
331 331
332 332 clearstatus(repo)
333 333 ui.note(_("rebase completed\n"))
334 334 util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
335 335 if skipped:
336 336 ui.note(_("%d revisions have been skipped\n") % len(skipped))
337 337
338 338 if (activebookmark and
339 339 repo['tip'].node() == repo._bookmarks[activebookmark]):
340 340 bookmarks.setcurrent(repo, activebookmark)
341 341
342 342 finally:
343 343 release(lock, wlock)
344 344
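As a sketch of the selection logic above (not an alternative implementation), the --source and --base forms reduce to the revsets used in the body; `srcrev`, `baserev` and `destrev` are hypothetical revision numbers:

# --source REV: the changeset and all of its descendants
srcset = repo.revs('(%d)::', srcrev)
# --base REV: walk from REV back to, but not including, the common
# ancestor with the destination, then take everything descending from it
baseset = repo.revs('(children(ancestor(%d, %d)) and ::(%d))::',
                    baserev, destrev, baserev)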
345 345 def checkexternal(repo, state, targetancestors):
346 346 """Check whether one or more external revisions need to be taken in
347 347 consideration. In the latter case, abort.
348 348 """
349 349 external = nullrev
350 350 source = min(state)
351 351 for rev in state:
352 352 if rev == source:
353 353 continue
354 354 # Check externals and fail if there is more than one
355 355 for p in repo[rev].parents():
356 356 if (p.rev() not in state
357 357 and p.rev() not in targetancestors):
358 358 if external != nullrev:
359 359 raise util.Abort(_('unable to collapse, there is more '
360 360 'than one external parent'))
361 361 external = p.rev()
362 362 return external
363 363
364 364 def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None):
365 365 'Commit the changes and store useful information in extra'
366 366 try:
367 367 repo.setparents(repo[p1].node(), repo[p2].node())
368 368 ctx = repo[rev]
369 369 if commitmsg is None:
370 370 commitmsg = ctx.description()
371 371 extra = {'rebase_source': ctx.hex()}
372 372 if extrafn:
373 373 extrafn(ctx, extra)
374 374 # Commit might fail if unresolved files exist
375 375 newrev = repo.commit(text=commitmsg, user=ctx.user(),
376 376 date=ctx.date(), extra=extra, editor=editor)
377 377 repo.dirstate.setbranch(repo[newrev].branch())
378 378 targetphase = max(ctx.phase(), phases.draft)
379 379 # retractboundary doesn't overwrite upper phase inherited from parent
380 380 newnode = repo[newrev].node()
381 381 if newnode:
382 382 phases.retractboundary(repo, targetphase, [newnode])
383 383 return newrev
384 384 except util.Abort:
385 385 # Invalidate the previous setparents
386 386 repo.dirstate.invalidate()
387 387 raise
388 388
389 389 def rebasenode(repo, rev, p1, state, collapse):
390 390 'Rebase a single revision'
391 391 # Merge phase
392 392 # Update to target and merge it with local
393 393 if repo['.'].rev() != repo[p1].rev():
394 394 repo.ui.debug(" update to %d:%s\n" % (repo[p1].rev(), repo[p1]))
395 395 merge.update(repo, p1, False, True, False)
396 396 else:
397 397 repo.ui.debug(" already in target\n")
398 398 repo.dirstate.write()
399 399 repo.ui.debug(" merge against %d:%s\n" % (repo[rev].rev(), repo[rev]))
400 400 base = None
401 401 if repo[rev].rev() != repo[min(state)].rev():
402 402 base = repo[rev].p1().node()
403 403 # When collapsing in-place, the parent is the common ancestor, so we
404 404 # have to allow merging with it.
405 405 return merge.update(repo, rev, True, True, False, base, collapse)
406 406
407 407 def nearestrebased(repo, rev, state):
408 408 """return the nearest ancestors of rev in the rebase result"""
409 409 rebased = [r for r in state if state[r] > nullmerge]
410 410 candidates = repo.revs('max(%ld and (::%d))', rebased, rev)
411 411 if candidates:
412 412 return state[candidates[0]]
413 413 else:
414 414 return None
415 415
416 416 def defineparents(repo, rev, target, state, targetancestors):
417 417 'Return the new parent relationship of the revision that will be rebased'
418 418 parents = repo[rev].parents()
419 419 p1 = p2 = nullrev
420 420
421 421 P1n = parents[0].rev()
422 422 if P1n in targetancestors:
423 423 p1 = target
424 424 elif P1n in state:
425 425 if state[P1n] == nullmerge:
426 426 p1 = target
427 427 elif state[P1n] == revignored:
428 428 p1 = nearestrebased(repo, P1n, state)
429 429 if p1 is None:
430 430 p1 = target
431 431 else:
432 432 p1 = state[P1n]
433 433 else: # P1n external
434 434 p1 = target
435 435 p2 = P1n
436 436
437 437 if len(parents) == 2 and parents[1].rev() not in targetancestors:
438 438 P2n = parents[1].rev()
439 439 # interesting second parent
440 440 if P2n in state:
441 441 if p1 == target: # P1n in targetancestors or external
442 442 p1 = state[P2n]
443 443 elif state[P2n] == revignored:
444 444 p2 = nearestrebased(repo, P2n, state)
445 445 if p2 is None:
446 446 # no ancestors rebased yet, detach
447 447 p2 = target
448 448 else:
449 449 p2 = state[P2n]
450 450 else: # P2n external
451 451 if p2 != nullrev: # P1n external too => rev is a merged revision
452 452 raise util.Abort(_('cannot use revision %d as base, result '
453 453 'would have 3 parents') % rev)
454 454 p2 = P2n
455 455 repo.ui.debug(" future parents are %d and %d\n" %
456 456 (repo[p1].rev(), repo[p2].rev()))
457 457 return p1, p2
458 458
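A small worked example under assumed state: rebasing the linear chain {1, 2} (2 is a child of 1, 1 a child of 0) onto target t, where 0 is already an ancestor of t:

# state starts as {1: nullrev, 2: nullrev} (nullrev == -1)
# defineparents(repo, 1, t, state, targetancestors) -> (t, nullrev)
#   1's parent 0 is in targetancestors, so p1 becomes the target
# after committing the rebased copy 1', state[1] = 1'
# defineparents(repo, 2, t, state, targetancestors) -> (1', nullrev)
#   2's parent 1 is in state, so p1 becomes its rebased copy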
459 459 def isagitpatch(repo, patchname):
460 460 'Return true if the given patch is in git format'
461 461 mqpatch = os.path.join(repo.mq.path, patchname)
462 462 for line in patch.linereader(file(mqpatch, 'rb')):
463 463 if line.startswith('diff --git'):
464 464 return True
465 465 return False
466 466
467 467 def updatemq(repo, state, skipped, **opts):
468 468 'Update rebased mq patches - finalize and then import them'
469 469 mqrebase = {}
470 470 mq = repo.mq
471 471 original_series = mq.fullseries[:]
472 472 skippedpatches = set()
473 473
474 474 for p in mq.applied:
475 475 rev = repo[p.node].rev()
476 476 if rev in state:
477 477 repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
478 478 (rev, p.name))
479 479 mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
480 480 else:
481 481 # Applied but not rebased, not sure this should happen
482 482 skippedpatches.add(p.name)
483 483
484 484 if mqrebase:
485 485 mq.finish(repo, mqrebase.keys())
486 486
487 487 # We must start import from the newest revision
488 488 for rev in sorted(mqrebase, reverse=True):
489 489 if rev not in skipped:
490 490 name, isgit = mqrebase[rev]
491 491 repo.ui.debug('import mq patch %d (%s)\n' % (state[rev], name))
492 492 mq.qimport(repo, (), patchname=name, git=isgit,
493 493 rev=[str(state[rev])])
494 494 else:
495 495 # Rebased and skipped
496 496 skippedpatches.add(mqrebase[rev][0])
497 497
498 498 # Patches were either applied, rebased and imported in
499 499 # order; applied and removed; or unapplied. Discard the removed
500 500 # ones while preserving the original series order and guards.
501 501 newseries = [s for s in original_series
502 502 if mq.guard_re.split(s, 1)[0] not in skippedpatches]
503 503 mq.fullseries[:] = newseries
504 504 mq.seriesdirty = True
505 505 mq.savedirty()
506 506
507 507 def updatebookmarks(repo, targetnode, nstate, originalbookmarks):
508 508 'Move bookmarks to their correct changesets, and delete divergent ones'
509 509 marks = repo._bookmarks
510 510 for k, v in originalbookmarks.iteritems():
511 511 if v in nstate:
512 512 # update the bookmarks for revs that have moved
513 513 marks[k] = nstate[v]
514 514 bookmarks.deletedivergent(repo, [targetnode], k)
515 515
516 516 marks.write()
517 517
518 518 def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
519 519 external):
520 520 'Store the current status to allow recovery'
521 521 f = repo.opener("rebasestate", "w")
522 522 f.write(repo[originalwd].hex() + '\n')
523 523 f.write(repo[target].hex() + '\n')
524 524 f.write(repo[external].hex() + '\n')
525 525 f.write('%d\n' % int(collapse))
526 526 f.write('%d\n' % int(keep))
527 527 f.write('%d\n' % int(keepbranches))
528 528 for d, v in state.iteritems():
529 529 oldrev = repo[d].hex()
530 530 if v > nullmerge:
531 531 newrev = repo[v].hex()
532 532 else:
533 533 newrev = v
534 534 f.write("%s:%s\n" % (oldrev, newrev))
535 535 f.close()
536 536 repo.ui.debug('rebase status stored\n')
537 537
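The resulting .hg/rebasestate layout, reconstructed here as a descriptive sketch from the writes above and the parser in restorestatus:

# line 0: hex node of the original working directory parent
# line 1: hex node of the rebase target
# line 2: hex node of the external parent (nullid when there is none)
# line 3: collapse flag, "0" or "1"
# line 4: keep flag, "0" or "1"
# line 5: keepbranches flag, "0" or "1"
# then one "<oldrev hex>:<newrev>" line per entry in state, where
# <newrev> is a hex node, or -2 (nullmerge) / -3 (revignored)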
538 538 def clearstatus(repo):
539 539 'Remove the status files'
540 540 util.unlinkpath(repo.join("rebasestate"), ignoremissing=True)
541 541
542 542 def restorestatus(repo):
543 543 'Restore a previously stored status'
544 544 try:
545 545 target = None
546 546 collapse = False
547 547 external = nullrev
548 548 state = {}
549 549 f = repo.opener("rebasestate")
550 550 for i, l in enumerate(f.read().splitlines()):
551 551 if i == 0:
552 552 originalwd = repo[l].rev()
553 553 elif i == 1:
554 554 target = repo[l].rev()
555 555 elif i == 2:
556 556 external = repo[l].rev()
557 557 elif i == 3:
558 558 collapse = bool(int(l))
559 559 elif i == 4:
560 560 keep = bool(int(l))
561 561 elif i == 5:
562 562 keepbranches = bool(int(l))
563 563 else:
564 564 oldrev, newrev = l.split(':')
565 565 if newrev in (str(nullmerge), str(revignored)):
566 566 state[repo[oldrev].rev()] = int(newrev)
567 567 else:
568 568 state[repo[oldrev].rev()] = repo[newrev].rev()
569 569 skipped = set()
570 570 # recompute the set of skipped revs
571 571 if not collapse:
572 572 seen = set([target])
573 573 for old, new in sorted(state.items()):
574 574 if new != nullrev and new in seen:
575 575 skipped.add(old)
576 576 seen.add(new)
577 577 repo.ui.debug('computed skipped revs: %s\n' % skipped)
578 578 repo.ui.debug('rebase status resumed\n')
579 579 return (originalwd, target, state, skipped,
580 580 collapse, keep, keepbranches, external)
581 581 except IOError, err:
582 582 if err.errno != errno.ENOENT:
583 583 raise
584 584 raise util.Abort(_('no rebase in progress'))
585 585
586 586 def abort(repo, originalwd, target, state):
587 587 'Restore the repository to its original state'
588 588 dstates = [s for s in state.values() if s != nullrev]
589 589 immutable = [d for d in dstates if not repo[d].mutable()]
590 590 if immutable:
591 591 raise util.Abort(_("can't abort rebase due to immutable changesets %s")
592 592 % ', '.join(str(repo[r]) for r in immutable),
593 593 hint=_('see hg help phases for details'))
594 594
595 595 descendants = set()
596 596 if dstates:
597 597 descendants = set(repo.changelog.descendants(dstates))
598 598 if descendants - set(dstates):
599 599 repo.ui.warn(_("warning: new changesets detected on target branch, "
600 600 "can't abort\n"))
601 601 return -1
602 602 else:
603 603 # Strip from the first rebased revision
604 604 merge.update(repo, repo[originalwd].rev(), False, True, False)
605 605 rebased = filter(lambda x: x > -1 and x != target, state.values())
606 606 if rebased:
607 607 strippoints = [c.node() for c in repo.set('roots(%ld)', rebased)]
608 608 # no backup of rebased cset versions needed
609 609 repair.strip(repo.ui, repo, strippoints)
610 610 clearstatus(repo)
611 611 repo.ui.warn(_('rebase aborted\n'))
612 612 return 0
613 613
614 614 def buildstate(repo, dest, rebaseset, collapse):
615 615 '''Define which revisions are going to be rebased and where
616 616
617 617 repo: repo
618 618 dest: context
619 619 rebaseset: set of rev
620 620 '''
621 621
622 622 # This check isn't strictly necessary, since mq detects commits over an
623 623 # applied patch. But it prevents messing up the working directory when
624 624 # a partially completed rebase is blocked by mq.
625 625 if 'qtip' in repo.tags() and (dest.node() in
626 626 [s.node for s in repo.mq.applied]):
627 627 raise util.Abort(_('cannot rebase onto an applied mq patch'))
628 628
629 629 roots = list(repo.set('roots(%ld)', rebaseset))
630 630 if not roots:
631 631 raise util.Abort(_('no matching revisions'))
632 632 roots.sort()
633 633 state = {}
634 634 detachset = set()
635 635 for root in roots:
636 636 commonbase = root.ancestor(dest)
637 637 if commonbase == root:
638 638 raise util.Abort(_('source is ancestor of destination'))
639 639 if commonbase == dest:
640 640 samebranch = root.branch() == dest.branch()
641 641 if not collapse and samebranch and root in dest.children():
642 642 repo.ui.debug('source is a child of destination\n')
643 643 return None
644 644
645 645 repo.ui.debug('rebase onto %d starting from %s\n' % (dest, roots))
646 646 state.update(dict.fromkeys(rebaseset, nullrev))
647 647 # Rebase tries to turn <dest> into a parent of <root> while
648 648 # preserving the number of parents of rebased changesets:
649 649 #
650 650 # - A changeset with a single parent will always be rebased as a
651 651 # changeset with a single parent.
652 652 #
653 653 # - A merge will be rebased as a merge unless its parents are both
654 654 # ancestors of <dest> or are themselves in the rebased set and
655 655 # pruned while rebased.
656 656 #
657 657 # If one parent of <root> is an ancestor of <dest>, the rebased
658 658 # version of this parent will be <dest>. This is always true with
659 659 # --base option.
660 660 #
661 661 # Otherwise, we need to *replace* the original parents with
662 662 # <dest>. This "detaches" the rebased set from its former location
663 663 # and rebases it onto <dest>. Changes introduced by ancestors of
664 664 # <root> not common with <dest> (the detachset, marked as
665 665 # nullmerge) are "removed" from the rebased changesets.
666 666 #
667 667 # - If <root> has a single parent, set it to <dest>.
668 668 #
669 669 # - If <root> is a merge, we cannot decide which parent to
670 670 # replace, the rebase operation is not clearly defined.
671 671 #
672 672 # The table below sums up this behavior:
673 673 #
674 674 # +------------------+----------------------+-------------------------+
675 675 # | | one parent | merge |
676 676 # +------------------+----------------------+-------------------------+
677 677 # | parent in | new parent is <dest> | parents in ::<dest> are |
678 678 # | ::<dest> | | remapped to <dest> |
679 679 # +------------------+----------------------+-------------------------+
680 680 # | unrelated source | new parent is <dest> | ambiguous, abort |
681 681 # +------------------+----------------------+-------------------------+
682 682 #
683 683 # The actual abort is handled by `defineparents`
684 684 if len(root.parents()) <= 1:
685 685 # ancestors of <root> not ancestors of <dest>
686 686 detachset.update(repo.changelog.findmissingrevs([commonbase.rev()],
687 687 [root.rev()]))
688 688 for r in detachset:
689 689 if r not in state:
690 690 state[r] = nullmerge
691 691 if len(roots) > 1:
692 692 # If we have multiple roots, we may have "holes" in the rebase set.
693 693 # Rebase roots that descend from those "holes" should not be detached as
694 694 # other roots are. We use the special `revignored` to inform rebase that
695 # the revision should be ignored but that `defineparent` should search
696 # a rebase destination that make sense regarding rebaset topology.
695 # the revision should be ignored but that `defineparents` should search
696 # for a rebase destination that makes sense regarding the rebased topology.
697 697 rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset))
698 698 for ignored in set(rebasedomain) - set(rebaseset):
699 699 state[ignored] = revignored
700 700 return repo['.'].rev(), dest.rev(), state
701 701
702 702 def clearrebased(ui, repo, state, skipped, collapsedas=None):
703 703 """dispose of rebased revision at the end of the rebase
704 704
705 705 If `collapsedas` is not None, the rebase was a collapse whose result if the
706 706 `collapsedas` node."""
707 707 if obsolete._enabled:
708 708 markers = []
709 709 for rev, newrev in sorted(state.items()):
710 710 if newrev >= 0:
711 711 if rev in skipped:
712 712 succs = ()
713 713 elif collapsedas is not None:
714 714 succs = (repo[collapsedas],)
715 715 else:
716 716 succs = (repo[newrev],)
717 717 markers.append((repo[rev], succs))
718 718 if markers:
719 719 obsolete.createmarkers(repo, markers)
720 720 else:
721 721 rebased = [rev for rev in state if state[rev] > nullmerge]
722 722 if rebased:
723 723 stripped = []
724 724 for root in repo.set('roots(%ld)', rebased):
725 725 if set(repo.changelog.descendants([root.rev()])) - set(state):
726 726 ui.warn(_("warning: new changesets detected "
727 727 "on source branch, not stripping\n"))
728 728 else:
729 729 stripped.append(root.node())
730 730 if stripped:
731 731 # backup the old csets by default
732 732 repair.strip(ui, repo, stripped, "all")
733 733
734 734
735 735 def pullrebase(orig, ui, repo, *args, **opts):
736 736 'Call rebase after pull if the latter has been invoked with --rebase'
737 737 if opts.get('rebase'):
738 738 if opts.get('update'):
739 739 del opts['update']
740 740 ui.debug('--update and --rebase are not compatible, ignoring '
741 741 'the update flag\n')
742 742
743 743 movemarkfrom = repo['.'].node()
744 744 cmdutil.bailifchanged(repo)
745 745 revsprepull = len(repo)
746 746 origpostincoming = commands.postincoming
747 747 def _dummy(*args, **kwargs):
748 748 pass
749 749 commands.postincoming = _dummy
750 750 try:
751 751 orig(ui, repo, *args, **opts)
752 752 finally:
753 753 commands.postincoming = origpostincoming
754 754 revspostpull = len(repo)
755 755 if revspostpull > revsprepull:
756 756 # the --rev option from pull conflicts with rebase's own --rev,
757 757 # so drop it
758 758 if 'rev' in opts:
759 759 del opts['rev']
760 760 rebase(ui, repo, **opts)
761 761 branch = repo[None].branch()
762 762 dest = repo[branch].rev()
763 763 if dest != repo['.'].rev():
764 764 # there was nothing to rebase, so we force an update
765 765 hg.update(repo, dest)
766 766 if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
767 767 ui.status(_("updating bookmark %s\n")
768 768 % repo._bookmarkcurrent)
769 769 else:
770 770 if opts.get('tool'):
771 771 raise util.Abort(_('--tool can only be used with --rebase'))
772 772 orig(ui, repo, *args, **opts)
773 773
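In rough command-line terms the wrapper behaves as sketched below, assuming default options; --tool is honoured only by the rebase step:

# hg pull --rebase   ~=   hg pull
#                         hg rebase                 # when new revisions arrived
#                         hg update <branch head>   # when nothing to rebase
# an active bookmark is moved along if it still points at the old parent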
774 774 def uisetup(ui):
775 775 'Replace pull with a decorator to provide --rebase option'
776 776 entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
777 777 entry[1].append(('', 'rebase', None,
778 778 _("rebase working directory to branch head")))
779 779 entry[1].append(('t', 'tool', '',
780 780 _("specify merge tool for rebase")))
@@ -1,223 +1,223 b''
1 1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev
9 9 import encoding
10 10 import util, repoview
11 11
12 12 def _filename(repo):
13 13 """name of a branchcache file for a given repo or repoview"""
14 14 filename = "cache/branchheads"
15 15 if repo.filtername:
16 16 filename = '%s-%s' % (filename, repo.filtername)
17 17 return filename
18 18
19 19 def read(repo):
20 20 try:
21 21 f = repo.opener(_filename(repo))
22 22 lines = f.read().split('\n')
23 23 f.close()
24 24 except (IOError, OSError):
25 25 return None
26 26
27 27 try:
28 28 cachekey = lines.pop(0).split(" ", 2)
29 29 last, lrev = cachekey[:2]
30 30 last, lrev = bin(last), int(lrev)
31 31 filteredhash = None
32 32 if len(cachekey) > 2:
33 33 filteredhash = bin(cachekey[2])
34 34 partial = branchcache(tipnode=last, tiprev=lrev,
35 35 filteredhash=filteredhash)
36 36 if not partial.validfor(repo):
37 37 # invalidate the cache
38 38 raise ValueError('tip differs')
39 39 for l in lines:
40 40 if not l:
41 41 continue
42 42 node, label = l.split(" ", 1)
43 43 label = encoding.tolocal(label.strip())
44 44 if not node in repo:
45 45 raise ValueError('node %s does not exist' % node)
46 46 partial.setdefault(label, []).append(bin(node))
47 47 except KeyboardInterrupt:
48 48 raise
49 49 except Exception, inst:
50 50 if repo.ui.debugflag:
51 51 msg = 'invalid branchheads cache'
52 52 if repo.filtername is not None:
53 53 msg += ' (%s)' % repo.filtername
54 54 msg += ': %s\n'
55 55 repo.ui.warn(msg % inst)
56 56 partial = None
57 57 return partial
58 58
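The on-disk cache format implied by this parser, sketched for reference (hex hashes abbreviated as <...>):

# .hg/cache/branchheads[-<filtername>]:
#   <tipnode hex> <tiprev> [<filteredhash hex>]   single cache-key line
#   <head node hex> <branch label>                one line per branch head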
59 59
60 60
61 61 def updatecache(repo):
62 62 cl = repo.changelog
63 63 filtername = repo.filtername
64 64 partial = repo._branchcaches.get(filtername)
65 65
66 66 revs = []
67 67 if partial is None or not partial.validfor(repo):
68 68 partial = read(repo)
69 69 if partial is None:
70 70 subsetname = repoview.subsettable.get(filtername)
71 71 if subsetname is None:
72 72 partial = branchcache()
73 73 else:
74 74 subset = repo.filtered(subsetname)
75 75 partial = subset.branchmap().copy()
76 76 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
77 77 revs.extend(r for r in extrarevs if r <= partial.tiprev)
78 78 revs.extend(cl.revs(start=partial.tiprev + 1))
79 79 if revs:
80 80 partial.update(repo, revs)
81 81 partial.write(repo)
82 82 assert partial.validfor(repo), filtername
83 83 repo._branchcaches[repo.filtername] = partial
84 84
85 85 class branchcache(dict):
86 86 """A dict like object that hold branches heads cache"""
87 87
88 88 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
89 89 filteredhash=None):
90 90 super(branchcache, self).__init__(entries)
91 91 self.tipnode = tipnode
92 92 self.tiprev = tiprev
93 93 self.filteredhash = filteredhash
94 94
95 95 def _hashfiltered(self, repo):
96 96 """build hash of revision filtered in the current cache
97 97
98 Tracking tipnode and tiprev is not enough to ensure validaty of the
98 Tracking tipnode and tiprev is not enough to ensure validity of the
99 99 cache, as they do not help distinguish caches that ignored various
100 100 revisions below tiprev.
101 101
102 102 To detect such difference, we build a cache of all ignored revisions.
103 103 """
104 104 cl = repo.changelog
105 105 if not cl.filteredrevs:
106 106 return None
107 107 key = None
108 108 revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
109 109 if revs:
110 110 s = util.sha1()
111 111 for rev in revs:
112 112 s.update('%s;' % rev)
113 113 key = s.digest()
114 114 return key
115 115
116 116 def validfor(self, repo):
117 """Is the cache content valide regarding a repo
117 """Is the cache content valid regarding a repo
118 118
119 - False when cached tipnode are unknown or if we detect a strip.
119 - False when cached tipnode is unknown or if we detect a strip.
120 120 - True when cache is up to date or a subset of current repo."""
121 121 try:
122 122 return ((self.tipnode == repo.changelog.node(self.tiprev))
123 123 and (self.filteredhash == self._hashfiltered(repo)))
124 124 except IndexError:
125 125 return False
126 126
127 127 def copy(self):
128 128 """return an deep copy of the branchcache object"""
129 129 return branchcache(self, self.tipnode, self.tiprev, self.filteredhash)
130 130
131 131 def write(self, repo):
132 132 try:
133 133 f = repo.opener(_filename(repo), "w", atomictemp=True)
134 134 cachekey = [hex(self.tipnode), str(self.tiprev)]
135 135 if self.filteredhash is not None:
136 136 cachekey.append(hex(self.filteredhash))
137 137 f.write(" ".join(cachekey) + '\n')
138 138 for label, nodes in sorted(self.iteritems()):
139 139 for node in nodes:
140 140 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
141 141 f.close()
142 142 except (IOError, OSError, util.Abort):
143 143 # Abort may be raised by a read-only opener
144 144 pass
145 145
146 146 def update(self, repo, revgen):
147 147 """Given a branchhead cache, self, that may have extra nodes or be
148 148 missing heads, and a generator of revisions that is at least a superset
149 149 of the missing heads, this function updates self to be correct.
150 150 """
151 151 cl = repo.changelog
152 152 # collect new branch entries
153 153 newbranches = {}
154 154 getbranch = cl.branch
155 155 for r in revgen:
156 156 newbranches.setdefault(getbranch(r), []).append(cl.node(r))
157 157 # if older branchheads are reachable from new ones, they aren't
158 158 # really branchheads. Note checking parents is insufficient:
159 159 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
160 160 for branch, newnodes in newbranches.iteritems():
161 161 bheads = self.setdefault(branch, [])
162 162 # Remove candidate heads that no longer are in the repo (e.g., as
163 163 # the result of a strip that just happened). Avoid using 'node in
164 164 # self' here because that dives down into branchcache code somewhat
165 165 # recursively.
166 166 bheadrevs = [cl.rev(node) for node in bheads
167 167 if cl.hasnode(node)]
168 168 newheadrevs = [cl.rev(node) for node in newnodes
169 169 if cl.hasnode(node)]
170 170 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
171 171 # Remove duplicates - nodes that are in newheadrevs and are already
172 172 # in bheadrevs. This can happen if you strip a node whose parent
173 173 # was already a head (because they're on different branches).
174 174 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
175 175
176 176 # Starting from tip means fewer passes over reachable. If we know
177 177 # the new candidates are not ancestors of existing heads, we don't
178 178 # have to examine ancestors of existing heads
179 179 if ctxisnew:
180 180 iterrevs = sorted(newheadrevs)
181 181 else:
182 182 iterrevs = list(bheadrevs)
183 183
184 184 # This loop prunes out two kinds of heads - heads that are
185 185 # superseded by a head in newheadrevs, and newheadrevs that are not
186 186 # heads because an existing head is their descendant.
187 187 while iterrevs:
188 188 latest = iterrevs.pop()
189 189 if latest not in bheadrevs:
190 190 continue
191 191 ancestors = set(cl.ancestors([latest],
192 192 bheadrevs[0]))
193 193 if ancestors:
194 194 bheadrevs = [b for b in bheadrevs if b not in ancestors]
195 195 self[branch] = [cl.node(rev) for rev in bheadrevs]
196 196 tiprev = max(bheadrevs)
197 197 if tiprev > self.tiprev:
198 198 self.tipnode = cl.node(tiprev)
199 199 self.tiprev = tiprev
200 200
201 201 # There may be branches that cease to exist when the last commit in the
202 202 # branch was stripped. This code filters them out. Note that the
203 203 # branch that ceased to exist may not be in newbranches because
204 204 # newbranches is the set of candidate heads, which, when you strip
205 205 # the last commit in a branch, will be on the parent branch.
206 206 droppednodes = []
207 207 for branch in self.keys():
208 208 nodes = [head for head in self[branch]
209 209 if cl.hasnode(head)]
210 210 if not nodes:
211 211 droppednodes.extend(nodes)
212 212 del self[branch]
213 213 if ((not self.validfor(repo)) or (self.tipnode in droppednodes)):
214 214
215 215 # the cache key is not valid anymore
216 216 self.tipnode = nullid
217 217 self.tiprev = nullrev
218 218 for heads in self.values():
219 219 tiprev = max(cl.rev(node) for node in heads)
220 220 if tiprev > self.tiprev:
221 221 self.tipnode = cl.node(tiprev)
222 222 self.tiprev = tiprev
223 223 self.filteredhash = self._hashfiltered(repo)
@@ -1,1364 +1,1364 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import copies
12 12 import match as matchmod
13 13 import os, errno, stat
14 14 import obsolete as obsmod
15 15 import repoview
16 16
17 17 propertycache = util.propertycache
18 18
19 19 class changectx(object):
20 20 """A changecontext object makes access to data related to a particular
21 21 changeset convenient."""
22 22 def __init__(self, repo, changeid=''):
23 23 """changeid is a revision number, node, or tag"""
24 24 if changeid == '':
25 25 changeid = '.'
26 26 self._repo = repo
27 27
28 28 if isinstance(changeid, int):
29 29 try:
30 30 self._node = repo.changelog.node(changeid)
31 31 except IndexError:
32 32 raise error.RepoLookupError(
33 33 _("unknown revision '%s'") % changeid)
34 34 self._rev = changeid
35 35 return
36 36 if isinstance(changeid, long):
37 37 changeid = str(changeid)
38 38 if changeid == '.':
39 39 self._node = repo.dirstate.p1()
40 40 self._rev = repo.changelog.rev(self._node)
41 41 return
42 42 if changeid == 'null':
43 43 self._node = nullid
44 44 self._rev = nullrev
45 45 return
46 46 if changeid == 'tip':
47 47 self._node = repo.changelog.tip()
48 48 self._rev = repo.changelog.rev(self._node)
49 49 return
50 50 if len(changeid) == 20:
51 51 try:
52 52 self._node = changeid
53 53 self._rev = repo.changelog.rev(changeid)
54 54 return
55 55 except LookupError:
56 56 pass
57 57
58 58 try:
59 59 r = int(changeid)
60 60 if str(r) != changeid:
61 61 raise ValueError
62 62 l = len(repo.changelog)
63 63 if r < 0:
64 64 r += l
65 65 if r < 0 or r >= l:
66 66 raise ValueError
67 67 self._rev = r
68 68 self._node = repo.changelog.node(r)
69 69 return
70 70 except (ValueError, OverflowError, IndexError):
71 71 pass
72 72
73 73 if len(changeid) == 40:
74 74 try:
75 75 self._node = bin(changeid)
76 76 self._rev = repo.changelog.rev(self._node)
77 77 return
78 78 except (TypeError, LookupError):
79 79 pass
80 80
81 81 if changeid in repo._bookmarks:
82 82 self._node = repo._bookmarks[changeid]
83 83 self._rev = repo.changelog.rev(self._node)
84 84 return
85 85 if changeid in repo._tagscache.tags:
86 86 self._node = repo._tagscache.tags[changeid]
87 87 self._rev = repo.changelog.rev(self._node)
88 88 return
89 89 try:
90 90 self._node = repo.branchtip(changeid)
91 91 self._rev = repo.changelog.rev(self._node)
92 92 return
93 93 except error.RepoLookupError:
94 94 pass
95 95
96 96 self._node = repo.changelog._partialmatch(changeid)
97 97 if self._node is not None:
98 98 self._rev = repo.changelog.rev(self._node)
99 99 return
100 100
101 101 # lookup failed
102 102 # check if it might have come from damaged dirstate
103 103 #
104 104 # XXX we could avoid the unfiltered if we had a recognizable exception
105 105 # for filtered changeset access
106 106 if changeid in repo.unfiltered().dirstate.parents():
107 107 raise error.Abort(_("working directory has unknown parent '%s'!")
108 108 % short(changeid))
109 109 try:
110 110 if len(changeid) == 20:
111 111 changeid = hex(changeid)
112 112 except TypeError:
113 113 pass
114 114 raise error.RepoLookupError(
115 115 _("unknown revision '%s'") % changeid)
116 116
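To make the lookup cascade above concrete, a hedged sketch of inputs that resolve through successive branches (the names and hex prefix are hypothetical):

# changectx(repo, 0)          # integer revision number
# changectx(repo, '.')        # first parent of the working directory
# changectx(repo, 'tip')      # repository tip
# changectx(repo, '-1')       # decimal string, counted from the end
# changectx(repo, 'stable')   # bookmark, tag or branch tip, in that order
# changectx(repo, '1f0dee6')  # unambiguous hex prefix via _partialmatch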
117 117 def __str__(self):
118 118 return short(self.node())
119 119
120 120 def __int__(self):
121 121 return self.rev()
122 122
123 123 def __repr__(self):
124 124 return "<changectx %s>" % str(self)
125 125
126 126 def __hash__(self):
127 127 try:
128 128 return hash(self._rev)
129 129 except AttributeError:
130 130 return id(self)
131 131
132 132 def __eq__(self, other):
133 133 try:
134 134 return self._rev == other._rev
135 135 except AttributeError:
136 136 return False
137 137
138 138 def __ne__(self, other):
139 139 return not (self == other)
140 140
141 141 def __nonzero__(self):
142 142 return self._rev != nullrev
143 143
144 144 @propertycache
145 145 def _changeset(self):
146 146 return self._repo.changelog.read(self.rev())
147 147
148 148 @propertycache
149 149 def _manifest(self):
150 150 return self._repo.manifest.read(self._changeset[0])
151 151
152 152 @propertycache
153 153 def _manifestdelta(self):
154 154 return self._repo.manifest.readdelta(self._changeset[0])
155 155
156 156 @propertycache
157 157 def _parents(self):
158 158 p = self._repo.changelog.parentrevs(self._rev)
159 159 if p[1] == nullrev:
160 160 p = p[:-1]
161 161 return [changectx(self._repo, x) for x in p]
162 162
163 163 @propertycache
164 164 def substate(self):
165 165 return subrepo.state(self, self._repo.ui)
166 166
167 167 def __contains__(self, key):
168 168 return key in self._manifest
169 169
170 170 def __getitem__(self, key):
171 171 return self.filectx(key)
172 172
173 173 def __iter__(self):
174 174 for f in sorted(self._manifest):
175 175 yield f
176 176
177 177 def changeset(self):
178 178 return self._changeset
179 179 def manifest(self):
180 180 return self._manifest
181 181 def manifestnode(self):
182 182 return self._changeset[0]
183 183
184 184 def rev(self):
185 185 return self._rev
186 186 def node(self):
187 187 return self._node
188 188 def hex(self):
189 189 return hex(self._node)
190 190 def user(self):
191 191 return self._changeset[1]
192 192 def date(self):
193 193 return self._changeset[2]
194 194 def files(self):
195 195 return self._changeset[3]
196 196 def description(self):
197 197 return self._changeset[4]
198 198 def branch(self):
199 199 return encoding.tolocal(self._changeset[5].get("branch"))
200 200 def closesbranch(self):
201 201 return 'close' in self._changeset[5]
202 202 def extra(self):
203 203 return self._changeset[5]
204 204 def tags(self):
205 205 return self._repo.nodetags(self._node)
206 206 def bookmarks(self):
207 207 return self._repo.nodebookmarks(self._node)
208 208 def phase(self):
209 209 return self._repo._phasecache.phase(self._repo, self._rev)
210 210 def phasestr(self):
211 211 return phases.phasenames[self.phase()]
212 212 def mutable(self):
213 213 return self.phase() > phases.public
214 214 def hidden(self):
215 215 return self._rev in repoview.filterrevs(self._repo, 'visible')
216 216
217 217 def parents(self):
218 218 """return contexts for each parent changeset"""
219 219 return self._parents
220 220
221 221 def p1(self):
222 222 return self._parents[0]
223 223
224 224 def p2(self):
225 225 if len(self._parents) == 2:
226 226 return self._parents[1]
227 227 return changectx(self._repo, -1)
228 228
229 229 def children(self):
230 230 """return contexts for each child changeset"""
231 231 c = self._repo.changelog.children(self._node)
232 232 return [changectx(self._repo, x) for x in c]
233 233
234 234 def ancestors(self):
235 235 for a in self._repo.changelog.ancestors([self._rev]):
236 236 yield changectx(self._repo, a)
237 237
238 238 def descendants(self):
239 239 for d in self._repo.changelog.descendants([self._rev]):
240 240 yield changectx(self._repo, d)
241 241
242 242 def obsolete(self):
243 243 """True if the changeset is obsolete"""
244 244 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
245 245
246 246 def extinct(self):
247 247 """True if the changeset is extinct"""
248 248 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
249 249
250 250 def unstable(self):
251 251 """True if the changeset is not obsolete but it's ancestor are"""
252 252 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
253 253
254 254 def bumped(self):
255 255 """True if the changeset try to be a successor of a public changeset
256 256
257 257 Only non-public and non-obsolete changesets may be bumped.
258 258 """
259 259 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
260 260
261 261 def divergent(self):
262 262 """Is a successors of a changeset with multiple possible successors set
263 263
264 264 Only non-public and non-obsolete changesets may be divergent.
265 265 """
266 266 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
267 267
268 268 def troubled(self):
269 269 """True if the changeset is either unstable, bumped or divergent"""
270 270 return self.unstable() or self.bumped() or self.divergent()
271 271
272 272 def troubles(self):
273 273 """return the list of troubles affecting this changesets.
274 274
275 275 Troubles are returned as strings. possible values are:
276 276 - unstable,
277 277 - bumped,
278 278 - divergent.
279 279 """
280 280 troubles = []
281 281 if self.unstable():
282 282 troubles.append('unstable')
283 283 if self.bumped():
284 284 troubles.append('bumped')
285 285 if self.divergent():
286 286 troubles.append('divergent')
287 287 return troubles
288 288
289 289 def _fileinfo(self, path):
290 290 if '_manifest' in self.__dict__:
291 291 try:
292 292 return self._manifest[path], self._manifest.flags(path)
293 293 except KeyError:
294 294 raise error.LookupError(self._node, path,
295 295 _('not found in manifest'))
296 296 if '_manifestdelta' in self.__dict__ or path in self.files():
297 297 if path in self._manifestdelta:
298 298 return (self._manifestdelta[path],
299 299 self._manifestdelta.flags(path))
300 300 node, flag = self._repo.manifest.find(self._changeset[0], path)
301 301 if not node:
302 302 raise error.LookupError(self._node, path,
303 303 _('not found in manifest'))
304 304
305 305 return node, flag
306 306
307 307 def filenode(self, path):
308 308 return self._fileinfo(path)[0]
309 309
310 310 def flags(self, path):
311 311 try:
312 312 return self._fileinfo(path)[1]
313 313 except error.LookupError:
314 314 return ''
315 315
316 316 def filectx(self, path, fileid=None, filelog=None):
317 317 """get a file context from this changeset"""
318 318 if fileid is None:
319 319 fileid = self.filenode(path)
320 320 return filectx(self._repo, path, fileid=fileid,
321 321 changectx=self, filelog=filelog)
322 322
323 323 def ancestor(self, c2):
324 324 """
325 325 return the ancestor context of self and c2
326 326 """
327 327 # deal with workingctxs
328 328 n2 = c2._node
329 329 if n2 is None:
330 330 n2 = c2._parents[0]._node
331 331 n = self._repo.changelog.ancestor(self._node, n2)
332 332 return changectx(self._repo, n)
333 333
334 334 def descendant(self, other):
335 335 """True if other is descendant of this changeset"""
336 336 return self._repo.changelog.descendant(self._rev, other._rev)
337 337
338 338 def walk(self, match):
339 339 fset = set(match.files())
340 340 # for dirstate.walk, files=['.'] means "walk the whole tree".
341 341 # follow that here, too
342 342 fset.discard('.')
343 343 for fn in self:
344 344 if fn in fset:
345 345 # specified pattern is the exact name
346 346 fset.remove(fn)
347 347 if match(fn):
348 348 yield fn
349 349 for fn in sorted(fset):
350 350 if fn in self._dirs:
351 351 # specified pattern is a directory
352 352 continue
353 353 if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
354 354 yield fn
355 355
356 356 def sub(self, path):
357 357 return subrepo.subrepo(self, path)
358 358
359 359 def match(self, pats=[], include=None, exclude=None, default='glob'):
360 360 r = self._repo
361 361 return matchmod.match(r.root, r.getcwd(), pats,
362 362 include, exclude, default,
363 363 auditor=r.auditor, ctx=self)
364 364
365 365 def diff(self, ctx2=None, match=None, **opts):
366 366 """Returns a diff generator for the given contexts and matcher"""
367 367 if ctx2 is None:
368 368 ctx2 = self.p1()
369 369 if ctx2 is not None and not isinstance(ctx2, changectx):
370 370 ctx2 = self._repo[ctx2]
371 371 diffopts = patch.diffopts(self._repo.ui, opts)
372 372 return patch.diff(self._repo, ctx2.node(), self.node(),
373 373 match=match, opts=diffopts)
374 374
375 375 @propertycache
376 376 def _dirs(self):
377 377 dirs = set()
378 378 for f in self._manifest:
379 379 pos = f.rfind('/')
380 380 while pos != -1:
381 381 f = f[:pos]
382 382 if f in dirs:
383 383 break # dirs already contains this and above
384 384 dirs.add(f)
385 385 pos = f.rfind('/')
386 386 return dirs
387 387
388 388 def dirs(self):
389 389 return self._dirs
390 390
391 391 def dirty(self):
392 392 return False
393 393
394 394 class filectx(object):
395 395 """A filecontext object makes access to data related to a particular
396 396 filerevision convenient."""
397 397 def __init__(self, repo, path, changeid=None, fileid=None,
398 398 filelog=None, changectx=None):
399 399 """changeid can be a changeset revision, node, or tag.
400 400 fileid can be a file revision or node."""
401 401 self._repo = repo
402 402 self._path = path
403 403
404 404 assert (changeid is not None
405 405 or fileid is not None
406 406 or changectx is not None), \
407 407 ("bad args: changeid=%r, fileid=%r, changectx=%r"
408 408 % (changeid, fileid, changectx))
409 409
410 410 if filelog:
411 411 self._filelog = filelog
412 412
413 413 if changeid is not None:
414 414 self._changeid = changeid
415 415 if changectx is not None:
416 416 self._changectx = changectx
417 417 if fileid is not None:
418 418 self._fileid = fileid
419 419
420 420 @propertycache
421 421 def _changectx(self):
422 422 try:
423 423 return changectx(self._repo, self._changeid)
424 424 except error.RepoLookupError:
425 425 # Linkrev may point to any revision in the repository. When the
426 426 # repository is filtered this may lead to `filectx` trying to build
427 427 # `changectx` for a filtered revision. In such a case we fall back to
428 428 # creating a `changectx` on the unfiltered version of the repository.
429 429 # This fallback should not be an issue because `changectx` from
430 # `filectx` are not used in complexe operation that care about
430 # `filectx` are not used in complex operations that care about
431 431 # filtering.
432 432 #
433 433 # This fallback is a cheap and dirty fix that prevents several
434 # crash. It does not ensure the behavior is correct. However the
434 # crashes. It does not ensure the behavior is correct. However the
435 435 # behavior was not correct before filtering either and "incorrect
436 436 # behavior" is seen as better than a "crash"
437 437 #
438 438 # Linkrevs pose several serious problems with filtering that are
439 439 # complicated to solve. Proper handling of the issue here should be
440 440 # considered once solving the linkrev issues is on the table.
441 441 return changectx(self._repo.unfiltered(), self._changeid)
442 442
443 443 @propertycache
444 444 def _filelog(self):
445 445 return self._repo.file(self._path)
446 446
447 447 @propertycache
448 448 def _changeid(self):
449 449 if '_changectx' in self.__dict__:
450 450 return self._changectx.rev()
451 451 else:
452 452 return self._filelog.linkrev(self._filerev)
453 453
454 454 @propertycache
455 455 def _filenode(self):
456 456 if '_fileid' in self.__dict__:
457 457 return self._filelog.lookup(self._fileid)
458 458 else:
459 459 return self._changectx.filenode(self._path)
460 460
461 461 @propertycache
462 462 def _filerev(self):
463 463 return self._filelog.rev(self._filenode)
464 464
465 465 @propertycache
466 466 def _repopath(self):
467 467 return self._path
468 468
469 469 def __nonzero__(self):
470 470 try:
471 471 self._filenode
472 472 return True
473 473 except error.LookupError:
474 474 # file is missing
475 475 return False
476 476
477 477 def __str__(self):
478 478 return "%s@%s" % (self.path(), short(self.node()))
479 479
480 480 def __repr__(self):
481 481 return "<filectx %s>" % str(self)
482 482
483 483 def __hash__(self):
484 484 try:
485 485 return hash((self._path, self._filenode))
486 486 except AttributeError:
487 487 return id(self)
488 488
489 489 def __eq__(self, other):
490 490 try:
491 491 return (self._path == other._path
492 492 and self._filenode == other._filenode)
493 493 except AttributeError:
494 494 return False
495 495
496 496 def __ne__(self, other):
497 497 return not (self == other)
498 498
499 499 def filectx(self, fileid):
500 500 '''opens an arbitrary revision of the file without
501 501 opening a new filelog'''
502 502 return filectx(self._repo, self._path, fileid=fileid,
503 503 filelog=self._filelog)
504 504
505 505 def filerev(self):
506 506 return self._filerev
507 507 def filenode(self):
508 508 return self._filenode
509 509 def flags(self):
510 510 return self._changectx.flags(self._path)
511 511 def filelog(self):
512 512 return self._filelog
513 513
514 514 def rev(self):
515 515 if '_changectx' in self.__dict__:
516 516 return self._changectx.rev()
517 517 if '_changeid' in self.__dict__:
518 518 return self._changectx.rev()
519 519 return self._filelog.linkrev(self._filerev)
520 520
521 521 def linkrev(self):
522 522 return self._filelog.linkrev(self._filerev)
523 523 def node(self):
524 524 return self._changectx.node()
525 525 def hex(self):
526 526 return hex(self.node())
527 527 def user(self):
528 528 return self._changectx.user()
529 529 def date(self):
530 530 return self._changectx.date()
531 531 def files(self):
532 532 return self._changectx.files()
533 533 def description(self):
534 534 return self._changectx.description()
535 535 def branch(self):
536 536 return self._changectx.branch()
537 537 def extra(self):
538 538 return self._changectx.extra()
539 539 def phase(self):
540 540 return self._changectx.phase()
541 541 def phasestr(self):
542 542 return self._changectx.phasestr()
543 543 def manifest(self):
544 544 return self._changectx.manifest()
545 545 def changectx(self):
546 546 return self._changectx
547 547
548 548 def data(self):
549 549 return self._filelog.read(self._filenode)
550 550 def path(self):
551 551 return self._path
552 552 def size(self):
553 553 return self._filelog.size(self._filerev)
554 554
555 555 def isbinary(self):
556 556 try:
557 557 return util.binary(self.data())
558 558 except IOError:
559 559 return False
560 560
561 561 def cmp(self, fctx):
562 562 """compare with other file context
563 563
564 564 returns True if different from fctx.
565 565 """
566 566 if (fctx._filerev is None
567 567 and (self._repo._encodefilterpats
568 568 # if file data starts with '\1\n', empty metadata block is
569 569 # prepended, which adds 4 bytes to filelog.size().
570 570 or self.size() - 4 == fctx.size())
571 571 or self.size() == fctx.size()):
572 572 return self._filelog.cmp(self._filenode, fctx.data())
573 573
574 574 return True
575 575
576 576 def renamed(self):
577 577 """check if file was actually renamed in this changeset revision
578 578
579 579 If a rename is logged in the file revision, we report a copy for
580 580 the changeset only if the file revision's linkrev points back to the
581 581 changeset in question or both parents contain different file revisions.
582 582 """
583 583
584 584 renamed = self._filelog.renamed(self._filenode)
585 585 if not renamed:
586 586 return renamed
587 587
588 588 if self.rev() == self.linkrev():
589 589 return renamed
590 590
591 591 name = self.path()
592 592 fnode = self._filenode
593 593 for p in self._changectx.parents():
594 594 try:
595 595 if fnode == p.filenode(name):
596 596 return None
597 597 except error.LookupError:
598 598 pass
599 599 return renamed
600 600
601 601 def parents(self):
602 602 p = self._path
603 603 fl = self._filelog
604 604 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
605 605
606 606 r = self._filelog.renamed(self._filenode)
607 607 if r:
608 608 pl[0] = (r[0], r[1], None)
609 609
610 610 return [filectx(self._repo, p, fileid=n, filelog=l)
611 611 for p, n, l in pl if n != nullid]
612 612
613 613 def p1(self):
614 614 return self.parents()[0]
615 615
616 616 def p2(self):
617 617 p = self.parents()
618 618 if len(p) == 2:
619 619 return p[1]
620 620 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
621 621
622 622 def children(self):
623 623 # hard for renames
624 624 c = self._filelog.children(self._filenode)
625 625 return [filectx(self._repo, self._path, fileid=x,
626 626 filelog=self._filelog) for x in c]
627 627
628 628 def annotate(self, follow=False, linenumber=None, diffopts=None):
629 629 '''returns a list of tuples of (ctx, line) for each line
630 630 in the file, where ctx is the filectx of the node where
631 631 that line was last changed.
632 632 If the "linenumber" parameter is not None, tuples of
633 633 ((ctx, linenumber), line) are returned instead, where
634 634 linenumber is the line's number at its first appearance
635 635 in the managed file.
636 636 To reduce annotation cost, a fixed value (False) is
637 637 returned as the linenumber when the "linenumber"
638 638 parameter is False.'''
639 639
640 640 def decorate_compat(text, rev):
641 641 return ([rev] * len(text.splitlines()), text)
642 642
643 643 def without_linenumber(text, rev):
644 644 return ([(rev, False)] * len(text.splitlines()), text)
645 645
646 646 def with_linenumber(text, rev):
647 647 size = len(text.splitlines())
648 648 return ([(rev, i) for i in xrange(1, size + 1)], text)
649 649
650 650 decorate = (((linenumber is None) and decorate_compat) or
651 651 (linenumber and with_linenumber) or
652 652 without_linenumber)
653 653
654 654 def pair(parent, child):
655 655 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
656 656 refine=True)
657 657 for (a1, a2, b1, b2), t in blocks:
658 658 # Changed blocks ('!') or blocks made only of blank lines ('~')
659 659 # belong to the child.
660 660 if t == '=':
661 661 child[0][b1:b2] = parent[0][a1:a2]
662 662 return child
663 663
664 664 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
665 665 def getctx(path, fileid):
666 666 log = path == self._path and self._filelog or getlog(path)
667 667 return filectx(self._repo, path, fileid=fileid, filelog=log)
668 668 getctx = util.lrucachefunc(getctx)
669 669
670 670 def parents(f):
671 671 # we want to reuse filectx objects as much as possible
672 672 p = f._path
673 673 if f._filerev is None: # working dir
674 674 pl = [(n.path(), n.filerev()) for n in f.parents()]
675 675 else:
676 676 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
677 677
678 678 if follow:
679 679 r = f.renamed()
680 680 if r:
681 681 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
682 682
683 683 return [getctx(p, n) for p, n in pl if n != nullrev]
684 684
685 685 # use linkrev to find the first changeset where self appeared
686 686 if self.rev() != self.linkrev():
687 687 base = self.filectx(self.filerev())
688 688 else:
689 689 base = self
690 690
691 691 # This algorithm would prefer to be recursive, but Python is a
692 692 # bit recursion-hostile. Instead we do an iterative
693 693 # depth-first search.
694 694
695 695 visit = [base]
696 696 hist = {}
697 697 pcache = {}
698 698 needed = {base: 1}
699 699 while visit:
700 700 f = visit[-1]
701 701 if f not in pcache:
702 702 pcache[f] = parents(f)
703 703
704 704 ready = True
705 705 pl = pcache[f]
706 706 for p in pl:
707 707 if p not in hist:
708 708 ready = False
709 709 visit.append(p)
710 710 needed[p] = needed.get(p, 0) + 1
711 711 if ready:
712 712 visit.pop()
713 713 curr = decorate(f.data(), f)
714 714 for p in pl:
715 715 curr = pair(hist[p], curr)
716 716 if needed[p] == 1:
717 717 del hist[p]
718 718 else:
719 719 needed[p] -= 1
720 720
721 721 hist[f] = curr
722 722 pcache[f] = []
723 723
724 724 return zip(hist[base][0], hist[base][1].splitlines(True))
725 725
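# Editor's note -- an illustrative usage sketch (assumes an existing
# `repo` object; per the decorate functions above, annotate yields
# (filectx, line) pairs, or ((filectx, linenumber), line) when line
# numbers are requested):
#
#     fctx = repo['tip']['a.txt']
#     for fc, line in fctx.annotate():
#         print '%d: %s' % (fc.rev(), line),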
726 726 def ancestor(self, fc2, actx):
727 727 """
728 728 find the common ancestor file context, if any, of self, and fc2
729 729
730 730 actx must be the changectx of the common ancestor
731 731 of self's and fc2's respective changesets.
732 732 """
733 733
734 734 # the easy case: no (relevant) renames
735 735 if fc2.path() == self.path() and self.path() in actx:
736 736 return actx[self.path()]
737 737
738 738 # the next easiest cases: unambiguous predecessor (name trumps
739 739 # history)
740 740 if self.path() in actx and fc2.path() not in actx:
741 741 return actx[self.path()]
742 742 if fc2.path() in actx and self.path() not in actx:
743 743 return actx[fc2.path()]
744 744
745 745 # prime the ancestor cache for the working directory
746 746 acache = {}
747 747 for c in (self, fc2):
748 748 if c._filerev is None:
749 749 pl = [(n.path(), n.filenode()) for n in c.parents()]
750 750 acache[(c._path, None)] = pl
751 751
752 752 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
753 753 def parents(vertex):
754 754 if vertex in acache:
755 755 return acache[vertex]
756 756 f, n = vertex
757 757 if f not in flcache:
758 758 flcache[f] = self._repo.file(f)
759 759 fl = flcache[f]
760 760 pl = [(f, p) for p in fl.parents(n) if p != nullid]
761 761 re = fl.renamed(n)
762 762 if re:
763 763 pl.append(re)
764 764 acache[vertex] = pl
765 765 return pl
766 766
767 767 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
768 768 v = ancestor.ancestor(a, b, parents)
769 769 if v:
770 770 f, n = v
771 771 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
772 772
773 773 return None
774 774
775 775 def ancestors(self, followfirst=False):
776 776 visit = {}
777 777 c = self
778 778 cut = followfirst and 1 or None
779 779 while True:
780 780 for parent in c.parents()[:cut]:
781 781 visit[(parent.rev(), parent.node())] = parent
782 782 if not visit:
783 783 break
784 784 c = visit.pop(max(visit))
785 785 yield c
786 786
787 787 def copies(self, c2):
788 788 if not util.safehasattr(self, "_copycache"):
789 789 self._copycache = {}
790 790 sc2 = str(c2)
791 791 if sc2 not in self._copycache:
792 792 self._copycache[sc2] = copies.pathcopies(c2)
793 793 return self._copycache[sc2]
794 794
795 795 class workingctx(changectx):
796 796 """A workingctx object makes access to data related to
797 797 the current working directory convenient.
798 798 date - any valid date string or (unixtime, offset), or None.
799 799 user - username string, or None.
800 800 extra - a dictionary of extra values, or None.
801 801 changes - a list of file lists as returned by localrepo.status()
802 802 or None to use the repository status.
803 803 """
804 804 def __init__(self, repo, text="", user=None, date=None, extra=None,
805 805 changes=None):
806 806 self._repo = repo
807 807 self._rev = None
808 808 self._node = None
809 809 self._text = text
810 810 if date:
811 811 self._date = util.parsedate(date)
812 812 if user:
813 813 self._user = user
814 814 if changes:
815 815 self._status = list(changes[:4])
816 816 self._unknown = changes[4]
817 817 self._ignored = changes[5]
818 818 self._clean = changes[6]
819 819 else:
820 820 self._unknown = None
821 821 self._ignored = None
822 822 self._clean = None
823 823
824 824 self._extra = {}
825 825 if extra:
826 826 self._extra = extra.copy()
827 827 if 'branch' not in self._extra:
828 828 try:
829 829 branch = encoding.fromlocal(self._repo.dirstate.branch())
830 830 except UnicodeDecodeError:
831 831 raise util.Abort(_('branch name not in UTF-8!'))
832 832 self._extra['branch'] = branch
833 833 if self._extra['branch'] == '':
834 834 self._extra['branch'] = 'default'
835 835
836 836 def __str__(self):
837 837 return str(self._parents[0]) + "+"
838 838
839 839 def __repr__(self):
840 840 return "<workingctx %s>" % str(self)
841 841
842 842 def __nonzero__(self):
843 843 return True
844 844
845 845 def __contains__(self, key):
846 846 return self._repo.dirstate[key] not in "?r"
847 847
848 848 def _buildflagfunc(self):
849 849 # Create a fallback function for getting file flags when the
850 850 # filesystem doesn't support them
851 851
852 852 copiesget = self._repo.dirstate.copies().get
853 853
854 854 if len(self._parents) < 2:
855 855 # when we have one parent, it's easy: copy from parent
856 856 man = self._parents[0].manifest()
857 857 def func(f):
858 858 f = copiesget(f, f)
859 859 return man.flags(f)
860 860 else:
861 861 # merges are tricky: we try to reconstruct the unstored
862 862 # result from the merge (issue1802)
863 863 p1, p2 = self._parents
864 864 pa = p1.ancestor(p2)
865 865 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
866 866
867 867 def func(f):
868 868 f = copiesget(f, f) # may be wrong for merges with copies
869 869 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
870 870 if fl1 == fl2:
871 871 return fl1
872 872 if fl1 == fla:
873 873 return fl2
874 874 if fl2 == fla:
875 875 return fl1
876 876 return '' # punt for conflicts
877 877
878 878 return func
879 879
880 880 @propertycache
881 881 def _flagfunc(self):
882 882 return self._repo.dirstate.flagfunc(self._buildflagfunc)
883 883
884 884 @propertycache
885 885 def _manifest(self):
886 886 """generate a manifest corresponding to the working directory"""
887 887
888 888 man = self._parents[0].manifest().copy()
889 889 if len(self._parents) > 1:
890 890 man2 = self.p2().manifest()
891 891 def getman(f):
892 892 if f in man:
893 893 return man
894 894 return man2
895 895 else:
896 896 getman = lambda f: man
897 897
898 898 copied = self._repo.dirstate.copies()
899 899 ff = self._flagfunc
900 900 modified, added, removed, deleted = self._status
901 901 for i, l in (("a", added), ("m", modified)):
902 902 for f in l:
903 903 orig = copied.get(f, f)
904 904 man[f] = getman(orig).get(orig, nullid) + i
905 905 try:
906 906 man.set(f, ff(f))
907 907 except OSError:
908 908 pass
909 909
910 910 for f in deleted + removed:
911 911 if f in man:
912 912 del man[f]
913 913
914 914 return man
915 915
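# Editor's note: the synthetic manifest built above flags dirty files by
# appending the status letter ('a' for added, 'm' for modified) to the
# parent's node hash, so those entries can never compare equal to a real
# node from a parent manifest.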
916 916 def __iter__(self):
917 917 d = self._repo.dirstate
918 918 for f in d:
919 919 if d[f] != 'r':
920 920 yield f
921 921
922 922 @propertycache
923 923 def _status(self):
924 924 return self._repo.status()[:4]
925 925
926 926 @propertycache
927 927 def _user(self):
928 928 return self._repo.ui.username()
929 929
930 930 @propertycache
931 931 def _date(self):
932 932 return util.makedate()
933 933
934 934 @propertycache
935 935 def _parents(self):
936 936 p = self._repo.dirstate.parents()
937 937 if p[1] == nullid:
938 938 p = p[:-1]
939 939 return [changectx(self._repo, x) for x in p]
940 940
941 941 def status(self, ignored=False, clean=False, unknown=False):
942 942 """Explicit status query
943 943 Unless this method is used to query the working copy status, the
944 944 _status property will implicitly read the status using its default
945 945 arguments."""
946 946 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
947 947 self._unknown = self._ignored = self._clean = None
948 948 if unknown:
949 949 self._unknown = stat[4]
950 950 if ignored:
951 951 self._ignored = stat[5]
952 952 if clean:
953 953 self._clean = stat[6]
954 954 self._status = stat[:4]
955 955 return stat
956 956
957 957 def manifest(self):
958 958 return self._manifest
959 959 def user(self):
960 960 return self._user or self._repo.ui.username()
961 961 def date(self):
962 962 return self._date
963 963 def description(self):
964 964 return self._text
965 965 def files(self):
966 966 return sorted(self._status[0] + self._status[1] + self._status[2])
967 967
968 968 def modified(self):
969 969 return self._status[0]
970 970 def added(self):
971 971 return self._status[1]
972 972 def removed(self):
973 973 return self._status[2]
974 974 def deleted(self):
975 975 return self._status[3]
976 976 def unknown(self):
977 977 assert self._unknown is not None # must call status first
978 978 return self._unknown
979 979 def ignored(self):
980 980 assert self._ignored is not None # must call status first
981 981 return self._ignored
982 982 def clean(self):
983 983 assert self._clean is not None # must call status first
984 984 return self._clean
985 985 def branch(self):
986 986 return encoding.tolocal(self._extra['branch'])
987 987 def closesbranch(self):
988 988 return 'close' in self._extra
989 989 def extra(self):
990 990 return self._extra
991 991
992 992 def tags(self):
993 993 t = []
994 994 for p in self.parents():
995 995 t.extend(p.tags())
996 996 return t
997 997
998 998 def bookmarks(self):
999 999 b = []
1000 1000 for p in self.parents():
1001 1001 b.extend(p.bookmarks())
1002 1002 return b
1003 1003
1004 1004 def phase(self):
1005 1005 phase = phases.draft # default phase to draft
1006 1006 for p in self.parents():
1007 1007 phase = max(phase, p.phase())
1008 1008 return phase
1009 1009
1010 1010 def hidden(self):
1011 1011 return False
1012 1012
1013 1013 def children(self):
1014 1014 return []
1015 1015
1016 1016 def flags(self, path):
1017 1017 if '_manifest' in self.__dict__:
1018 1018 try:
1019 1019 return self._manifest.flags(path)
1020 1020 except KeyError:
1021 1021 return ''
1022 1022
1023 1023 try:
1024 1024 return self._flagfunc(path)
1025 1025 except OSError:
1026 1026 return ''
1027 1027
1028 1028 def filectx(self, path, filelog=None):
1029 1029 """get a file context from the working directory"""
1030 1030 return workingfilectx(self._repo, path, workingctx=self,
1031 1031 filelog=filelog)
1032 1032
1033 1033 def ancestor(self, c2):
1034 1034 """return the ancestor context of self and c2"""
1035 1035 return self._parents[0].ancestor(c2) # punt on two parents for now
1036 1036
1037 1037 def walk(self, match):
1038 1038 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1039 1039 True, False))
1040 1040
1041 1041 def dirty(self, missing=False, merge=True, branch=True):
1042 1042 "check whether a working directory is modified"
1043 1043 # check subrepos first
1044 1044 for s in sorted(self.substate):
1045 1045 if self.sub(s).dirty():
1046 1046 return True
1047 1047 # check current working dir
1048 1048 return ((merge and self.p2()) or
1049 1049 (branch and self.branch() != self.p1().branch()) or
1050 1050 self.modified() or self.added() or self.removed() or
1051 1051 (missing and self.deleted()))
1052 1052
1053 1053 def add(self, list, prefix=""):
1054 1054 join = lambda f: os.path.join(prefix, f)
1055 1055 wlock = self._repo.wlock()
1056 1056 ui, ds = self._repo.ui, self._repo.dirstate
1057 1057 try:
1058 1058 rejected = []
1059 1059 for f in list:
1060 1060 scmutil.checkportable(ui, join(f))
1061 1061 p = self._repo.wjoin(f)
1062 1062 try:
1063 1063 st = os.lstat(p)
1064 1064 except OSError:
1065 1065 ui.warn(_("%s does not exist!\n") % join(f))
1066 1066 rejected.append(f)
1067 1067 continue
1068 1068 if st.st_size > 10000000:
1069 1069 ui.warn(_("%s: up to %d MB of RAM may be required "
1070 1070 "to manage this file\n"
1071 1071 "(use 'hg revert %s' to cancel the "
1072 1072 "pending addition)\n")
1073 1073 % (f, 3 * st.st_size // 1000000, join(f)))
1074 1074 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1075 1075 ui.warn(_("%s not added: only files and symlinks "
1076 1076 "supported currently\n") % join(f))
1077 1077 rejected.append(p)
1078 1078 elif ds[f] in 'amn':
1079 1079 ui.warn(_("%s already tracked!\n") % join(f))
1080 1080 elif ds[f] == 'r':
1081 1081 ds.normallookup(f)
1082 1082 else:
1083 1083 ds.add(f)
1084 1084 return rejected
1085 1085 finally:
1086 1086 wlock.release()
1087 1087
1088 1088 def forget(self, files, prefix=""):
1089 1089 join = lambda f: os.path.join(prefix, f)
1090 1090 wlock = self._repo.wlock()
1091 1091 try:
1092 1092 rejected = []
1093 1093 for f in files:
1094 1094 if f not in self._repo.dirstate:
1095 1095 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1096 1096 rejected.append(f)
1097 1097 elif self._repo.dirstate[f] != 'a':
1098 1098 self._repo.dirstate.remove(f)
1099 1099 else:
1100 1100 self._repo.dirstate.drop(f)
1101 1101 return rejected
1102 1102 finally:
1103 1103 wlock.release()
1104 1104
1105 1105 def ancestors(self):
1106 1106 for a in self._repo.changelog.ancestors(
1107 1107 [p.rev() for p in self._parents]):
1108 1108 yield changectx(self._repo, a)
1109 1109
1110 1110 def undelete(self, list):
1111 1111 pctxs = self.parents()
1112 1112 wlock = self._repo.wlock()
1113 1113 try:
1114 1114 for f in list:
1115 1115 if self._repo.dirstate[f] != 'r':
1116 1116 self._repo.ui.warn(_("%s not removed!\n") % f)
1117 1117 else:
1118 1118 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1119 1119 t = fctx.data()
1120 1120 self._repo.wwrite(f, t, fctx.flags())
1121 1121 self._repo.dirstate.normal(f)
1122 1122 finally:
1123 1123 wlock.release()
1124 1124
1125 1125 def copy(self, source, dest):
1126 1126 p = self._repo.wjoin(dest)
1127 1127 if not os.path.lexists(p):
1128 1128 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1129 1129 elif not (os.path.isfile(p) or os.path.islink(p)):
1130 1130 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1131 1131 "symbolic link\n") % dest)
1132 1132 else:
1133 1133 wlock = self._repo.wlock()
1134 1134 try:
1135 1135 if self._repo.dirstate[dest] in '?r':
1136 1136 self._repo.dirstate.add(dest)
1137 1137 self._repo.dirstate.copy(source, dest)
1138 1138 finally:
1139 1139 wlock.release()
1140 1140
1141 1141 def dirs(self):
1142 1142 return set(self._repo.dirstate.dirs())
1143 1143
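# Editor's note -- an illustrative usage sketch (assumes an existing
# `repo`): repo[None] returns a workingctx, and the unknown/ignored/
# clean lists must be requested through status() before their
# accessors may be read:
#
#     wctx = repo[None]
#     wctx.status(unknown=True)
#     for f in wctx.unknown():
#         print f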
1144 1144 class workingfilectx(filectx):
1145 1145 """A workingfilectx object makes access to data related to a particular
1146 1146 file in the working directory convenient."""
1147 1147 def __init__(self, repo, path, filelog=None, workingctx=None):
1148 1148 """changeid can be a changeset revision, node, or tag.
1149 1149 fileid can be a file revision or node."""
1150 1150 self._repo = repo
1151 1151 self._path = path
1152 1152 self._changeid = None
1153 1153 self._filerev = self._filenode = None
1154 1154
1155 1155 if filelog:
1156 1156 self._filelog = filelog
1157 1157 if workingctx:
1158 1158 self._changectx = workingctx
1159 1159
1160 1160 @propertycache
1161 1161 def _changectx(self):
1162 1162 return workingctx(self._repo)
1163 1163
1164 1164 def __nonzero__(self):
1165 1165 return True
1166 1166
1167 1167 def __str__(self):
1168 1168 return "%s@%s" % (self.path(), self._changectx)
1169 1169
1170 1170 def __repr__(self):
1171 1171 return "<workingfilectx %s>" % str(self)
1172 1172
1173 1173 def data(self):
1174 1174 return self._repo.wread(self._path)
1175 1175 def renamed(self):
1176 1176 rp = self._repo.dirstate.copied(self._path)
1177 1177 if not rp:
1178 1178 return None
1179 1179 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1180 1180
1181 1181 def parents(self):
1182 1182 '''return parent filectxs, following copies if necessary'''
1183 1183 def filenode(ctx, path):
1184 1184 return ctx._manifest.get(path, nullid)
1185 1185
1186 1186 path = self._path
1187 1187 fl = self._filelog
1188 1188 pcl = self._changectx._parents
1189 1189 renamed = self.renamed()
1190 1190
1191 1191 if renamed:
1192 1192 pl = [renamed + (None,)]
1193 1193 else:
1194 1194 pl = [(path, filenode(pcl[0], path), fl)]
1195 1195
1196 1196 for pc in pcl[1:]:
1197 1197 pl.append((path, filenode(pc, path), fl))
1198 1198
1199 1199 return [filectx(self._repo, p, fileid=n, filelog=l)
1200 1200 for p, n, l in pl if n != nullid]
1201 1201
1202 1202 def children(self):
1203 1203 return []
1204 1204
1205 1205 def size(self):
1206 1206 return os.lstat(self._repo.wjoin(self._path)).st_size
1207 1207 def date(self):
1208 1208 t, tz = self._changectx.date()
1209 1209 try:
1210 1210 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
1211 1211 except OSError, err:
1212 1212 if err.errno != errno.ENOENT:
1213 1213 raise
1214 1214 return (t, tz)
1215 1215
1216 1216 def cmp(self, fctx):
1217 1217 """compare with other file context
1218 1218
1219 1219 returns True if different from fctx.
1220 1220 """
1221 1221 # fctx should be a filectx (not a workingfilectx)
1222 1222 # invert comparison to reuse the same code path
1223 1223 return fctx.cmp(self)
1224 1224
1225 1225 class memctx(object):
1226 1226 """Use memctx to perform in-memory commits via localrepo.commitctx().
1227 1227
1228 1228 Revision information is supplied at initialization time while
1229 1229 related files data is made available through a callback
1230 1230 mechanism. 'repo' is the current localrepo, 'parents' is a
1231 1231 sequence of two parent revisions identifiers (pass None for every
1232 1232 missing parent), 'text' is the commit message and 'files' lists
1233 1233 names of files touched by the revision (normalized and relative to
1234 1234 repository root).
1235 1235
1236 1236 filectxfn(repo, memctx, path) is a callable receiving the
1237 1237 repository, the current memctx object and the normalized path of
1238 1238 requested file, relative to repository root. It is fired by the
1239 1239 commit function for every file in 'files', but calls order is
1240 1240 undefined. If the file is available in the revision being
1241 1241 committed (updated or added), filectxfn returns a memfilectx
1242 1242 object. If the file was removed, filectxfn raises an
1243 1243 IOError. Moved files are represented by marking the source file
1244 1244 removed and the new file added with copy information (see
1245 1245 memfilectx).
1246 1246
1247 1247 user receives the committer name and defaults to current
1248 1248 repository username, date is the commit date in any format
1249 1249 supported by util.parsedate() and defaults to current date, extra
1250 1250 is a dictionary of metadata or is left empty.
1251 1251 """
1252 1252 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1253 1253 date=None, extra=None):
1254 1254 self._repo = repo
1255 1255 self._rev = None
1256 1256 self._node = None
1257 1257 self._text = text
1258 1258 self._date = date and util.parsedate(date) or util.makedate()
1259 1259 self._user = user
1260 1260 parents = [(p or nullid) for p in parents]
1261 1261 p1, p2 = parents
1262 1262 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1263 1263 files = sorted(set(files))
1264 1264 self._status = [files, [], [], [], []]
1265 1265 self._filectxfn = filectxfn
1266 1266
1267 1267 self._extra = extra and extra.copy() or {}
1268 1268 if self._extra.get('branch', '') == '':
1269 1269 self._extra['branch'] = 'default'
1270 1270
1271 1271 def __str__(self):
1272 1272 return str(self._parents[0]) + "+"
1273 1273
1274 1274 def __int__(self):
1275 1275 return self._rev
1276 1276
1277 1277 def __nonzero__(self):
1278 1278 return True
1279 1279
1280 1280 def __getitem__(self, key):
1281 1281 return self.filectx(key)
1282 1282
1283 1283 def p1(self):
1284 1284 return self._parents[0]
1285 1285 def p2(self):
1286 1286 return self._parents[1]
1287 1287
1288 1288 def user(self):
1289 1289 return self._user or self._repo.ui.username()
1290 1290 def date(self):
1291 1291 return self._date
1292 1292 def description(self):
1293 1293 return self._text
1294 1294 def files(self):
1295 1295 return self.modified()
1296 1296 def modified(self):
1297 1297 return self._status[0]
1298 1298 def added(self):
1299 1299 return self._status[1]
1300 1300 def removed(self):
1301 1301 return self._status[2]
1302 1302 def deleted(self):
1303 1303 return self._status[3]
1304 1304 def unknown(self):
1305 1305 return self._status[4]
1306 1306 def ignored(self):
1307 1307 return self._status[5]
1308 1308 def clean(self):
1309 1309 return self._status[6]
1310 1310 def branch(self):
1311 1311 return encoding.tolocal(self._extra['branch'])
1312 1312 def extra(self):
1313 1313 return self._extra
1314 1314 def flags(self, f):
1315 1315 return self[f].flags()
1316 1316
1317 1317 def parents(self):
1318 1318 """return contexts for each parent changeset"""
1319 1319 return self._parents
1320 1320
1321 1321 def filectx(self, path, filelog=None):
1322 1322 """get a file context from the working directory"""
1323 1323 return self._filectxfn(self._repo, self, path)
1324 1324
1325 1325 def commit(self):
1326 1326 """commit context to the repo"""
1327 1327 return self._repo.commitctx(self)
1328 1328
1329 1329 class memfilectx(object):
1330 1330 """memfilectx represents an in-memory file to commit.
1331 1331
1332 1332 See memctx for more details.
1333 1333 """
1334 1334 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1335 1335 """
1336 1336 path is the normalized file path relative to repository root.
1337 1337 data is the file content as a string.
1338 1338 islink is True if the file is a symbolic link.
1339 1339 isexec is True if the file is executable.
1340 1340 copied is the source file path if current file was copied in the
1341 1341 revision being committed, or None."""
1342 1342 self._path = path
1343 1343 self._data = data
1344 1344 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1345 1345 self._copied = None
1346 1346 if copied:
1347 1347 self._copied = (copied, nullid)
1348 1348
1349 1349 def __nonzero__(self):
1350 1350 return True
1351 1351 def __str__(self):
1352 1352 return "%s@%s" % (self.path(), self._changectx)
1353 1353 def path(self):
1354 1354 return self._path
1355 1355 def data(self):
1356 1356 return self._data
1357 1357 def flags(self):
1358 1358 return self._flags
1359 1359 def isexec(self):
1360 1360 return 'x' in self._flags
1361 1361 def islink(self):
1362 1362 return 'l' in self._flags
1363 1363 def renamed(self):
1364 1364 return self._copied
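# Editor's note -- a minimal in-memory commit sketch (illustrative, not
# part of the original source; assumes an existing writable `repo`):
#
#     def filectxfn(repo, mctx, path):
#         # called once per file in 'files'; raise IOError for removals
#         return memfilectx(path, 'new contents\n')
#
#     mctx = memctx(repo, [repo['tip'].node(), None], 'in-memory commit',
#                   ['a.txt'], filectxfn, user='someone@example.com')
#     node = mctx.commit()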
@@ -1,2594 +1,2588 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 """check if an repo and a unfilteredproperty cached value for <name>"""
52 """check if a repo has an unfilteredpropertycache value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
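# Editor's note: unfilteredmethod is applied below (e.g. to _tag) so that
# a call made through a filtered repoview still executes against the
# unfiltered repository; only `self` is swapped, the remaining arguments
# pass through unchanged.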
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo.filtered('served')
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return self._repo.branchmap()
95 95
96 96 def heads(self):
97 97 return self._repo.heads()
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common)
104 104
105 105 # TODO We might want to move the next two calls into legacypeer and add
106 106 # unbundle instead.
107 107
108 108 def lock(self):
109 109 return self._repo.lock()
110 110
111 111 def addchangegroup(self, cg, source, url):
112 112 return self._repo.addchangegroup(cg, source, url)
113 113
114 114 def pushkey(self, namespace, key, old, new):
115 115 return self._repo.pushkey(namespace, key, old, new)
116 116
117 117 def listkeys(self, namespace):
118 118 return self._repo.listkeys(namespace)
119 119
120 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 121 '''used to test argument passing over the wire'''
122 122 return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
124 124 class locallegacypeer(localpeer):
125 125 '''peer extension which implements legacy methods too; used for tests with
126 126 restricted capabilities'''
127 127
128 128 def __init__(self, repo):
129 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130 130
131 131 def branches(self, nodes):
132 132 return self._repo.branches(nodes)
133 133
134 134 def between(self, pairs):
135 135 return self._repo.between(pairs)
136 136
137 137 def changegroup(self, basenodes, source):
138 138 return self._repo.changegroup(basenodes, source)
139 139
140 140 def changegroupsubset(self, bases, heads, source):
141 141 return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 150 filtername = None
151 151
152 152 def _baserequirements(self, create):
153 153 return self.requirements[:]
154 154
155 155 def __init__(self, baseui, path=None, create=False):
156 156 self.wvfs = scmutil.vfs(path, expand=True)
157 157 self.wopener = self.wvfs
158 158 self.root = self.wvfs.base
159 159 self.path = self.wvfs.join(".hg")
160 160 self.origroot = path
161 161 self.auditor = scmutil.pathauditor(self.root, self._checknested)
162 162 self.vfs = scmutil.vfs(self.path)
163 163 self.opener = self.vfs
164 164 self.baseui = baseui
165 165 self.ui = baseui.copy()
166 166 # A list of callbacks to shape the phase if no data were found.
167 167 # Callbacks are in the form: func(repo, roots) --> processed root.
168 168 # This list is to be filled by extensions during repo setup.
169 169 self._phasedefaults = []
170 170 try:
171 171 self.ui.readconfig(self.join("hgrc"), self.root)
172 172 extensions.loadall(self.ui)
173 173 except IOError:
174 174 pass
175 175
176 176 if not self.vfs.isdir():
177 177 if create:
178 178 if not self.wvfs.exists():
179 179 self.wvfs.makedirs()
180 180 self.vfs.makedir(notindexed=True)
181 181 requirements = self._baserequirements(create)
182 182 if self.ui.configbool('format', 'usestore', True):
183 183 self.vfs.mkdir("store")
184 184 requirements.append("store")
185 185 if self.ui.configbool('format', 'usefncache', True):
186 186 requirements.append("fncache")
187 187 if self.ui.configbool('format', 'dotencode', True):
188 188 requirements.append('dotencode')
189 189 # create an invalid changelog
190 190 self.vfs.append(
191 191 "00changelog.i",
192 192 '\0\0\0\2' # represents revlogv2
193 193 ' dummy changelog to prevent using the old repo layout'
194 194 )
195 195 if self.ui.configbool('format', 'generaldelta', False):
196 196 requirements.append("generaldelta")
197 197 requirements = set(requirements)
198 198 else:
199 199 raise error.RepoError(_("repository %s not found") % path)
200 200 elif create:
201 201 raise error.RepoError(_("repository %s already exists") % path)
202 202 else:
203 203 try:
204 204 requirements = scmutil.readrequires(self.vfs, self.supported)
205 205 except IOError, inst:
206 206 if inst.errno != errno.ENOENT:
207 207 raise
208 208 requirements = set()
209 209
210 210 self.sharedpath = self.path
211 211 try:
212 212 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
213 213 if not os.path.exists(s):
214 214 raise error.RepoError(
215 215 _('.hg/sharedpath points to nonexistent directory %s') % s)
216 216 self.sharedpath = s
217 217 except IOError, inst:
218 218 if inst.errno != errno.ENOENT:
219 219 raise
220 220
221 221 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
222 222 self.spath = self.store.path
223 223 self.svfs = self.store.vfs
224 224 self.sopener = self.svfs
225 225 self.sjoin = self.store.join
226 226 self.vfs.createmode = self.store.createmode
227 227 self._applyrequirements(requirements)
228 228 if create:
229 229 self._writerequirements()
230 230
231 231
232 232 self._branchcaches = {}
233 233 self.filterpats = {}
234 234 self._datafilters = {}
235 235 self._transref = self._lockref = self._wlockref = None
236 236
237 237 # A cache for various files under .hg/ that tracks file changes
238 238 # (used by the filecache decorator)
239 239 #
240 240 # Maps a property name to its util.filecacheentry
241 241 self._filecache = {}
242 242
243 243 # holds sets of revisions to be filtered; should be cleared when
244 244 # something might have changed the filter value:
245 245 # - new changesets,
246 246 # - phase change,
247 247 # - new obsolescence marker,
248 248 # - working directory parent change,
249 249 # - bookmark changes
250 250 self.filteredrevcache = {}
251 251
252 252 def close(self):
253 253 pass
254 254
255 255 def _restrictcapabilities(self, caps):
256 256 return caps
257 257
258 258 def _applyrequirements(self, requirements):
259 259 self.requirements = requirements
260 260 self.sopener.options = dict((r, 1) for r in requirements
261 261 if r in self.openerreqs)
262 262
263 263 def _writerequirements(self):
264 264 reqfile = self.opener("requires", "w")
265 265 for r in sorted(self.requirements):
266 266 reqfile.write("%s\n" % r)
267 267 reqfile.close()
268 268
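# Editor's note (illustrative): the resulting .hg/requires file holds one
# requirement per line, sorted; for a repository created with the default
# configuration above it would read:
#
#     dotencode
#     fncache
#     revlogv1
#     store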
269 269 def _checknested(self, path):
270 270 """Determine if path is a legal nested repository."""
271 271 if not path.startswith(self.root):
272 272 return False
273 273 subpath = path[len(self.root) + 1:]
274 274 normsubpath = util.pconvert(subpath)
275 275
276 276 # XXX: Checking against the current working copy is wrong in
277 277 # the sense that it can reject things like
278 278 #
279 279 # $ hg cat -r 10 sub/x.txt
280 280 #
281 281 # if sub/ is no longer a subrepository in the working copy
282 282 # parent revision.
283 283 #
284 284 # However, it can of course also allow things that would have
285 285 # been rejected before, such as the above cat command if sub/
286 286 # is a subrepository now, but was a normal directory before.
287 287 # The old path auditor would have rejected by mistake since it
288 288 # panics when it sees sub/.hg/.
289 289 #
290 290 # All in all, checking against the working copy seems sensible
291 291 # since we want to prevent access to nested repositories on
292 292 # the filesystem *now*.
293 293 ctx = self[None]
294 294 parts = util.splitpath(subpath)
295 295 while parts:
296 296 prefix = '/'.join(parts)
297 297 if prefix in ctx.substate:
298 298 if prefix == normsubpath:
299 299 return True
300 300 else:
301 301 sub = ctx.sub(prefix)
302 302 return sub.checknested(subpath[len(prefix) + 1:])
303 303 else:
304 304 parts.pop()
305 305 return False
306 306
307 307 def peer(self):
308 308 return localpeer(self) # not cached to avoid reference cycle
309 309
310 310 def unfiltered(self):
311 311 """Return unfiltered version of the repository
312 312
313 Intended to be ovewritten by filtered repo."""
313 Intended to be overwritten by filtered repo."""
314 314 return self
315 315
316 316 def filtered(self, name):
317 317 """Return a filtered version of a repository"""
318 318 # build a new class with the mixin and the current class
319 # (possibily subclass of the repo)
319 # (possibly subclass of the repo)
320 320 class proxycls(repoview.repoview, self.unfiltered().__class__):
321 321 pass
322 322 return proxycls(self, name)
323 323
324 324 @repofilecache('bookmarks')
325 325 def _bookmarks(self):
326 326 return bookmarks.bmstore(self)
327 327
328 328 @repofilecache('bookmarks.current')
329 329 def _bookmarkcurrent(self):
330 330 return bookmarks.readcurrent(self)
331 331
332 332 def bookmarkheads(self, bookmark):
333 333 name = bookmark.split('@', 1)[0]
334 334 heads = []
335 335 for mark, n in self._bookmarks.iteritems():
336 336 if mark.split('@', 1)[0] == name:
337 337 heads.append(n)
338 338 return heads
339 339
340 340 @storecache('phaseroots')
341 341 def _phasecache(self):
342 342 return phases.phasecache(self, self._phasedefaults)
343 343
344 344 @storecache('obsstore')
345 345 def obsstore(self):
346 346 store = obsolete.obsstore(self.sopener)
347 347 if store and not obsolete._enabled:
348 348 # message is rare enough to not be translated
349 349 msg = 'obsolete feature not enabled but %i markers found!\n'
350 350 self.ui.warn(msg % len(list(store)))
351 351 return store
352 352
353 353 @storecache('00changelog.i')
354 354 def changelog(self):
355 355 c = changelog.changelog(self.sopener)
356 356 if 'HG_PENDING' in os.environ:
357 357 p = os.environ['HG_PENDING']
358 358 if p.startswith(self.root):
359 359 c.readpending('00changelog.i.a')
360 360 return c
361 361
362 362 @storecache('00manifest.i')
363 363 def manifest(self):
364 364 return manifest.manifest(self.sopener)
365 365
366 366 @repofilecache('dirstate')
367 367 def dirstate(self):
368 368 warned = [0]
369 369 def validate(node):
370 370 try:
371 371 self.changelog.rev(node)
372 372 return node
373 373 except error.LookupError:
374 374 if not warned[0]:
375 375 warned[0] = True
376 376 self.ui.warn(_("warning: ignoring unknown"
377 377 " working parent %s!\n") % short(node))
378 378 return nullid
379 379
380 380 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
381 381
382 382 def __getitem__(self, changeid):
383 383 if changeid is None:
384 384 return context.workingctx(self)
385 385 return context.changectx(self, changeid)
386 386
387 387 def __contains__(self, changeid):
388 388 try:
389 389 return bool(self.lookup(changeid))
390 390 except error.RepoLookupError:
391 391 return False
392 392
393 393 def __nonzero__(self):
394 394 return True
395 395
396 396 def __len__(self):
397 397 return len(self.changelog)
398 398
399 399 def __iter__(self):
400 400 return iter(self.changelog)
401 401
402 402 def revs(self, expr, *args):
403 403 '''Return a list of revisions matching the given revset'''
404 404 expr = revset.formatspec(expr, *args)
405 405 m = revset.match(None, expr)
406 406 return [r for r in m(self, list(self))]
407 407
408 408 def set(self, expr, *args):
409 409 '''
410 410 Yield a context for each matching revision, after doing arg
411 411 replacement via revset.formatspec
412 412 '''
413 413 for r in self.revs(expr, *args):
414 414 yield self[r]
415 415
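# Editor's note -- an illustrative usage sketch (assumes an existing
# `repo`; revset.formatspec takes care of quoting the %s argument):
#
#     for ctx in repo.set('heads(branch(%s))', 'default'):
#         print ctx.rev(), str(ctx)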
416 416 def url(self):
417 417 return 'file:' + self.root
418 418
419 419 def hook(self, name, throw=False, **args):
420 420 return hook.hook(self.ui, self, name, throw, **args)
421 421
422 422 @unfilteredmethod
423 423 def _tag(self, names, node, message, local, user, date, extra={}):
424 424 if isinstance(names, str):
425 425 names = (names,)
426 426
427 427 branches = self.branchmap()
428 428 for name in names:
429 429 self.hook('pretag', throw=True, node=hex(node), tag=name,
430 430 local=local)
431 431 if name in branches:
432 432 self.ui.warn(_("warning: tag %s conflicts with existing"
433 433 " branch name\n") % name)
434 434
435 435 def writetags(fp, names, munge, prevtags):
436 436 fp.seek(0, 2)
437 437 if prevtags and prevtags[-1] != '\n':
438 438 fp.write('\n')
439 439 for name in names:
440 440 m = munge and munge(name) or name
441 441 if (self._tagscache.tagtypes and
442 442 name in self._tagscache.tagtypes):
443 443 old = self.tags().get(name, nullid)
444 444 fp.write('%s %s\n' % (hex(old), m))
445 445 fp.write('%s %s\n' % (hex(node), m))
446 446 fp.close()
447 447
448 448 prevtags = ''
449 449 if local:
450 450 try:
451 451 fp = self.opener('localtags', 'r+')
452 452 except IOError:
453 453 fp = self.opener('localtags', 'a')
454 454 else:
455 455 prevtags = fp.read()
456 456
457 457 # local tags are stored in the current charset
458 458 writetags(fp, names, None, prevtags)
459 459 for name in names:
460 460 self.hook('tag', node=hex(node), tag=name, local=local)
461 461 return
462 462
463 463 try:
464 464 fp = self.wfile('.hgtags', 'rb+')
465 465 except IOError, e:
466 466 if e.errno != errno.ENOENT:
467 467 raise
468 468 fp = self.wfile('.hgtags', 'ab')
469 469 else:
470 470 prevtags = fp.read()
471 471
472 472 # committed tags are stored in UTF-8
473 473 writetags(fp, names, encoding.fromlocal, prevtags)
474 474
475 475 fp.close()
476 476
477 477 self.invalidatecaches()
478 478
479 479 if '.hgtags' not in self.dirstate:
480 480 self[None].add(['.hgtags'])
481 481
482 482 m = matchmod.exact(self.root, '', ['.hgtags'])
483 483 tagnode = self.commit(message, user, date, extra=extra, match=m)
484 484
485 485 for name in names:
486 486 self.hook('tag', node=hex(node), tag=name, local=local)
487 487
488 488 return tagnode
489 489
490 490 def tag(self, names, node, message, local, user, date):
491 491 '''tag a revision with one or more symbolic names.
492 492
493 493 names is a list of strings or, when adding a single tag, names may be a
494 494 string.
495 495
496 496 if local is True, the tags are stored in a per-repository file.
497 497 otherwise, they are stored in the .hgtags file, and a new
498 498 changeset is committed with the change.
499 499
500 500 keyword arguments:
501 501
502 502 local: whether to store tags in non-version-controlled file
503 503 (default False)
504 504
505 505 message: commit message to use if committing
506 506
507 507 user: name of user to use if committing
508 508
509 509 date: date tuple to use if committing'''
510 510
511 511 if not local:
512 512 for x in self.status()[:5]:
513 513 if '.hgtags' in x:
514 514 raise util.Abort(_('working copy of .hgtags is changed '
515 515 '(please commit .hgtags manually)'))
516 516
517 517 self.tags() # instantiate the cache
518 518 self._tag(names, node, message, local, user, date)
519 519
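# Editor's note -- an illustrative call sketch (assumes an existing
# `repo` with a clean .hgtags; passing date=None lets the commit use
# the current date):
#
#     repo.tag(['v1.0'], repo['tip'].node(), 'Added tag v1.0',
#              False, 'someone@example.com', None)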
520 520 @filteredpropertycache
521 521 def _tagscache(self):
522 522 '''Returns a tagscache object that contains various tag-related
523 523 caches.'''
524 524
525 525 # This simplifies its cache management by having one decorated
526 526 # function (this one) and the rest simply fetch things from it.
527 527 class tagscache(object):
528 528 def __init__(self):
529 529 # These two define the set of tags for this repository. tags
530 530 # maps tag name to node; tagtypes maps tag name to 'global' or
531 531 # 'local'. (Global tags are defined by .hgtags across all
532 532 # heads, and local tags are defined in .hg/localtags.)
533 533 # They constitute the in-memory cache of tags.
534 534 self.tags = self.tagtypes = None
535 535
536 536 self.nodetagscache = self.tagslist = None
537 537
538 538 cache = tagscache()
539 539 cache.tags, cache.tagtypes = self._findtags()
540 540
541 541 return cache
542 542
543 543 def tags(self):
544 544 '''return a mapping of tag to node'''
545 545 t = {}
546 546 if self.changelog.filteredrevs:
547 547 tags, tt = self._findtags()
548 548 else:
549 549 tags = self._tagscache.tags
550 550 for k, v in tags.iteritems():
551 551 try:
552 552 # ignore tags to unknown nodes
553 553 self.changelog.rev(v)
554 554 t[k] = v
555 555 except (error.LookupError, ValueError):
556 556 pass
557 557 return t
558 558
559 559 def _findtags(self):
560 560 '''Do the hard work of finding tags. Return a pair of dicts
561 561 (tags, tagtypes) where tags maps tag name to node, and tagtypes
562 562 maps tag name to a string like \'global\' or \'local\'.
563 563 Subclasses or extensions are free to add their own tags, but
564 564 should be aware that the returned dicts will be retained for the
565 565 duration of the localrepo object.'''
566 566
567 567 # XXX what tagtype should subclasses/extensions use? Currently
568 568 # mq and bookmarks add tags, but do not set the tagtype at all.
569 569 # Should each extension invent its own tag type? Should there
570 570 # be one tagtype for all such "virtual" tags? Or is the status
571 571 # quo fine?
572 572
573 573 alltags = {} # map tag name to (node, hist)
574 574 tagtypes = {}
575 575
576 576 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
577 577 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
578 578
579 579 # Build the return dicts. Have to re-encode tag names because
580 580 # the tags module always uses UTF-8 (in order not to lose info
581 581 # writing to the cache), but the rest of Mercurial wants them in
582 582 # local encoding.
583 583 tags = {}
584 584 for (name, (node, hist)) in alltags.iteritems():
585 585 if node != nullid:
586 586 tags[encoding.tolocal(name)] = node
587 587 tags['tip'] = self.changelog.tip()
588 588 tagtypes = dict([(encoding.tolocal(name), value)
589 589 for (name, value) in tagtypes.iteritems()])
590 590 return (tags, tagtypes)
591 591
592 592 def tagtype(self, tagname):
593 593 '''
594 594 return the type of the given tag. result can be:
595 595
596 596 'local' : a local tag
597 597 'global' : a global tag
598 598 None : tag does not exist
599 599 '''
600 600
601 601 return self._tagscache.tagtypes.get(tagname)
602 602
603 603 def tagslist(self):
604 604 '''return a list of tags ordered by revision'''
605 605 if not self._tagscache.tagslist:
606 606 l = []
607 607 for t, n in self.tags().iteritems():
608 608 r = self.changelog.rev(n)
609 609 l.append((r, t, n))
610 610 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
611 611
612 612 return self._tagscache.tagslist
613 613
614 614 def nodetags(self, node):
615 615 '''return the tags associated with a node'''
616 616 if not self._tagscache.nodetagscache:
617 617 nodetagscache = {}
618 618 for t, n in self._tagscache.tags.iteritems():
619 619 nodetagscache.setdefault(n, []).append(t)
620 620 for tags in nodetagscache.itervalues():
621 621 tags.sort()
622 622 self._tagscache.nodetagscache = nodetagscache
623 623 return self._tagscache.nodetagscache.get(node, [])
624 624
625 625 def nodebookmarks(self, node):
626 626 marks = []
627 627 for bookmark, n in self._bookmarks.iteritems():
628 628 if n == node:
629 629 marks.append(bookmark)
630 630 return sorted(marks)
631 631
632 632 def branchmap(self):
633 633 '''returns a dictionary {branch: [branchheads]}'''
634 634 branchmap.updatecache(self)
635 635 return self._branchcaches[self.filtername]
636 636
637 637
638 638 def _branchtip(self, heads):
639 639 '''return the tipmost branch head in heads'''
640 640 tip = heads[-1]
641 641 for h in reversed(heads):
642 642 if not self[h].closesbranch():
643 643 tip = h
644 644 break
645 645 return tip
646 646
647 647 def branchtip(self, branch):
648 648 '''return the tip node for a given branch'''
649 649 if branch not in self.branchmap():
650 650 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
651 651 return self._branchtip(self.branchmap()[branch])
652 652
653 653 def branchtags(self):
654 654 '''return a dict where branch names map to the tipmost head of
655 655 the branch; open heads come before closed'''
656 656 bt = {}
657 657 for bn, heads in self.branchmap().iteritems():
658 658 bt[bn] = self._branchtip(heads)
659 659 return bt
660 660
661 661 def lookup(self, key):
662 662 return self[key].node()
663 663
664 664 def lookupbranch(self, key, remote=None):
665 665 repo = remote or self
666 666 if key in repo.branchmap():
667 667 return key
668 668
669 669 repo = (remote and remote.local()) and remote or self
670 670 return repo[key].branch()
671 671
672 672 def known(self, nodes):
673 673 nm = self.changelog.nodemap
674 674 pc = self._phasecache
675 675 result = []
676 676 for n in nodes:
677 677 r = nm.get(n)
678 678 resp = not (r is None or pc.phase(self, r) >= phases.secret)
679 679 result.append(resp)
680 680 return result
681 681
682 682 def local(self):
683 683 return self
684 684
685 685 def cancopy(self):
686 686 return self.local() # so statichttprepo's override of local() works
687 687
688 688 def join(self, f):
689 689 return os.path.join(self.path, f)
690 690
691 691 def wjoin(self, f):
692 692 return os.path.join(self.root, f)
693 693
694 694 def file(self, f):
695 695 if f[0] == '/':
696 696 f = f[1:]
697 697 return filelog.filelog(self.sopener, f)
698 698
699 699 def changectx(self, changeid):
700 700 return self[changeid]
701 701
702 702 def parents(self, changeid=None):
703 703 '''get list of changectxs for parents of changeid'''
704 704 return self[changeid].parents()
705 705
706 706 def setparents(self, p1, p2=nullid):
707 707 copies = self.dirstate.setparents(p1, p2)
708 708 if copies:
709 709 # Adjust copy records; the dirstate cannot do it because it
710 710 # requires access to the parents' manifests. Preserve them
711 711 # only for entries added to the first parent.
712 712 pctx = self[p1]
713 713 for f in copies:
714 714 if f not in pctx and copies[f] in pctx:
715 715 self.dirstate.copy(copies[f], f)
716 716
717 717 def filectx(self, path, changeid=None, fileid=None):
718 718 """changeid can be a changeset revision, node, or tag.
719 719 fileid can be a file revision or node."""
720 720 return context.filectx(self, path, changeid, fileid)
721 721
722 722 def getcwd(self):
723 723 return self.dirstate.getcwd()
724 724
725 725 def pathto(self, f, cwd=None):
726 726 return self.dirstate.pathto(f, cwd)
727 727
728 728 def wfile(self, f, mode='r'):
729 729 return self.wopener(f, mode)
730 730
731 731 def _link(self, f):
732 732 return os.path.islink(self.wjoin(f))
733 733
734 734 def _loadfilter(self, filter):
735 735 if filter not in self.filterpats:
736 736 l = []
737 737 for pat, cmd in self.ui.configitems(filter):
738 738 if cmd == '!':
739 739 continue
740 740 mf = matchmod.match(self.root, '', [pat])
741 741 fn = None
742 742 params = cmd
743 743 for name, filterfn in self._datafilters.iteritems():
744 744 if cmd.startswith(name):
745 745 fn = filterfn
746 746 params = cmd[len(name):].lstrip()
747 747 break
748 748 if not fn:
749 749 fn = lambda s, c, **kwargs: util.filter(s, c)
750 750 # Wrap old filters not supporting keyword arguments
751 751 if not inspect.getargspec(fn)[2]:
752 752 oldfn = fn
753 753 fn = lambda s, c, **kwargs: oldfn(s, c)
754 754 l.append((mf, fn, params))
755 755 self.filterpats[filter] = l
756 756 return self.filterpats[filter]
757 757
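# The patterns loaded above come from the hgrc section named after the
# filter; a hypothetical configuration (the 'pipe:' prefix is the
# default and may be omitted):
#
#   [encode]
#   *.gz = pipe: gunzip
#   [decode]
#   *.gz = pipe: gzip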
758 758 def _filter(self, filterpats, filename, data):
759 759 for mf, fn, cmd in filterpats:
760 760 if mf(filename):
761 761 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
762 762 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
763 763 break
764 764
765 765 return data
766 766
767 767 @unfilteredpropertycache
768 768 def _encodefilterpats(self):
769 769 return self._loadfilter('encode')
770 770
771 771 @unfilteredpropertycache
772 772 def _decodefilterpats(self):
773 773 return self._loadfilter('decode')
774 774
775 775 def adddatafilter(self, name, filter):
776 776 self._datafilters[name] = filter
777 777
778 778 def wread(self, filename):
779 779 if self._link(filename):
780 780 data = os.readlink(self.wjoin(filename))
781 781 else:
782 782 data = self.wopener.read(filename)
783 783 return self._filter(self._encodefilterpats, filename, data)
784 784
785 785 def wwrite(self, filename, data, flags):
786 786 data = self._filter(self._decodefilterpats, filename, data)
787 787 if 'l' in flags:
788 788 self.wopener.symlink(data, filename)
789 789 else:
790 790 self.wopener.write(filename, data)
791 791 if 'x' in flags:
792 792 util.setflags(self.wjoin(filename), False, True)
793 793
794 794 def wwritedata(self, filename, data):
795 795 return self._filter(self._decodefilterpats, filename, data)
796 796
797 797 def transaction(self, desc):
798 798 tr = self._transref and self._transref() or None
799 799 if tr and tr.running():
800 800 return tr.nest()
801 801
802 802 # abort here if the journal already exists
803 803 if os.path.exists(self.sjoin("journal")):
804 804 raise error.RepoError(
805 805 _("abandoned transaction found - run hg recover"))
806 806
807 807 self._writejournal(desc)
808 808 renames = [(x, undoname(x)) for x in self._journalfiles()]
809 809
810 810 tr = transaction.transaction(self.ui.warn, self.sopener,
811 811 self.sjoin("journal"),
812 812 aftertrans(renames),
813 813 self.store.createmode)
814 814 self._transref = weakref.ref(tr)
815 815 return tr
816 816
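# Minimal transaction sketch (assumes the store lock is already held,
# as in commitctx() below): close() commits the journal, release()
# aborts it if close() was never reached.
#
#   tr = repo.transaction('example')
#   try:
#       # ... write store data through the transaction ...
#       tr.close()
#   finally:
#       tr.release()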
817 817 def _journalfiles(self):
818 818 return (self.sjoin('journal'), self.join('journal.dirstate'),
819 819 self.join('journal.branch'), self.join('journal.desc'),
820 820 self.join('journal.bookmarks'),
821 821 self.sjoin('journal.phaseroots'))
822 822
823 823 def undofiles(self):
824 824 return [undoname(x) for x in self._journalfiles()]
825 825
826 826 def _writejournal(self, desc):
827 827 self.opener.write("journal.dirstate",
828 828 self.opener.tryread("dirstate"))
829 829 self.opener.write("journal.branch",
830 830 encoding.fromlocal(self.dirstate.branch()))
831 831 self.opener.write("journal.desc",
832 832 "%d\n%s\n" % (len(self), desc))
833 833 self.opener.write("journal.bookmarks",
834 834 self.opener.tryread("bookmarks"))
835 835 self.sopener.write("journal.phaseroots",
836 836 self.sopener.tryread("phaseroots"))
837 837
838 838 def recover(self):
839 839 lock = self.lock()
840 840 try:
841 841 if os.path.exists(self.sjoin("journal")):
842 842 self.ui.status(_("rolling back interrupted transaction\n"))
843 843 transaction.rollback(self.sopener, self.sjoin("journal"),
844 844 self.ui.warn)
845 845 self.invalidate()
846 846 return True
847 847 else:
848 848 self.ui.warn(_("no interrupted transaction available\n"))
849 849 return False
850 850 finally:
851 851 lock.release()
852 852
853 853 def rollback(self, dryrun=False, force=False):
854 854 wlock = lock = None
855 855 try:
856 856 wlock = self.wlock()
857 857 lock = self.lock()
858 858 if os.path.exists(self.sjoin("undo")):
859 859 return self._rollback(dryrun, force)
860 860 else:
861 861 self.ui.warn(_("no rollback information available\n"))
862 862 return 1
863 863 finally:
864 864 release(lock, wlock)
865 865
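# Sketch: probing for rollback without changing anything; with
# dryrun=True, _rollback() below stops right after reporting what
# would be undone.
#
#   if repo.rollback(dryrun=True) == 0:
#       repo.ui.status('rollback information is available\n')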
866 866 @unfilteredmethod # Until we get smarter cache management
867 867 def _rollback(self, dryrun, force):
868 868 ui = self.ui
869 869 try:
870 870 args = self.opener.read('undo.desc').splitlines()
871 871 (oldlen, desc, detail) = (int(args[0]), args[1], None)
872 872 if len(args) >= 3:
873 873 detail = args[2]
874 874 oldtip = oldlen - 1
875 875
876 876 if detail and ui.verbose:
877 877 msg = (_('repository tip rolled back to revision %s'
878 878 ' (undo %s: %s)\n')
879 879 % (oldtip, desc, detail))
880 880 else:
881 881 msg = (_('repository tip rolled back to revision %s'
882 882 ' (undo %s)\n')
883 883 % (oldtip, desc))
884 884 except IOError:
885 885 msg = _('rolling back unknown transaction\n')
886 886 desc = None
887 887
888 888 if not force and self['.'] != self['tip'] and desc == 'commit':
889 889 raise util.Abort(
890 890 _('rollback of last commit while not checked out '
891 891 'may lose data'), hint=_('use -f to force'))
892 892
893 893 ui.status(msg)
894 894 if dryrun:
895 895 return 0
896 896
897 897 parents = self.dirstate.parents()
898 898 self.destroying()
899 899 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
900 900 if os.path.exists(self.join('undo.bookmarks')):
901 901 util.rename(self.join('undo.bookmarks'),
902 902 self.join('bookmarks'))
903 903 if os.path.exists(self.sjoin('undo.phaseroots')):
904 904 util.rename(self.sjoin('undo.phaseroots'),
905 905 self.sjoin('phaseroots'))
906 906 self.invalidate()
907 907
908 908 parentgone = (parents[0] not in self.changelog.nodemap or
909 909 parents[1] not in self.changelog.nodemap)
910 910 if parentgone:
911 911 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
912 912 try:
913 913 branch = self.opener.read('undo.branch')
914 914 self.dirstate.setbranch(encoding.tolocal(branch))
915 915 except IOError:
916 916 ui.warn(_('named branch could not be reset: '
917 917 'current branch is still \'%s\'\n')
918 918 % self.dirstate.branch())
919 919
920 920 self.dirstate.invalidate()
921 921 parents = tuple([p.rev() for p in self.parents()])
922 922 if len(parents) > 1:
923 923 ui.status(_('working directory now based on '
924 924 'revisions %d and %d\n') % parents)
925 925 else:
926 926 ui.status(_('working directory now based on '
927 927 'revision %d\n') % parents)
928 928 # TODO: if we know which new heads may result from this rollback, pass
929 929 # them to destroy(), which will prevent the branchhead cache from being
930 930 # invalidated.
931 931 self.destroyed()
932 932 return 0
933 933
934 934 def invalidatecaches(self):
936 936 if '_tagscache' in vars(self):
937 937 # can't use delattr on proxy
938 938 del self.__dict__['_tagscache']
939 939
940 940 self.unfiltered()._branchcaches.clear()
941 941 self.invalidatevolatilesets()
942 942
943 943 def invalidatevolatilesets(self):
944 944 self.filteredrevcache.clear()
945 945 obsolete.clearobscaches(self)
946 946
947 947 def invalidatedirstate(self):
948 948 '''Invalidates the dirstate, causing the next call to dirstate
949 949 to check if it was modified since the last time it was read,
950 950 rereading it if it has.
951 951
952 952 This differs from dirstate.invalidate() in that it doesn't always
953 953 reread the dirstate. Use dirstate.invalidate() if you want to
954 954 explicitly read the dirstate again (i.e. restore it to a previously
955 955 known good state).'''
956 956 if hasunfilteredcache(self, 'dirstate'):
957 957 for k in self.dirstate._filecache:
958 958 try:
959 959 delattr(self.dirstate, k)
960 960 except AttributeError:
961 961 pass
962 962 delattr(self.unfiltered(), 'dirstate')
963 963
964 964 def invalidate(self):
965 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
965 unfiltered = self.unfiltered() # all file caches are stored unfiltered
966 966 for k in self._filecache:
967 967 # dirstate is invalidated separately in invalidatedirstate()
968 968 if k == 'dirstate':
969 969 continue
970 970
971 971 try:
972 972 delattr(unfiltered, k)
973 973 except AttributeError:
974 974 pass
975 975 self.invalidatecaches()
976 976
977 977 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
978 978 try:
979 979 l = lock.lock(lockname, 0, releasefn, desc=desc)
980 980 except error.LockHeld, inst:
981 981 if not wait:
982 982 raise
983 983 self.ui.warn(_("waiting for lock on %s held by %r\n") %
984 984 (desc, inst.locker))
985 985 # default to 600 seconds timeout
986 986 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
987 987 releasefn, desc=desc)
988 988 if acquirefn:
989 989 acquirefn()
990 990 return l
991 991
992 992 def _afterlock(self, callback):
993 993 """add a callback to the current repository lock.
994 994
995 995 The callback will be executed on lock release."""
996 996 l = self._lockref and self._lockref()
997 997 if l:
998 998 l.postrelease.append(callback)
999 999 else:
1000 1000 callback()
1001 1001
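# Sketch of deferring work until the store lock drops, the same
# mechanism commit() uses below to fire its 'commit' hook:
#
#   def callback():
#       repo.ui.debug('store lock released\n')
#   repo._afterlock(callback)  # runs immediately if no lock is held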
1002 1002 def lock(self, wait=True):
1003 1003 '''Lock the repository store (.hg/store) and return a weak reference
1004 1004 to the lock. Use this before modifying the store (e.g. committing or
1005 1005 stripping). If you are opening a transaction, get a lock as well.'''
1006 1006 l = self._lockref and self._lockref()
1007 1007 if l is not None and l.held:
1008 1008 l.lock()
1009 1009 return l
1010 1010
1011 1011 def unlock():
1012 1012 self.store.write()
1013 1013 if hasunfilteredcache(self, '_phasecache'):
1014 1014 self._phasecache.write()
1015 1015 for k, ce in self._filecache.items():
1016 1016 if k == 'dirstate' or k not in self.__dict__:
1017 1017 continue
1018 1018 ce.refresh()
1019 1019
1020 1020 l = self._lock(self.sjoin("lock"), wait, unlock,
1021 1021 self.invalidate, _('repository %s') % self.origroot)
1022 1022 self._lockref = weakref.ref(l)
1023 1023 return l
1024 1024
1025 1025 def wlock(self, wait=True):
1026 1026 '''Lock the non-store parts of the repository (everything under
1027 1027 .hg except .hg/store) and return a weak reference to the lock.
1028 1028 Use this before modifying files in .hg.'''
1029 1029 l = self._wlockref and self._wlockref()
1030 1030 if l is not None and l.held:
1031 1031 l.lock()
1032 1032 return l
1033 1033
1034 1034 def unlock():
1035 1035 self.dirstate.write()
1036 1036 self._filecache['dirstate'].refresh()
1037 1037
1038 1038 l = self._lock(self.join("wlock"), wait, unlock,
1039 1039 self.invalidatedirstate, _('working directory of %s') %
1040 1040 self.origroot)
1041 1041 self._wlockref = weakref.ref(l)
1042 1042 return l
1043 1043
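# Lock-ordering sketch: when both locks are needed, take wlock()
# before lock(), as commit() and rollback() do, so two processes
# cannot deadlock by grabbing them in opposite orders:
#
#   wlock = repo.wlock()
#   lock = repo.lock()
#   try:
#       pass  # ... modify the store and working copy ...
#   finally:
#       release(lock, wlock)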
1044 1044 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1045 1045 """
1046 1046 commit an individual file as part of a larger transaction
1047 1047 """
1048 1048
1049 1049 fname = fctx.path()
1050 1050 text = fctx.data()
1051 1051 flog = self.file(fname)
1052 1052 fparent1 = manifest1.get(fname, nullid)
1053 1053 fparent2 = fparent2o = manifest2.get(fname, nullid)
1054 1054
1055 1055 meta = {}
1056 1056 copy = fctx.renamed()
1057 1057 if copy and copy[0] != fname:
1058 1058 # Mark the new revision of this file as a copy of another
1059 1059 # file. This copy data will effectively act as a parent
1060 1060 # of this new revision. If this is a merge, the first
1061 1061 # parent will be the nullid (meaning "look up the copy data")
1062 1062 # and the second one will be the other parent. For example:
1063 1063 #
1064 1064 # 0 --- 1 --- 3 rev1 changes file foo
1065 1065 # \ / rev2 renames foo to bar and changes it
1066 1066 # \- 2 -/ rev3 should have bar with all changes and
1067 1067 # should record that bar descends from
1068 1068 # bar in rev2 and foo in rev1
1069 1069 #
1070 1070 # this allows this merge to succeed:
1071 1071 #
1072 1072 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1073 1073 # \ / merging rev3 and rev4 should use bar@rev2
1074 1074 # \- 2 --- 4 as the merge base
1075 1075 #
1076 1076
1077 1077 cfname = copy[0]
1078 1078 crev = manifest1.get(cfname)
1079 1079 newfparent = fparent2
1080 1080
1081 1081 if manifest2: # branch merge
1082 1082 if fparent2 == nullid or crev is None: # copied on remote side
1083 1083 if cfname in manifest2:
1084 1084 crev = manifest2[cfname]
1085 1085 newfparent = fparent1
1086 1086
1087 1087 # find source in nearest ancestor if we've lost track
1088 1088 if not crev:
1089 1089 self.ui.debug(" %s: searching for copy revision for %s\n" %
1090 1090 (fname, cfname))
1091 1091 for ancestor in self[None].ancestors():
1092 1092 if cfname in ancestor:
1093 1093 crev = ancestor[cfname].filenode()
1094 1094 break
1095 1095
1096 1096 if crev:
1097 1097 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1098 1098 meta["copy"] = cfname
1099 1099 meta["copyrev"] = hex(crev)
1100 1100 fparent1, fparent2 = nullid, newfparent
1101 1101 else:
1102 1102 self.ui.warn(_("warning: can't find ancestor for '%s' "
1103 1103 "copied from '%s'!\n") % (fname, cfname))
1104 1104
1105 1105 elif fparent2 != nullid:
1106 1106 # is one parent an ancestor of the other?
1107 1107 fparentancestor = flog.ancestor(fparent1, fparent2)
1108 1108 if fparentancestor == fparent1:
1109 1109 fparent1, fparent2 = fparent2, nullid
1110 1110 elif fparentancestor == fparent2:
1111 1111 fparent2 = nullid
1112 1112
1113 1113 # is the file changed?
1114 1114 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1115 1115 changelist.append(fname)
1116 1116 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1117 1117
1118 1118 # are just the flags changed during merge?
1119 1119 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1120 1120 changelist.append(fname)
1121 1121
1122 1122 return fparent1
1123 1123
1124 1124 @unfilteredmethod
1125 1125 def commit(self, text="", user=None, date=None, match=None, force=False,
1126 1126 editor=False, extra={}):
1127 1127 """Add a new revision to current repository.
1128 1128
1129 1129 Revision information is gathered from the working directory,
1130 1130 match can be used to filter the committed files. If editor is
1131 1131 supplied, it is called to get a commit message.
1132 1132 """
1133 1133
1134 1134 def fail(f, msg):
1135 1135 raise util.Abort('%s: %s' % (f, msg))
1136 1136
1137 1137 if not match:
1138 1138 match = matchmod.always(self.root, '')
1139 1139
1140 1140 if not force:
1141 1141 vdirs = []
1142 1142 match.dir = vdirs.append
1143 1143 match.bad = fail
1144 1144
1145 1145 wlock = self.wlock()
1146 1146 try:
1147 1147 wctx = self[None]
1148 1148 merge = len(wctx.parents()) > 1
1149 1149
1150 1150 if (not force and merge and match and
1151 1151 (match.files() or match.anypats())):
1152 1152 raise util.Abort(_('cannot partially commit a merge '
1153 1153 '(do not specify files or patterns)'))
1154 1154
1155 1155 changes = self.status(match=match, clean=force)
1156 1156 if force:
1157 1157 changes[0].extend(changes[6]) # mq may commit unchanged files
1158 1158
1159 1159 # check subrepos
1160 1160 subs = []
1161 1161 commitsubs = set()
1162 1162 newstate = wctx.substate.copy()
1163 1163 # only manage subrepos and .hgsubstate if .hgsub is present
1164 1164 if '.hgsub' in wctx:
1165 1165 # we'll decide whether to track this ourselves, thanks
1166 1166 if '.hgsubstate' in changes[0]:
1167 1167 changes[0].remove('.hgsubstate')
1168 1168 if '.hgsubstate' in changes[2]:
1169 1169 changes[2].remove('.hgsubstate')
1170 1170
1171 1171 # compare current state to last committed state
1172 1172 # build new substate based on last committed state
1173 1173 oldstate = wctx.p1().substate
1174 1174 for s in sorted(newstate.keys()):
1175 1175 if not match(s):
1176 1176 # ignore working copy, use old state if present
1177 1177 if s in oldstate:
1178 1178 newstate[s] = oldstate[s]
1179 1179 continue
1180 1180 if not force:
1181 1181 raise util.Abort(
1182 1182 _("commit with new subrepo %s excluded") % s)
1183 1183 if wctx.sub(s).dirty(True):
1184 1184 if not self.ui.configbool('ui', 'commitsubrepos'):
1185 1185 raise util.Abort(
1186 1186 _("uncommitted changes in subrepo %s") % s,
1187 1187 hint=_("use --subrepos for recursive commit"))
1188 1188 subs.append(s)
1189 1189 commitsubs.add(s)
1190 1190 else:
1191 1191 bs = wctx.sub(s).basestate()
1192 1192 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1193 1193 if oldstate.get(s, (None, None, None))[1] != bs:
1194 1194 subs.append(s)
1195 1195
1196 1196 # check for removed subrepos
1197 1197 for p in wctx.parents():
1198 1198 r = [s for s in p.substate if s not in newstate]
1199 1199 subs += [s for s in r if match(s)]
1200 1200 if subs:
1201 1201 if (not match('.hgsub') and
1202 1202 '.hgsub' in (wctx.modified() + wctx.added())):
1203 1203 raise util.Abort(
1204 1204 _("can't commit subrepos without .hgsub"))
1205 1205 changes[0].insert(0, '.hgsubstate')
1206 1206
1207 1207 elif '.hgsub' in changes[2]:
1208 1208 # clean up .hgsubstate when .hgsub is removed
1209 1209 if ('.hgsubstate' in wctx and
1210 1210 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1211 1211 changes[2].insert(0, '.hgsubstate')
1212 1212
1213 1213 # make sure all explicit patterns are matched
1214 1214 if not force and match.files():
1215 1215 matched = set(changes[0] + changes[1] + changes[2])
1216 1216
1217 1217 for f in match.files():
1218 1218 f = self.dirstate.normalize(f)
1219 1219 if f == '.' or f in matched or f in wctx.substate:
1220 1220 continue
1221 1221 if f in changes[3]: # missing
1222 1222 fail(f, _('file not found!'))
1223 1223 if f in vdirs: # visited directory
1224 1224 d = f + '/'
1225 1225 for mf in matched:
1226 1226 if mf.startswith(d):
1227 1227 break
1228 1228 else:
1229 1229 fail(f, _("no match under directory!"))
1230 1230 elif f not in self.dirstate:
1231 1231 fail(f, _("file not tracked!"))
1232 1232
1233 1233 if (not force and not extra.get("close") and not merge
1234 1234 and not (changes[0] or changes[1] or changes[2])
1235 1235 and wctx.branch() == wctx.p1().branch()):
1236 1236 return None
1237 1237
1238 1238 if merge and changes[3]:
1239 1239 raise util.Abort(_("cannot commit merge with missing files"))
1240 1240
1241 1241 ms = mergemod.mergestate(self)
1242 1242 for f in changes[0]:
1243 1243 if f in ms and ms[f] == 'u':
1244 1244 raise util.Abort(_("unresolved merge conflicts "
1245 1245 "(see hg help resolve)"))
1246 1246
1247 1247 cctx = context.workingctx(self, text, user, date, extra, changes)
1248 1248 if editor:
1249 1249 cctx._text = editor(self, cctx, subs)
1250 1250 edited = (text != cctx._text)
1251 1251
1252 1252 # commit subs and write new state
1253 1253 if subs:
1254 1254 for s in sorted(commitsubs):
1255 1255 sub = wctx.sub(s)
1256 1256 self.ui.status(_('committing subrepository %s\n') %
1257 1257 subrepo.subrelpath(sub))
1258 1258 sr = sub.commit(cctx._text, user, date)
1259 1259 newstate[s] = (newstate[s][0], sr)
1260 1260 subrepo.writestate(self, newstate)
1261 1261
1262 1262 # Save commit message in case this transaction gets rolled back
1263 1263 # (e.g. by a pretxncommit hook). Leave the content alone on
1264 1264 # the assumption that the user will use the same editor again.
1265 1265 msgfn = self.savecommitmessage(cctx._text)
1266 1266
1267 1267 p1, p2 = self.dirstate.parents()
1268 1268 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1269 1269 try:
1270 1270 self.hook("precommit", throw=True, parent1=hookp1,
1271 1271 parent2=hookp2)
1272 1272 ret = self.commitctx(cctx, True)
1273 1273 except: # re-raises
1274 1274 if edited:
1275 1275 self.ui.write(
1276 1276 _('note: commit message saved in %s\n') % msgfn)
1277 1277 raise
1278 1278
1279 1279 # update bookmarks, dirstate and mergestate
1280 1280 bookmarks.update(self, [p1, p2], ret)
1281 1281 for f in changes[0] + changes[1]:
1282 1282 self.dirstate.normal(f)
1283 1283 for f in changes[2]:
1284 1284 self.dirstate.drop(f)
1285 1285 self.dirstate.setparents(ret)
1286 1286 ms.reset()
1287 1287 finally:
1288 1288 wlock.release()
1289 1289
1290 1290 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1291 1291 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1292 1292 self._afterlock(commithook)
1293 1293 return ret
1294 1294
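# commit() usage sketch with hypothetical values; None means there
# was nothing to commit:
#
#   node = repo.commit(text='fix spelling', user='alice')
#   if node is None:
#       repo.ui.status('nothing changed\n')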
1295 1295 @unfilteredmethod
1296 1296 def commitctx(self, ctx, error=False):
1297 1297 """Add a new revision to current repository.
1298 1298 Revision information is passed via the context argument.
1299 1299 """
1300 1300
1301 1301 tr = lock = None
1302 1302 removed = list(ctx.removed())
1303 1303 p1, p2 = ctx.p1(), ctx.p2()
1304 1304 user = ctx.user()
1305 1305
1306 1306 lock = self.lock()
1307 1307 try:
1308 1308 tr = self.transaction("commit")
1309 1309 trp = weakref.proxy(tr)
1310 1310
1311 1311 if ctx.files():
1312 1312 m1 = p1.manifest().copy()
1313 1313 m2 = p2.manifest()
1314 1314
1315 1315 # check in files
1316 1316 new = {}
1317 1317 changed = []
1318 1318 linkrev = len(self)
1319 1319 for f in sorted(ctx.modified() + ctx.added()):
1320 1320 self.ui.note(f + "\n")
1321 1321 try:
1322 1322 fctx = ctx[f]
1323 1323 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1324 1324 changed)
1325 1325 m1.set(f, fctx.flags())
1326 1326 except OSError, inst:
1327 1327 self.ui.warn(_("trouble committing %s!\n") % f)
1328 1328 raise
1329 1329 except IOError, inst:
1330 1330 errcode = getattr(inst, 'errno', errno.ENOENT)
1331 1331 if error or errcode and errcode != errno.ENOENT:
1332 1332 self.ui.warn(_("trouble committing %s!\n") % f)
1333 1333 raise
1334 1334 else:
1335 1335 removed.append(f)
1336 1336
1337 1337 # update manifest
1338 1338 m1.update(new)
1339 1339 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1340 1340 drop = [f for f in removed if f in m1]
1341 1341 for f in drop:
1342 1342 del m1[f]
1343 1343 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1344 1344 p2.manifestnode(), (new, drop))
1345 1345 files = changed + removed
1346 1346 else:
1347 1347 mn = p1.manifestnode()
1348 1348 files = []
1349 1349
1350 1350 # update changelog
1351 1351 self.changelog.delayupdate()
1352 1352 n = self.changelog.add(mn, files, ctx.description(),
1353 1353 trp, p1.node(), p2.node(),
1354 1354 user, ctx.date(), ctx.extra().copy())
1355 1355 p = lambda: self.changelog.writepending() and self.root or ""
1356 1356 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1357 1357 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1358 1358 parent2=xp2, pending=p)
1359 1359 self.changelog.finalize(trp)
1360 1360 # set the new commit in its proper phase
1361 1361 targetphase = phases.newcommitphase(self.ui)
1362 1362 if targetphase:
1363 1363 # retracting the boundary does not alter parent changesets.
1364 1364 # if a parent has a higher phase, the resulting phase will
1365 1365 # be compliant anyway
1366 1366 #
1367 1367 # if the minimal phase was 0 we don't need to retract anything
1368 1368 phases.retractboundary(self, targetphase, [n])
1369 1369 tr.close()
1370 1370 branchmap.updatecache(self.filtered('served'))
1371 1371 return n
1372 1372 finally:
1373 1373 if tr:
1374 1374 tr.release()
1375 1375 lock.release()
1376 1376
1377 1377 @unfilteredmethod
1378 1378 def destroying(self):
1379 1379 '''Inform the repository that nodes are about to be destroyed.
1380 1380 Intended for use by strip and rollback, so there's a common
1381 1381 place for anything that has to be done before destroying history.
1382 1382
1383 1383 This is mostly useful for saving state that is in memory and waiting
1384 1384 to be flushed when the current lock is released. Because a call to
1385 1385 destroyed is imminent, the repo will be invalidated, causing
1386 1386 those changes to either stay in memory (waiting for the next
1387 1387 unlock) or vanish completely.
1388 1388 '''
1389 1389 # When using the same lock to commit and strip, the phasecache is left
1390 1390 # dirty after committing. Then when we strip, the repo is invalidated,
1391 1391 # causing those changes to disappear.
1392 1392 if '_phasecache' in vars(self):
1393 1393 self._phasecache.write()
1394 1394
1395 1395 @unfilteredmethod
1396 1396 def destroyed(self):
1397 1397 '''Inform the repository that nodes have been destroyed.
1398 1398 Intended for use by strip and rollback, so there's a common
1399 1399 place for anything that has to be done after destroying history.
1400
1401 If you know the branchheadcache was uptodate before nodes were removed
1402 and you also know the set of candidate new heads that may have resulted
1403 from the destruction, you can set newheadnodes. This will enable the
1404 code to update the branchheads cache, rather than having future code
1405 decide it's invalid and regenerating it from scratch.
1406 1400 '''
1407 1401 # When one tries to:
1408 1402 # 1) destroy nodes thus calling this method (e.g. strip)
1409 1403 # 2) use phasecache somewhere (e.g. commit)
1410 1404 #
1411 1405 # then 2) will fail because the phasecache contains nodes that were
1412 1406 # removed. We can either remove phasecache from the filecache,
1413 1407 # causing it to reload next time it is accessed, or simply filter
1414 1408 # the removed nodes now and write the updated cache.
1415 1409 if '_phasecache' in self._filecache:
1416 1410 self._phasecache.filterunknown(self)
1417 1411 self._phasecache.write()
1418 1412
1419 1413 # update the 'served' branch cache to help read-only server processes
1420 # Thanks to branchcach collaboration this is done from the nearest
1414 # Thanks to branchcache collaboration this is done from the nearest
1421 1415 # filtered subset and it is expected to be fast.
1422 1416 branchmap.updatecache(self.filtered('served'))
1423 1417
1424 1418 # Ensure the persistent tag cache is updated. Doing it now
1425 1419 # means that the tag cache only has to worry about destroyed
1426 1420 # heads immediately after a strip/rollback. That in turn
1427 1421 # guarantees that "cachetip == currenttip" (comparing both rev
1428 1422 # and node) always means no nodes have been added or destroyed.
1429 1423
1430 1424 # XXX this is suboptimal when qrefresh'ing: we strip the current
1431 1425 # head, refresh the tag cache, then immediately add a new head.
1432 1426 # But I think doing it this way is necessary for the "instant
1433 1427 # tag cache retrieval" case to work.
1434 1428 self.invalidate()
1435 1429
1436 1430 def walk(self, match, node=None):
1437 1431 '''
1438 1432 walk recursively through the directory tree or a given
1439 1433 changeset, finding all files matched by the match
1440 1434 function
1441 1435 '''
1442 1436 return self[node].walk(match)
1443 1437
1444 1438 def status(self, node1='.', node2=None, match=None,
1445 1439 ignored=False, clean=False, unknown=False,
1446 1440 listsubrepos=False):
1447 1441 """return status of files between two nodes or node and working
1448 1442 directory.
1449 1443
1450 1444 If node1 is None, use the first dirstate parent instead.
1451 1445 If node2 is None, compare node1 with working directory.
1452 1446 """
1453 1447
1454 1448 def mfmatches(ctx):
1455 1449 mf = ctx.manifest().copy()
1456 1450 if match.always():
1457 1451 return mf
1458 1452 for fn in mf.keys():
1459 1453 if not match(fn):
1460 1454 del mf[fn]
1461 1455 return mf
1462 1456
1463 1457 if isinstance(node1, context.changectx):
1464 1458 ctx1 = node1
1465 1459 else:
1466 1460 ctx1 = self[node1]
1467 1461 if isinstance(node2, context.changectx):
1468 1462 ctx2 = node2
1469 1463 else:
1470 1464 ctx2 = self[node2]
1471 1465
1472 1466 working = ctx2.rev() is None
1473 1467 parentworking = working and ctx1 == self['.']
1474 1468 match = match or matchmod.always(self.root, self.getcwd())
1475 1469 listignored, listclean, listunknown = ignored, clean, unknown
1476 1470
1477 1471 # load earliest manifest first for caching reasons
1478 1472 if not working and ctx2.rev() < ctx1.rev():
1479 1473 ctx2.manifest()
1480 1474
1481 1475 if not parentworking:
1482 1476 def bad(f, msg):
1483 1477 # 'f' may be a directory pattern from 'match.files()',
1484 1478 # so 'f not in ctx1' is not enough
1485 1479 if f not in ctx1 and f not in ctx1.dirs():
1486 1480 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1487 1481 match.bad = bad
1488 1482
1489 1483 if working: # we need to scan the working dir
1490 1484 subrepos = []
1491 1485 if '.hgsub' in self.dirstate:
1492 1486 subrepos = sorted(ctx2.substate)
1493 1487 s = self.dirstate.status(match, subrepos, listignored,
1494 1488 listclean, listunknown)
1495 1489 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1496 1490
1497 1491 # check for any possibly clean files
1498 1492 if parentworking and cmp:
1499 1493 fixup = []
1500 1494 # do a full compare of any files that might have changed
1501 1495 for f in sorted(cmp):
1502 1496 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1503 1497 or ctx1[f].cmp(ctx2[f])):
1504 1498 modified.append(f)
1505 1499 else:
1506 1500 fixup.append(f)
1507 1501
1508 1502 # update dirstate for files that are actually clean
1509 1503 if fixup:
1510 1504 if listclean:
1511 1505 clean += fixup
1512 1506
1513 1507 try:
1514 1508 # updating the dirstate is optional
1515 1509 # so we don't wait on the lock
1516 1510 wlock = self.wlock(False)
1517 1511 try:
1518 1512 for f in fixup:
1519 1513 self.dirstate.normal(f)
1520 1514 finally:
1521 1515 wlock.release()
1522 1516 except error.LockError:
1523 1517 pass
1524 1518
1525 1519 if not parentworking:
1526 1520 mf1 = mfmatches(ctx1)
1527 1521 if working:
1528 1522 # we are comparing working dir against non-parent
1529 1523 # generate a pseudo-manifest for the working dir
1530 1524 mf2 = mfmatches(self['.'])
1531 1525 for f in cmp + modified + added:
1532 1526 mf2[f] = None
1533 1527 mf2.set(f, ctx2.flags(f))
1534 1528 for f in removed:
1535 1529 if f in mf2:
1536 1530 del mf2[f]
1537 1531 else:
1538 1532 # we are comparing two revisions
1539 1533 deleted, unknown, ignored = [], [], []
1540 1534 mf2 = mfmatches(ctx2)
1541 1535
1542 1536 modified, added, clean = [], [], []
1543 1537 withflags = mf1.withflags() | mf2.withflags()
1544 1538 for fn in mf2:
1545 1539 if fn in mf1:
1546 1540 if (fn not in deleted and
1547 1541 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1548 1542 (mf1[fn] != mf2[fn] and
1549 1543 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1550 1544 modified.append(fn)
1551 1545 elif listclean:
1552 1546 clean.append(fn)
1553 1547 del mf1[fn]
1554 1548 elif fn not in deleted:
1555 1549 added.append(fn)
1556 1550 removed = mf1.keys()
1557 1551
1558 1552 if working and modified and not self.dirstate._checklink:
1559 1553 # Symlink placeholders may get non-symlink-like contents
1560 1554 # via user error or dereferencing by NFS or Samba servers,
1561 1555 # so we filter out any placeholders that don't look like a
1562 1556 # symlink
1563 1557 sane = []
1564 1558 for f in modified:
1565 1559 if ctx2.flags(f) == 'l':
1566 1560 d = ctx2[f].data()
1567 1561 if len(d) >= 1024 or '\n' in d or util.binary(d):
1568 1562 self.ui.debug('ignoring suspect symlink placeholder'
1569 1563 ' "%s"\n' % f)
1570 1564 continue
1571 1565 sane.append(f)
1572 1566 modified = sane
1573 1567
1574 1568 r = modified, added, removed, deleted, unknown, ignored, clean
1575 1569
1576 1570 if listsubrepos:
1577 1571 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1578 1572 if working:
1579 1573 rev2 = None
1580 1574 else:
1581 1575 rev2 = ctx2.substate[subpath][1]
1582 1576 try:
1583 1577 submatch = matchmod.narrowmatcher(subpath, match)
1584 1578 s = sub.status(rev2, match=submatch, ignored=listignored,
1585 1579 clean=listclean, unknown=listunknown,
1586 1580 listsubrepos=True)
1587 1581 for rfiles, sfiles in zip(r, s):
1588 1582 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1589 1583 except error.LookupError:
1590 1584 self.ui.status(_("skipping missing subrepository: %s\n")
1591 1585 % subpath)
1592 1586
1593 1587 for l in r:
1594 1588 l.sort()
1595 1589 return r
1596 1590
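# status() returns the 7-tuple sorted above; a typical unpacking
# sketch (the unknown/ignored/clean lists stay empty unless the
# matching flags are passed):
#
#   modified, added, removed, deleted, unknown, ignored, clean = \
#       repo.status(unknown=True, clean=True)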
1597 1591 def heads(self, start=None):
1598 1592 heads = self.changelog.heads(start)
1599 1593 # sort the output in rev descending order
1600 1594 return sorted(heads, key=self.changelog.rev, reverse=True)
1601 1595
1602 1596 def branchheads(self, branch=None, start=None, closed=False):
1603 1597 '''return a (possibly filtered) list of heads for the given branch
1604 1598
1605 1599 Heads are returned in topological order, from newest to oldest.
1606 1600 If branch is None, use the dirstate branch.
1607 1601 If start is not None, return only heads reachable from start.
1608 1602 If closed is True, return heads that are marked as closed as well.
1609 1603 '''
1610 1604 if branch is None:
1611 1605 branch = self[None].branch()
1612 1606 branches = self.branchmap()
1613 1607 if branch not in branches:
1614 1608 return []
1615 1609 # the cache returns heads ordered lowest to highest
1616 1610 bheads = list(reversed(branches[branch]))
1617 1611 if start is not None:
1618 1612 # filter out the heads that cannot be reached from startrev
1619 1613 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1620 1614 bheads = [h for h in bheads if h in fbheads]
1621 1615 if not closed:
1622 1616 bheads = [h for h in bheads if not self[h].closesbranch()]
1623 1617 return bheads
1624 1618
1625 1619 def branches(self, nodes):
1626 1620 if not nodes:
1627 1621 nodes = [self.changelog.tip()]
1628 1622 b = []
1629 1623 for n in nodes:
1630 1624 t = n
1631 1625 while True:
1632 1626 p = self.changelog.parents(n)
1633 1627 if p[1] != nullid or p[0] == nullid:
1634 1628 b.append((t, n, p[0], p[1]))
1635 1629 break
1636 1630 n = p[0]
1637 1631 return b
1638 1632
1639 1633 def between(self, pairs):
1640 1634 r = []
1641 1635
1642 1636 for top, bottom in pairs:
1643 1637 n, l, i = top, [], 0
1644 1638 f = 1
1645 1639
1646 1640 while n != bottom and n != nullid:
1647 1641 p = self.changelog.parents(n)[0]
1648 1642 if i == f:
1649 1643 l.append(n)
1650 1644 f = f * 2
1651 1645 n = p
1652 1646 i += 1
1653 1647
1654 1648 r.append(l)
1655 1649
1656 1650 return r
1657 1651
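# between() samples nodes at exponentially growing distances (1, 2,
# 4, ...) below each top, stopping at bottom. For a linear history
# a <- b <- c <- d <- e, a sketch of the result:
#
#   repo.between([(e, a)])  # -> [[d, c]]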
1658 1652 def pull(self, remote, heads=None, force=False):
1659 1653 # don't open a transaction for nothing or you break future useful
1660 1654 # rollback calls
1661 1655 tr = None
1662 1656 trname = 'pull\n' + util.hidepassword(remote.url())
1663 1657 lock = self.lock()
1664 1658 try:
1665 1659 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1666 1660 force=force)
1667 1661 common, fetch, rheads = tmp
1668 1662 if not fetch:
1669 1663 self.ui.status(_("no changes found\n"))
1670 1664 added = []
1671 1665 result = 0
1672 1666 else:
1673 1667 tr = self.transaction(trname)
1674 1668 if heads is None and list(common) == [nullid]:
1675 1669 self.ui.status(_("requesting all changes\n"))
1676 1670 elif heads is None and remote.capable('changegroupsubset'):
1677 1671 # issue1320, avoid a race if remote changed after discovery
1678 1672 heads = rheads
1679 1673
1680 1674 if remote.capable('getbundle'):
1681 1675 cg = remote.getbundle('pull', common=common,
1682 1676 heads=heads or rheads)
1683 1677 elif heads is None:
1684 1678 cg = remote.changegroup(fetch, 'pull')
1685 1679 elif not remote.capable('changegroupsubset'):
1686 1680 raise util.Abort(_("partial pull cannot be done because "
1687 1681 "other repository doesn't support "
1688 1682 "changegroupsubset."))
1689 1683 else:
1690 1684 cg = remote.changegroupsubset(fetch, heads, 'pull')
1691 1685 # we use the unfiltered changelog here because hidden revisions
1692 1686 # must be taken into account for phase synchronization; they may
1693 1687 # become public and visible again.
1694 1688 cl = self.unfiltered().changelog
1695 1689 clstart = len(cl)
1696 1690 result = self.addchangegroup(cg, 'pull', remote.url())
1697 1691 clend = len(cl)
1698 1692 added = [cl.node(r) for r in xrange(clstart, clend)]
1699 1693
1700 1694 # compute target subset
1701 1695 if heads is None:
1702 1696 # We pulled everything possible
1703 1697 # sync on everything common
1704 1698 subset = common + added
1705 1699 else:
1706 1700 # We pulled a specific subset
1707 1701 # sync on this subset
1708 1702 subset = heads
1709 1703
1710 1704 # Get remote phases data from remote
1711 1705 remotephases = remote.listkeys('phases')
1712 1706 publishing = bool(remotephases.get('publishing', False))
1713 1707 if remotephases and not publishing:
1714 1708 # remote is new and non-publishing
1715 1709 pheads, _dr = phases.analyzeremotephases(self, subset,
1716 1710 remotephases)
1717 1711 phases.advanceboundary(self, phases.public, pheads)
1718 1712 phases.advanceboundary(self, phases.draft, subset)
1719 1713 else:
1720 1714 # Remote is old or publishing; all common changesets
1721 1715 # should be seen as public
1722 1716 phases.advanceboundary(self, phases.public, subset)
1723 1717
1724 1718 if obsolete._enabled:
1725 1719 self.ui.debug('fetching remote obsolete markers\n')
1726 1720 remoteobs = remote.listkeys('obsolete')
1727 1721 if 'dump0' in remoteobs:
1728 1722 if tr is None:
1729 1723 tr = self.transaction(trname)
1730 1724 for key in sorted(remoteobs, reverse=True):
1731 1725 if key.startswith('dump'):
1732 1726 data = base85.b85decode(remoteobs[key])
1733 1727 self.obsstore.mergemarkers(tr, data)
1734 1728 self.invalidatevolatilesets()
1735 1729 if tr is not None:
1736 1730 tr.close()
1737 1731 finally:
1738 1732 if tr is not None:
1739 1733 tr.release()
1740 1734 lock.release()
1741 1735
1742 1736 return result
1743 1737
1744 1738 def checkpush(self, force, revs):
1745 1739 """Extensions can override this function if additional checks have
1746 1740 to be performed before pushing, or call it if they override push
1747 1741 command.
1748 1742 """
1749 1743 pass
1750 1744
1751 1745 def push(self, remote, force=False, revs=None, newbranch=False):
1752 1746 '''Push outgoing changesets (limited by revs) from the current
1753 1747 repository to remote. Return an integer:
1754 1748 - None means nothing to push
1755 1749 - 0 means HTTP error
1756 1750 - 1 means we pushed and remote head count is unchanged *or*
1757 1751 we have outgoing changesets but refused to push
1758 1752 - other values as described by addchangegroup()
1759 1753 '''
1760 1754 # there are two ways to push to remote repo:
1761 1755 #
1762 1756 # addchangegroup assumes local user can lock remote
1763 1757 # repo (local filesystem, old ssh servers).
1764 1758 #
1765 1759 # unbundle assumes local user cannot lock remote repo (new ssh
1766 1760 # servers, http servers).
1767 1761
1768 1762 if not remote.canpush():
1769 1763 raise util.Abort(_("destination does not support push"))
1770 1764 unfi = self.unfiltered()
1771 1765 # get local lock as we might write phase data
1772 1766 locallock = self.lock()
1773 1767 try:
1774 1768 self.checkpush(force, revs)
1775 1769 lock = None
1776 1770 unbundle = remote.capable('unbundle')
1777 1771 if not unbundle:
1778 1772 lock = remote.lock()
1779 1773 try:
1780 1774 # discovery
1781 1775 fci = discovery.findcommonincoming
1782 1776 commoninc = fci(unfi, remote, force=force)
1783 1777 common, inc, remoteheads = commoninc
1784 1778 fco = discovery.findcommonoutgoing
1785 1779 outgoing = fco(unfi, remote, onlyheads=revs,
1786 1780 commoninc=commoninc, force=force)
1787 1781
1789 1783 if not outgoing.missing:
1790 1784 # nothing to push
1791 1785 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1792 1786 ret = None
1793 1787 else:
1794 1788 # something to push
1795 1789 if not force:
1796 1790 # if self.obsstore is empty --> no obsolete markers,
1797 1791 # so skip the iteration entirely
1798 1792 if unfi.obsstore:
1799 1793 # these messages are defined here for 80-char limit reasons
1800 1794 mso = _("push includes obsolete changeset: %s!")
1801 1795 mst = "push includes %s changeset: %s!"
1802 1796 # plain versions for i18n tool to detect them
1803 1797 _("push includes unstable changeset: %s!")
1804 1798 _("push includes bumped changeset: %s!")
1805 1799 _("push includes divergent changeset: %s!")
1806 1800 # If we are about to push and there is at least one
1807 1801 # obsolete or unstable changeset in missing, then at
1808 1802 # least one of the missing heads will be obsolete or
1809 1803 # unstable. So checking heads only is ok
1810 1804 for node in outgoing.missingheads:
1811 1805 ctx = unfi[node]
1812 1806 if ctx.obsolete():
1813 1807 raise util.Abort(mso % ctx)
1814 1808 elif ctx.troubled():
1815 1809 raise util.Abort(_(mst)
1816 1810 % (ctx.troubles()[0],
1817 1811 ctx))
1818 1812 discovery.checkheads(unfi, remote, outgoing,
1819 1813 remoteheads, newbranch,
1820 1814 bool(inc))
1821 1815
1822 1816 # create a changegroup from local
1823 1817 if revs is None and not outgoing.excluded:
1824 1818 # push everything,
1825 1819 # use the fast path, no race possible on push
1826 1820 cg = self._changegroup(outgoing.missing, 'push')
1827 1821 else:
1828 1822 cg = self.getlocalbundle('push', outgoing)
1829 1823
1830 1824 # apply changegroup to remote
1831 1825 if unbundle:
1832 1826 # local repo finds heads on server, finds out what
1833 1827 # revs it must push. once revs transferred, if server
1834 1828 # finds it has different heads (someone else won
1835 1829 # commit/push race), server aborts.
1836 1830 if force:
1837 1831 remoteheads = ['force']
1838 1832 # ssh: return remote's addchangegroup()
1839 1833 # http: return remote's addchangegroup() or 0 for error
1840 1834 ret = remote.unbundle(cg, remoteheads, 'push')
1841 1835 else:
1842 1836 # we return an integer indicating remote head count
1843 1837 # change
1844 1838 ret = remote.addchangegroup(cg, 'push', self.url())
1845 1839
1846 1840 if ret:
1847 1841 # push succeed, synchronize target of the push
1848 1842 cheads = outgoing.missingheads
1849 1843 elif revs is None:
1850 1844 # All-out push failed; synchronize all common
1851 1845 cheads = outgoing.commonheads
1852 1846 else:
1853 1847 # I want cheads = heads(::missingheads and ::commonheads)
1854 1848 # (missingheads is revs with secret changeset filtered out)
1855 1849 #
1856 1850 # This can be expressed as:
1857 1851 # cheads = ( (missingheads and ::commonheads)
1858 1852 # + (commonheads and ::missingheads))"
1859 1853 # )
1860 1854 #
1861 1855 # while trying to push we already computed the following:
1862 1856 # common = (::commonheads)
1863 1857 # missing = ((commonheads::missingheads) - commonheads)
1864 1858 #
1865 1859 # We can pick:
1866 1860 # * missingheads part of common (::commonheads)
1867 1861 common = set(outgoing.common)
1868 1862 cheads = [node for node in revs if node in common]
1869 1863 # and
1870 1864 # * commonheads parents on missing
1871 1865 revset = unfi.set('%ln and parents(roots(%ln))',
1872 1866 outgoing.commonheads,
1873 1867 outgoing.missing)
1874 1868 cheads.extend(c.node() for c in revset)
1875 1869 # even when we don't push, exchanging phase data is useful
1876 1870 remotephases = remote.listkeys('phases')
1877 1871 if (self.ui.configbool('ui', '_usedassubrepo', False)
1878 1872 and remotephases # server supports phases
1879 1873 and ret is None # nothing was pushed
1880 1874 and remotephases.get('publishing', False)):
1881 1875 # When:
1882 1876 # - this is a subrepo push
1883 1877 # - and the remote supports phases
1884 1878 # - and no changeset was pushed
1885 1879 # - and remote is publishing
1886 1880 # We may be in the issue 3871 case!
1887 1881 # We drop the phase synchronisation that is normally done
1888 1882 # by courtesy, to publish changesets that are possibly
1889 1883 # draft locally on the remote.
1890 1884 remotephases = {'publishing': 'True'}
1891 1885 if not remotephases: # old server or public only repo
1892 1886 phases.advanceboundary(self, phases.public, cheads)
1893 1887 # don't push any phase data as there is nothing to push
1894 1888 else:
1895 1889 ana = phases.analyzeremotephases(self, cheads, remotephases)
1896 1890 pheads, droots = ana
1897 1891 ### Apply remote phase on local
1898 1892 if remotephases.get('publishing', False):
1899 1893 phases.advanceboundary(self, phases.public, cheads)
1900 1894 else: # publish = False
1901 1895 phases.advanceboundary(self, phases.public, pheads)
1902 1896 phases.advanceboundary(self, phases.draft, cheads)
1903 1897 ### Apply local phase on remote
1904 1898
1905 1899 # Get the list of all revs draft on remote but public here.
1906 1900 # XXX Beware that the revset breaks if droots is not strictly
1907 1901 # XXX roots; we may want to ensure it is, but that is costly
1908 1902 outdated = unfi.set('heads((%ln::%ln) and public())',
1909 1903 droots, cheads)
1910 1904 for newremotehead in outdated:
1911 1905 r = remote.pushkey('phases',
1912 1906 newremotehead.hex(),
1913 1907 str(phases.draft),
1914 1908 str(phases.public))
1915 1909 if not r:
1916 1910 self.ui.warn(_('updating %s to public failed!\n')
1917 1911 % newremotehead)
1918 1912 self.ui.debug('trying to push obsolete markers to remote\n')
1919 1913 if (obsolete._enabled and self.obsstore and
1920 1914 'obsolete' in remote.listkeys('namespaces')):
1921 1915 rslts = []
1922 1916 remotedata = self.listkeys('obsolete')
1923 1917 for key in sorted(remotedata, reverse=True):
1924 1918 # reverse sort to ensure we end with dump0
1925 1919 data = remotedata[key]
1926 1920 rslts.append(remote.pushkey('obsolete', key, '', data))
1927 1921 if [r for r in rslts if not r]:
1928 1922 msg = _('failed to push some obsolete markers!\n')
1929 1923 self.ui.warn(msg)
1930 1924 finally:
1931 1925 if lock is not None:
1932 1926 lock.release()
1933 1927 finally:
1934 1928 locallock.release()
1935 1929
1936 1930 self.ui.debug("checking for updated bookmarks\n")
1937 1931 rb = remote.listkeys('bookmarks')
1938 1932 for k in rb.keys():
1939 1933 if k in unfi._bookmarks:
1940 1934 nr, nl = rb[k], hex(self._bookmarks[k])
1941 1935 if nr in unfi:
1942 1936 cr = unfi[nr]
1943 1937 cl = unfi[nl]
1944 1938 if bookmarks.validdest(unfi, cr, cl):
1945 1939 r = remote.pushkey('bookmarks', k, nr, nl)
1946 1940 if r:
1947 1941 self.ui.status(_("updating bookmark %s\n") % k)
1948 1942 else:
1949 1943 self.ui.warn(_('updating bookmark %s'
1950 1944 ' failed!\n') % k)
1951 1945
1952 1946 return ret
1953 1947
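# Sketch of interpreting push() results per the docstring above:
#
#   ret = repo.push(remote)
#   if ret is None:
#       repo.ui.status('nothing to push\n')
#   elif ret == 0:
#       repo.ui.warn('push failed\n')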
1954 1948 def changegroupinfo(self, nodes, source):
1955 1949 if self.ui.verbose or source == 'bundle':
1956 1950 self.ui.status(_("%d changesets found\n") % len(nodes))
1957 1951 if self.ui.debugflag:
1958 1952 self.ui.debug("list of changesets:\n")
1959 1953 for node in nodes:
1960 1954 self.ui.debug("%s\n" % hex(node))
1961 1955
1962 1956 def changegroupsubset(self, bases, heads, source):
1963 1957 """Compute a changegroup consisting of all the nodes that are
1964 1958 descendants of any of the bases and ancestors of any of the heads.
1965 1959 Return a chunkbuffer object whose read() method will return
1966 1960 successive changegroup chunks.
1967 1961
1968 1962 It is fairly complex as determining which filenodes and which
1969 1963 manifest nodes need to be included for the changeset to be complete
1970 1964 is non-trivial.
1971 1965
1972 1966 Another wrinkle is doing the reverse, figuring out which changeset in
1973 1967 the changegroup a particular filenode or manifestnode belongs to.
1974 1968 """
1975 1969 cl = self.changelog
1976 1970 if not bases:
1977 1971 bases = [nullid]
1978 1972 csets, bases, heads = cl.nodesbetween(bases, heads)
1979 1973 # We assume that all ancestors of bases are known
1980 1974 common = cl.ancestors([cl.rev(n) for n in bases])
1981 1975 return self._changegroupsubset(common, csets, heads, source)
1982 1976
1983 1977 def getlocalbundle(self, source, outgoing):
1984 1978 """Like getbundle, but taking a discovery.outgoing as an argument.
1985 1979
1986 1980 This is only implemented for local repos and reuses potentially
1987 1981 precomputed sets in outgoing."""
1988 1982 if not outgoing.missing:
1989 1983 return None
1990 1984 return self._changegroupsubset(outgoing.common,
1991 1985 outgoing.missing,
1992 1986 outgoing.missingheads,
1993 1987 source)
1994 1988
1995 1989 def getbundle(self, source, heads=None, common=None):
1996 1990 """Like changegroupsubset, but returns the set difference between the
1997 1991 ancestors of heads and the ancestors of common.
1998 1992
1999 1993 If heads is None, use the local heads. If common is None, use [nullid].
2000 1994
2001 1995 The nodes in common might not all be known locally due to the way the
2002 1996 current discovery protocol works.
2003 1997 """
2004 1998 cl = self.changelog
2005 1999 if common:
2006 2000 hasnode = cl.hasnode
2007 2001 common = [n for n in common if hasnode(n)]
2008 2002 else:
2009 2003 common = [nullid]
2010 2004 if not heads:
2011 2005 heads = cl.heads()
2012 2006 return self.getlocalbundle(source,
2013 2007 discovery.outgoing(cl, common, heads))
2014 2008
2015 2009 @unfilteredmethod
2016 2010 def _changegroupsubset(self, commonrevs, csets, heads, source):
2017 2011
2018 2012 cl = self.changelog
2019 2013 mf = self.manifest
2020 2014 mfs = {} # needed manifests
2021 2015 fnodes = {} # needed file nodes
2022 2016 changedfiles = set()
2023 2017 fstate = ['', {}]
2024 2018 count = [0, 0]
2025 2019
2026 2020 # can we go through the fast path?
2027 2021 heads.sort()
2028 2022 if heads == sorted(self.heads()):
2029 2023 return self._changegroup(csets, source)
2030 2024
2031 2025 # slow path
2032 2026 self.hook('preoutgoing', throw=True, source=source)
2033 2027 self.changegroupinfo(csets, source)
2034 2028
2035 2029 # filter any nodes that claim to be part of the known set
2036 2030 def prune(revlog, missing):
2037 2031 rr, rl = revlog.rev, revlog.linkrev
2038 2032 return [n for n in missing
2039 2033 if rl(rr(n)) not in commonrevs]
2040 2034
2041 2035 progress = self.ui.progress
2042 2036 _bundling = _('bundling')
2043 2037 _changesets = _('changesets')
2044 2038 _manifests = _('manifests')
2045 2039 _files = _('files')
2046 2040
2047 2041 def lookup(revlog, x):
2048 2042 if revlog == cl:
2049 2043 c = cl.read(x)
2050 2044 changedfiles.update(c[3])
2051 2045 mfs.setdefault(c[0], x)
2052 2046 count[0] += 1
2053 2047 progress(_bundling, count[0],
2054 2048 unit=_changesets, total=count[1])
2055 2049 return x
2056 2050 elif revlog == mf:
2057 2051 clnode = mfs[x]
2058 2052 mdata = mf.readfast(x)
2059 2053 for f, n in mdata.iteritems():
2060 2054 if f in changedfiles:
2061 2055 fnodes[f].setdefault(n, clnode)
2062 2056 count[0] += 1
2063 2057 progress(_bundling, count[0],
2064 2058 unit=_manifests, total=count[1])
2065 2059 return clnode
2066 2060 else:
2067 2061 progress(_bundling, count[0], item=fstate[0],
2068 2062 unit=_files, total=count[1])
2069 2063 return fstate[1][x]
2070 2064
2071 2065 bundler = changegroup.bundle10(lookup)
2072 2066 reorder = self.ui.config('bundle', 'reorder', 'auto')
2073 2067 if reorder == 'auto':
2074 2068 reorder = None
2075 2069 else:
2076 2070 reorder = util.parsebool(reorder)
2077 2071
2078 2072 def gengroup():
2079 2073 # Create a changenode group generator that will call our functions
2080 2074 # back to look up the owning changenode and collect information.
2081 2075 count[:] = [0, len(csets)]
2082 2076 for chunk in cl.group(csets, bundler, reorder=reorder):
2083 2077 yield chunk
2084 2078 progress(_bundling, None)
2085 2079
2086 2080 # Create a generator for the manifestnodes that calls our lookup
2087 2081 # and data collection functions back.
2088 2082 for f in changedfiles:
2089 2083 fnodes[f] = {}
2090 2084 count[:] = [0, len(mfs)]
2091 2085 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2092 2086 yield chunk
2093 2087 progress(_bundling, None)
2094 2088
2095 2089 mfs.clear()
2096 2090
2097 2091 # Go through all our files in order sorted by name.
2098 2092 count[:] = [0, len(changedfiles)]
2099 2093 for fname in sorted(changedfiles):
2100 2094 filerevlog = self.file(fname)
2101 2095 if not len(filerevlog):
2102 2096 raise util.Abort(_("empty or missing revlog for %s")
2103 2097 % fname)
2104 2098 fstate[0] = fname
2105 2099 fstate[1] = fnodes.pop(fname, {})
2106 2100
2107 2101 nodelist = prune(filerevlog, fstate[1])
2108 2102 if nodelist:
2109 2103 count[0] += 1
2110 2104 yield bundler.fileheader(fname)
2111 2105 for chunk in filerevlog.group(nodelist, bundler, reorder):
2112 2106 yield chunk
2113 2107
2114 2108 # Signal that no more groups are left.
2115 2109 yield bundler.close()
2116 2110 progress(_bundling, None)
2117 2111
2118 2112 if csets:
2119 2113 self.hook('outgoing', node=hex(csets[0]), source=source)
2120 2114
2121 2115 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2122 2116
2123 2117 def changegroup(self, basenodes, source):
2124 2118 # to avoid a race we use changegroupsubset() (issue1320)
2125 2119 return self.changegroupsubset(basenodes, self.heads(), source)
2126 2120
2127 2121 @unfilteredmethod
2128 2122 def _changegroup(self, nodes, source):
2129 2123 """Compute the changegroup of all nodes that we have that a recipient
2130 2124 doesn't. Return a chunkbuffer object whose read() method will return
2131 2125 successive changegroup chunks.
2132 2126
2133 2127 This is much easier than the previous function as we can assume that
2134 2128 the recipient has any changenode we aren't sending them.
2135 2129
2136 2130 nodes is the set of nodes to send"""
2137 2131
2138 2132 cl = self.changelog
2139 2133 mf = self.manifest
2140 2134 mfs = {}
2141 2135 changedfiles = set()
2142 2136 fstate = ['']
2143 2137 count = [0, 0]
2144 2138
2145 2139 self.hook('preoutgoing', throw=True, source=source)
2146 2140 self.changegroupinfo(nodes, source)
2147 2141
2148 2142 revset = set([cl.rev(n) for n in nodes])
2149 2143
2150 2144 def gennodelst(log):
2151 2145 ln, llr = log.node, log.linkrev
2152 2146 return [ln(r) for r in log if llr(r) in revset]
2153 2147
2154 2148 progress = self.ui.progress
2155 2149 _bundling = _('bundling')
2156 2150 _changesets = _('changesets')
2157 2151 _manifests = _('manifests')
2158 2152 _files = _('files')
2159 2153
2160 2154 def lookup(revlog, x):
2161 2155 if revlog == cl:
2162 2156 c = cl.read(x)
2163 2157 changedfiles.update(c[3])
2164 2158 mfs.setdefault(c[0], x)
2165 2159 count[0] += 1
2166 2160 progress(_bundling, count[0],
2167 2161 unit=_changesets, total=count[1])
2168 2162 return x
2169 2163 elif revlog == mf:
2170 2164 count[0] += 1
2171 2165 progress(_bundling, count[0],
2172 2166 unit=_manifests, total=count[1])
2173 2167 return cl.node(revlog.linkrev(revlog.rev(x)))
2174 2168 else:
2175 2169 progress(_bundling, count[0], item=fstate[0],
2176 2170 total=count[1], unit=_files)
2177 2171 return cl.node(revlog.linkrev(revlog.rev(x)))
2178 2172
2179 2173 bundler = changegroup.bundle10(lookup)
2180 2174 reorder = self.ui.config('bundle', 'reorder', 'auto')
2181 2175 if reorder == 'auto':
2182 2176 reorder = None
2183 2177 else:
2184 2178 reorder = util.parsebool(reorder)
2185 2179
2186 2180 def gengroup():
2187 2181 '''yield a sequence of changegroup chunks (strings)'''
2188 2182 # construct a list of all changed files
2189 2183
2190 2184 count[:] = [0, len(nodes)]
2191 2185 for chunk in cl.group(nodes, bundler, reorder=reorder):
2192 2186 yield chunk
2193 2187 progress(_bundling, None)
2194 2188
2195 2189 count[:] = [0, len(mfs)]
2196 2190 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2197 2191 yield chunk
2198 2192 progress(_bundling, None)
2199 2193
2200 2194 count[:] = [0, len(changedfiles)]
2201 2195 for fname in sorted(changedfiles):
2202 2196 filerevlog = self.file(fname)
2203 2197 if not len(filerevlog):
2204 2198 raise util.Abort(_("empty or missing revlog for %s")
2205 2199 % fname)
2206 2200 fstate[0] = fname
2207 2201 nodelist = gennodelst(filerevlog)
2208 2202 if nodelist:
2209 2203 count[0] += 1
2210 2204 yield bundler.fileheader(fname)
2211 2205 for chunk in filerevlog.group(nodelist, bundler, reorder):
2212 2206 yield chunk
2213 2207 yield bundler.close()
2214 2208 progress(_bundling, None)
2215 2209
2216 2210 if nodes:
2217 2211 self.hook('outgoing', node=hex(nodes[0]), source=source)
2218 2212
2219 2213 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2220 2214
2221 2215 @unfilteredmethod
2222 2216 def addchangegroup(self, source, srctype, url, emptyok=False):
2223 2217 """Add the changegroup returned by source.read() to this repo.
2224 2218 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2225 2219 the URL of the repo where this changegroup is coming from.
2226 2220
2227 2221 Return an integer summarizing the change to this repo:
2228 2222 - nothing changed or no source: 0
2229 2223 - more heads than before: 1+added heads (2..n)
2230 2224 - fewer heads than before: -1-removed heads (-2..-n)
2231 2225 - number of heads stays the same: 1
2232 2226 """
2233 2227 def csmap(x):
2234 2228 self.ui.debug("add changeset %s\n" % short(x))
2235 2229 return len(cl)
2236 2230
2237 2231 def revmap(x):
2238 2232 return cl.rev(x)
2239 2233
2240 2234 if not source:
2241 2235 return 0
2242 2236
2243 2237 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2244 2238
2245 2239 changesets = files = revisions = 0
2246 2240 efiles = set()
2247 2241
2248 2242 # write changelog data to temp files so concurrent readers will not see
2249 2243 # an inconsistent view
2250 2244 cl = self.changelog
2251 2245 cl.delayupdate()
2252 2246 oldheads = cl.heads()
2253 2247
2254 2248 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2255 2249 try:
2256 2250 trp = weakref.proxy(tr)
2257 2251 # pull off the changeset group
2258 2252 self.ui.status(_("adding changesets\n"))
2259 2253 clstart = len(cl)
2260 2254 class prog(object):
2261 2255 step = _('changesets')
2262 2256 count = 1
2263 2257 ui = self.ui
2264 2258 total = None
2265 2259 def __call__(self):
2266 2260 self.ui.progress(self.step, self.count, unit=_('chunks'),
2267 2261 total=self.total)
2268 2262 self.count += 1
2269 2263 pr = prog()
2270 2264 source.callback = pr
2271 2265
2272 2266 source.changelogheader()
2273 2267 srccontent = cl.addgroup(source, csmap, trp)
2274 2268 if not (srccontent or emptyok):
2275 2269 raise util.Abort(_("received changelog group is empty"))
2276 2270 clend = len(cl)
2277 2271 changesets = clend - clstart
2278 2272 for c in xrange(clstart, clend):
2279 2273 efiles.update(self[c].files())
2280 2274 efiles = len(efiles)
2281 2275 self.ui.progress(_('changesets'), None)
2282 2276
2283 2277 # pull off the manifest group
2284 2278 self.ui.status(_("adding manifests\n"))
2285 2279 pr.step = _('manifests')
2286 2280 pr.count = 1
2287 2281 pr.total = changesets # manifests <= changesets
2288 2282 # no need to check for empty manifest group here:
2289 2283 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2290 2284 # no new manifest will be created and the manifest group will
2291 2285 # be empty during the pull
2292 2286 source.manifestheader()
2293 2287 self.manifest.addgroup(source, revmap, trp)
2294 2288 self.ui.progress(_('manifests'), None)
2295 2289
2296 2290 needfiles = {}
2297 2291 if self.ui.configbool('server', 'validate', default=False):
2298 2292 # validate incoming csets have their manifests
2299 2293 for cset in xrange(clstart, clend):
2300 2294 mfest = self.changelog.read(self.changelog.node(cset))[0]
2301 2295 mfest = self.manifest.readdelta(mfest)
2302 2296 # store file nodes we must see
2303 2297 for f, n in mfest.iteritems():
2304 2298 needfiles.setdefault(f, set()).add(n)
2305 2299
2306 2300 # process the files
2307 2301 self.ui.status(_("adding file changes\n"))
2308 2302 pr.step = _('files')
2309 2303 pr.count = 1
2310 2304 pr.total = efiles
2311 2305 source.callback = None
2312 2306
2313 2307 while True:
2314 2308 chunkdata = source.filelogheader()
2315 2309 if not chunkdata:
2316 2310 break
2317 2311 f = chunkdata["filename"]
2318 2312 self.ui.debug("adding %s revisions\n" % f)
2319 2313 pr()
2320 2314 fl = self.file(f)
2321 2315 o = len(fl)
2322 2316 if not fl.addgroup(source, revmap, trp):
2323 2317 raise util.Abort(_("received file revlog group is empty"))
2324 2318 revisions += len(fl) - o
2325 2319 files += 1
2326 2320 if f in needfiles:
2327 2321 needs = needfiles[f]
2328 2322 for new in xrange(o, len(fl)):
2329 2323 n = fl.node(new)
2330 2324 if n in needs:
2331 2325 needs.remove(n)
2332 2326 else:
2333 2327 raise util.Abort(
2334 2328 _("received spurious file revlog entry"))
2335 2329 if not needs:
2336 2330 del needfiles[f]
2337 2331 self.ui.progress(_('files'), None)
2338 2332
2339 2333 for f, needs in needfiles.iteritems():
2340 2334 fl = self.file(f)
2341 2335 for n in needs:
2342 2336 try:
2343 2337 fl.rev(n)
2344 2338 except error.LookupError:
2345 2339 raise util.Abort(
2346 2340 _('missing file data for %s:%s - run hg verify') %
2347 2341 (f, hex(n)))
2348 2342
2349 2343 dh = 0
2350 2344 if oldheads:
2351 2345 heads = cl.heads()
2352 2346 dh = len(heads) - len(oldheads)
2353 2347 for h in heads:
2354 2348 if h not in oldheads and self[h].closesbranch():
2355 2349 dh -= 1
2356 2350 htext = ""
2357 2351 if dh:
2358 2352 htext = _(" (%+d heads)") % dh
2359 2353
2360 2354 self.ui.status(_("added %d changesets"
2361 2355 " with %d changes to %d files%s\n")
2362 2356 % (changesets, revisions, files, htext))
2363 2357 self.invalidatevolatilesets()
2364 2358
2365 2359 if changesets > 0:
2366 2360 p = lambda: cl.writepending() and self.root or ""
2367 2361 self.hook('pretxnchangegroup', throw=True,
2368 2362 node=hex(cl.node(clstart)), source=srctype,
2369 2363 url=url, pending=p)
2370 2364
2371 2365 added = [cl.node(r) for r in xrange(clstart, clend)]
2372 2366 publishing = self.ui.configbool('phases', 'publish', True)
2373 2367 if srctype == 'push':
2374 2368 # Old servers can not push the boundary themselves.
2375 2369 # New servers won't push the boundary if the changeset
2376 2370 # already existed locally as secret
2377 2371 #
2378 2372 # We should not use added here but the list of all changes in
2379 2373 # the bundle
2380 2374 if publishing:
2381 2375 phases.advanceboundary(self, phases.public, srccontent)
2382 2376 else:
2383 2377 phases.advanceboundary(self, phases.draft, srccontent)
2384 2378 phases.retractboundary(self, phases.draft, added)
2385 2379 elif srctype != 'strip':
2386 2380 # publishing only alters behavior during push
2387 2381 #
2388 2382 # strip should not touch boundary at all
2389 2383 phases.retractboundary(self, phases.draft, added)
2390 2384
2391 2385 # make changelog see real files again
2392 2386 cl.finalize(trp)
2393 2387
2394 2388 tr.close()
2395 2389
2396 2390 if changesets > 0:
2397 2391 if srctype != 'strip':
2398 2392 # During strip, the branchcache is invalid but the coming
2399 2393 # call to `destroyed` will repair it.
2400 2394 # In other cases we can safely update the cache on disk.
2401 2395 branchmap.updatecache(self.filtered('served'))
2402 2396 def runhooks():
2403 2397 # forcefully update the on-disk branch cache
2404 2398 self.ui.debug("updating the branch cache\n")
2405 2399 self.hook("changegroup", node=hex(cl.node(clstart)),
2406 2400 source=srctype, url=url)
2407 2401
2408 2402 for n in added:
2409 2403 self.hook("incoming", node=hex(n), source=srctype,
2410 2404 url=url)
2411 2405 self._afterlock(runhooks)
2412 2406
2413 2407 finally:
2414 2408 tr.release()
2415 2409 # never return 0 here:
2416 2410 if dh < 0:
2417 2411 return dh - 1
2418 2412 else:
2419 2413 return dh + 1
2420 2414
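As a hedged illustration of the return-value contract documented above, a caller could decode the integer like this; `ret` is assumed to come from addchangegroup().

    def describe(ret):
        if ret == 0:
            return 'nothing changed (or no source)'
        if ret == 1:
            return 'changed, head count unchanged'
        if ret > 1:
            return '%d head(s) added' % (ret - 1)
        return '%d head(s) removed' % (-1 - ret)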
2421 2415 def stream_in(self, remote, requirements):
2422 2416 lock = self.lock()
2423 2417 try:
2424 2418 # Save remote branchmap. We will use it later
2425 2419 # to speed up branchcache creation
2426 2420 rbranchmap = None
2427 2421 if remote.capable("branchmap"):
2428 2422 rbranchmap = remote.branchmap()
2429 2423
2430 2424 fp = remote.stream_out()
2431 2425 l = fp.readline()
2432 2426 try:
2433 2427 resp = int(l)
2434 2428 except ValueError:
2435 2429 raise error.ResponseError(
2436 2430 _('unexpected response from remote server:'), l)
2437 2431 if resp == 1:
2438 2432 raise util.Abort(_('operation forbidden by server'))
2439 2433 elif resp == 2:
2440 2434 raise util.Abort(_('locking the remote repository failed'))
2441 2435 elif resp != 0:
2442 2436 raise util.Abort(_('the server sent an unknown error code'))
2443 2437 self.ui.status(_('streaming all changes\n'))
2444 2438 l = fp.readline()
2445 2439 try:
2446 2440 total_files, total_bytes = map(int, l.split(' ', 1))
2447 2441 except (ValueError, TypeError):
2448 2442 raise error.ResponseError(
2449 2443 _('unexpected response from remote server:'), l)
2450 2444 self.ui.status(_('%d files to transfer, %s of data\n') %
2451 2445 (total_files, util.bytecount(total_bytes)))
2452 2446 handled_bytes = 0
2453 2447 self.ui.progress(_('clone'), 0, total=total_bytes)
2454 2448 start = time.time()
2455 2449 for i in xrange(total_files):
2456 2450 # XXX doesn't support '\n' or '\r' in filenames
2457 2451 l = fp.readline()
2458 2452 try:
2459 2453 name, size = l.split('\0', 1)
2460 2454 size = int(size)
2461 2455 except (ValueError, TypeError):
2462 2456 raise error.ResponseError(
2463 2457 _('unexpected response from remote server:'), l)
2464 2458 if self.ui.debugflag:
2465 2459 self.ui.debug('adding %s (%s)\n' %
2466 2460 (name, util.bytecount(size)))
2467 2461 # for backwards compat, name was partially encoded
2468 2462 ofp = self.sopener(store.decodedir(name), 'w')
2469 2463 for chunk in util.filechunkiter(fp, limit=size):
2470 2464 handled_bytes += len(chunk)
2471 2465 self.ui.progress(_('clone'), handled_bytes,
2472 2466 total=total_bytes)
2473 2467 ofp.write(chunk)
2474 2468 ofp.close()
2475 2469 elapsed = time.time() - start
2476 2470 if elapsed <= 0:
2477 2471 elapsed = 0.001
2478 2472 self.ui.progress(_('clone'), None)
2479 2473 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2480 2474 (util.bytecount(total_bytes), elapsed,
2481 2475 util.bytecount(total_bytes / elapsed)))
2482 2476
2483 2477 # new requirements = old non-format requirements +
2484 2478 # new format-related
2485 2479 # requirements from the streamed-in repository
2486 2480 requirements.update(set(self.requirements) - self.supportedformats)
2487 2481 self._applyrequirements(requirements)
2488 2482 self._writerequirements()
2489 2483
2490 2484 if rbranchmap:
2491 2485 rbheads = []
2492 2486 for bheads in rbranchmap.itervalues():
2493 2487 rbheads.extend(bheads)
2494 2488
2495 2489 if rbheads:
2496 2490 rtiprev = max((int(self.changelog.rev(node))
2497 2491 for node in rbheads))
2498 2492 cache = branchmap.branchcache(rbranchmap,
2499 2493 self[rtiprev].node(),
2500 2494 rtiprev)
2501 2495 # Try to stick it as low as possible
2502 2496 # filters above served are unlikely to be fetched from a clone
2503 2497 for candidate in ('base', 'immutable', 'served'):
2504 2498 rview = self.filtered(candidate)
2505 2499 if cache.validfor(rview):
2506 2500 self._branchcaches[candidate] = cache
2507 2501 cache.write(rview)
2508 2502 break
2509 2503 self.invalidate()
2510 2504 return len(self.heads()) + 1
2511 2505 finally:
2512 2506 lock.release()
2513 2507
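A hedged summary of the legacy stream wire format, inferred only from the parsing code above; `fp` is assumed to be the remote stream.

    # line 1: status code ('0' ok, '1' forbidden, '2' remote lock failed)
    # line 2: '<total_files> <total_bytes>'
    # per file: '<name>\0<size>' on one line, then <size> raw bytes
    def readstreamheader(fp):
        if int(fp.readline()) != 0:
            raise ValueError('server refused streaming clone')
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        return total_files, total_bytes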
2514 2508 def clone(self, remote, heads=[], stream=False):
2515 2509 '''clone remote repository.
2516 2510
2517 2511 keyword arguments:
2518 2512 heads: list of revs to clone (forces use of pull)
2519 2513 stream: use streaming clone if possible'''
2520 2514
2521 2515 # now, all clients that can request uncompressed clones can
2522 2516 # read repo formats supported by all servers that can serve
2523 2517 # them.
2524 2518
2525 2519 # if revlog format changes, client will have to check version
2526 2520 # and format flags on "stream" capability, and use
2527 2521 # uncompressed only if compatible.
2528 2522
2529 2523 if not stream:
2530 2524 # if the server explicitly prefers to stream (for fast LANs)
2531 2525 stream = remote.capable('stream-preferred')
2532 2526
2533 2527 if stream and not heads:
2534 2528 # 'stream' means remote revlog format is revlogv1 only
2535 2529 if remote.capable('stream'):
2536 2530 return self.stream_in(remote, set(('revlogv1',)))
2537 2531 # otherwise, 'streamreqs' contains the remote revlog format
2538 2532 streamreqs = remote.capable('streamreqs')
2539 2533 if streamreqs:
2540 2534 streamreqs = set(streamreqs.split(','))
2541 2535 # if we support it, stream in and adjust our requirements
2542 2536 if not streamreqs - self.supportedformats:
2543 2537 return self.stream_in(remote, streamreqs)
2544 2538 return self.pull(remote, heads)
2545 2539
2546 2540 def pushkey(self, namespace, key, old, new):
2547 2541 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2548 2542 old=old, new=new)
2549 2543 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2550 2544 ret = pushkey.push(self, namespace, key, old, new)
2551 2545 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2552 2546 ret=ret)
2553 2547 return ret
2554 2548
2555 2549 def listkeys(self, namespace):
2556 2550 self.hook('prelistkeys', throw=True, namespace=namespace)
2557 2551 self.ui.debug('listing keys for "%s"\n' % namespace)
2558 2552 values = pushkey.list(self, namespace)
2559 2553 self.hook('listkeys', namespace=namespace, values=values)
2560 2554 return values
2561 2555
2562 2556 def debugwireargs(self, one, two, three=None, four=None, five=None):
2563 2557 '''used to test argument passing over the wire'''
2564 2558 return "%s %s %s %s %s" % (one, two, three, four, five)
2565 2559
2566 2560 def savecommitmessage(self, text):
2567 2561 fp = self.opener('last-message.txt', 'wb')
2568 2562 try:
2569 2563 fp.write(text)
2570 2564 finally:
2571 2565 fp.close()
2572 2566 return self.pathto(fp.name[len(self.root) + 1:])
2573 2567
2574 2568 # used to avoid circular references so destructors work
2575 2569 def aftertrans(files):
2576 2570 renamefiles = [tuple(t) for t in files]
2577 2571 def a():
2578 2572 for src, dest in renamefiles:
2579 2573 try:
2580 2574 util.rename(src, dest)
2581 2575 except OSError: # journal file does not yet exist
2582 2576 pass
2583 2577 return a
2584 2578
2585 2579 def undoname(fn):
2586 2580 base, name = os.path.split(fn)
2587 2581 assert name.startswith('journal')
2588 2582 return os.path.join(base, name.replace('journal', 'undo', 1))
2589 2583
2590 2584 def instance(ui, path, create):
2591 2585 return localrepository(ui, util.urllocalpath(path), create)
2592 2586
2593 2587 def islocal(path):
2594 2588 return True
@@ -1,752 +1,753 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 building new tools to reconcile conflicting rewriting actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that use changeset X as a precursor are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successor are called "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
31 31 - When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A'))
34 34
35 35 - When changesets A and B are folded into a new changeset C two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
40 40 - When changeset A is simply "pruned" from the graph, one marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker is used:
45 45
46 46 (A, (B, C))
47 47
48 48 We use a single marker to distinguish the "split" case from the "divergence"
49 case. If two independants operation rewrite the same changeset A in to A' and
49 case. If two independent operations rewrite the same changeset A into A' and
50 50 A'' we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "N", can be zero.
69 69
70 70 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in common
73 73 obsolete marker operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changesets identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 81 string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 from i18n import _
88 88
89 89 _pack = struct.pack
90 90 _unpack = struct.unpack
91 91
92 92 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
93 93
94 94 # the obsolete feature is not mature enough to be enabled by default.
95 95 # you have to rely on a third party extension to enable this.
96 96 _enabled = False
97 97
98 98 # data used for parsing and writing
99 99 _fmversion = 0
100 100 _fmfixed = '>BIB20s'
101 101 _fmnode = '20s'
102 102 _fmfsize = struct.calcsize(_fmfixed)
103 103 _fnodesize = struct.calcsize(_fmnode)
104 104
105 105 ### obsolescence marker flag
106 106
107 107 ## bumpedfix flag
108 108 #
109 109 # When a changeset A' succeeds a changeset A which became public, we call A'
110 110 # "bumped" because it's a successor of a public changeset
111 111 #
112 112 # o A' (bumped)
113 113 # |`:
114 114 # | o A
115 115 # |/
116 116 # o Z
117 117 #
118 118 # The way to solve this situation is to create a new changeset Ad as a child
119 119 # of A. This changeset has the same content as A'. So the diff from A to A'
120 120 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
121 121 #
122 122 # o Ad
123 123 # |`:
124 124 # | x A'
125 125 # |'|
126 126 # o | A
127 127 # |/
128 128 # o Z
129 129 #
130 130 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
131 131 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
132 # This flag mean that the successors are an interdiff that fix the bumped
133 # situation, breaking the transitivity of "bumped" here.
132 # This flag means that the successors express the changes between the public and
133 # bumped version and fix the situation, breaking the transitivity of
134 # "bumped" here.
134 135 bumpedfix = 1
135 136
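A hedged sketch of recording the <A', (Ad,)> marker described above with this flag; `repo`, `tr`, `aprime`, and `ad` are assumptions.

    # obsstore.create(transaction, precursor, successors, flag, metadata)
    repo.obsstore.create(tr, aprime, (ad,), flag=bumpedfix)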
136 137 def _readmarkers(data):
137 138 """Read and enumerate markers from raw data"""
138 139 off = 0
139 140 diskversion = _unpack('>B', data[off:off + 1])[0]
140 141 off += 1
141 142 if diskversion != _fmversion:
142 143 raise util.Abort(_('parsing obsolete marker: unknown version %r')
143 144 % diskversion)
144 145
145 146 # Loop on markers
146 147 l = len(data)
147 148 while off + _fmfsize <= l:
148 149 # read fixed part
149 150 cur = data[off:off + _fmfsize]
150 151 off += _fmfsize
151 152 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
152 153 # read replacement
153 154 sucs = ()
154 155 if nbsuc:
155 156 s = (_fnodesize * nbsuc)
156 157 cur = data[off:off + s]
157 158 sucs = _unpack(_fmnode * nbsuc, cur)
158 159 off += s
159 160 # read metadata
160 161 # (metadata will be decoded on demand)
161 162 metadata = data[off:off + mdsize]
162 163 if len(metadata) != mdsize:
163 164 raise util.Abort(_('parsing obsolete marker: metadata is too '
164 165 'short, %d bytes expected, got %d')
165 166 % (mdsize, len(metadata)))
166 167 off += mdsize
167 168 yield (pre, sucs, flags, metadata)
168 169
169 170 def encodemeta(meta):
170 171 """Return encoded metadata string to string mapping.
171 172
172 173 Assume no ':' in key and no '\0' in both key and value."""
173 174 for key, value in meta.iteritems():
174 175 if ':' in key or '\0' in key:
175 176 raise ValueError("':' and '\0' are forbidden in metadata keys")
176 177 if '\0' in value:
177 178 raise ValueError("'\0' is forbidden in metadata values")
178 179 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
179 180
180 181 def decodemeta(data):
181 182 """Return string to string dictionary from encoded version."""
182 183 d = {}
183 184 for l in data.split('\0'):
184 185 if l:
185 186 key, value = l.split(':')
186 187 d[key] = value
187 188 return d
188 189
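A round-trip sketch for the two helpers above (example values only):

    meta = {'user': 'alice', 'date': '0 0'}
    blob = encodemeta(meta)       # 'date:0 0\x00user:alice' (keys sorted)
    assert decodemeta(blob) == meta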
189 190 class marker(object):
190 191 """Wrap obsolete marker raw data"""
191 192
192 193 def __init__(self, repo, data):
193 194 # the repo argument will be used to create changectx in later versions
194 195 self._repo = repo
195 196 self._data = data
196 197 self._decodedmeta = None
197 198
198 199 def precnode(self):
199 200 """Precursor changeset node identifier"""
200 201 return self._data[0]
201 202
202 203 def succnodes(self):
203 204 """List of successor changesets node identifiers"""
204 205 return self._data[1]
205 206
206 207 def metadata(self):
207 208 """Decoded metadata dictionary"""
208 209 if self._decodedmeta is None:
209 210 self._decodedmeta = decodemeta(self._data[3])
210 211 return self._decodedmeta
211 212
212 213 def date(self):
213 214 """Creation date as (unixtime, offset)"""
214 215 parts = self.metadata()['date'].split(' ')
215 216 return (float(parts[0]), int(parts[1]))
216 217
217 218 class obsstore(object):
218 219 """Store obsolete markers
219 220
220 221 Markers can be accessed with two mappings:
221 222 - precursors[x] -> set(markers on precursors edges of x)
222 223 - successors[x] -> set(markers on successors edges of x)
223 224 """
224 225
225 226 def __init__(self, sopener):
226 227 # caches for various obsolescence related cache
227 228 self.caches = {}
228 229 self._all = []
229 230 # new markers to serialize
230 231 self.precursors = {}
231 232 self.successors = {}
232 233 self.sopener = sopener
233 234 data = sopener.tryread('obsstore')
234 235 if data:
235 236 self._load(_readmarkers(data))
236 237
237 238 def __iter__(self):
238 239 return iter(self._all)
239 240
240 241 def __nonzero__(self):
241 242 return bool(self._all)
242 243
243 244 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
244 245 """obsolete: add a new obsolete marker
245 246
246 247 * ensure it is hashable
247 248 * check mandatory metadata
248 249 * encode metadata
249 250 """
250 251 if metadata is None:
251 252 metadata = {}
252 253 if len(prec) != 20:
253 254 raise ValueError(prec)
254 255 for succ in succs:
255 256 if len(succ) != 20:
256 257 raise ValueError(succ)
257 258 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
258 259 self.add(transaction, [marker])
259 260
260 261 def add(self, transaction, markers):
261 262 """Add new markers to the store
262 263
263 264 Take care of filtering duplicates.
264 265 Return the number of new markers."""
265 266 if not _enabled:
266 267 raise util.Abort('obsolete feature is not enabled on this repo')
267 268 new = [m for m in markers if m not in self._all]
268 269 if new:
269 270 f = self.sopener('obsstore', 'ab')
270 271 try:
271 272 # Whether the file's current position is at the beginning or at
272 273 # the end after opening a file for appending is implementation
273 274 # defined. So we must seek to the end before calling tell(),
274 275 # or we may get a zero offset for non-zero sized files on
275 276 # some platforms (issue3543).
276 277 f.seek(0, _SEEK_END)
277 278 offset = f.tell()
278 279 transaction.add('obsstore', offset)
279 280 # offset == 0: new file - add the version header
280 281 for bytes in _encodemarkers(new, offset == 0):
281 282 f.write(bytes)
282 283 finally:
283 284 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
284 285 # call 'filecacheentry.refresh()' here
285 286 f.close()
286 287 self._load(new)
287 288 # new markers *may* have changed several sets. invalidate the cache.
288 289 self.caches.clear()
289 290 return len(new)
290 291
291 292 def mergemarkers(self, transaction, data):
292 293 markers = _readmarkers(data)
293 294 self.add(transaction, markers)
294 295
295 296 def _load(self, markers):
296 297 for mark in markers:
297 298 self._all.append(mark)
298 299 pre, sucs = mark[:2]
299 300 self.successors.setdefault(pre, set()).add(mark)
300 301 for suc in sucs:
301 302 self.precursors.setdefault(suc, set()).add(mark)
302 303 if node.nullid in self.precursors:
303 304 raise util.Abort(_('bad obsolescence marker detected: '
304 305 'invalid successors nullid'))
305 306
306 307 def _encodemarkers(markers, addheader=False):
307 308 # Kept separate from flushmarkers(), it will be reused for
308 309 # markers exchange.
309 310 if addheader:
310 311 yield _pack('>B', _fmversion)
311 312 for marker in markers:
312 313 yield _encodeonemarker(marker)
313 314
314 315
315 316 def _encodeonemarker(marker):
316 317 pre, sucs, flags, metadata = marker
317 318 nbsuc = len(sucs)
318 319 format = _fmfixed + (_fmnode * nbsuc)
319 320 data = [nbsuc, len(metadata), flags, pre]
320 321 data.extend(sucs)
321 322 return _pack(format, *data) + metadata
322 323
323 324 # arbitrarily picked to fit into the 8K limit from the HTTP server
324 325 # you have to take into account:
325 326 # - the version header
326 327 # - the base85 encoding
327 328 _maxpayload = 5300
328 329
329 330 def listmarkers(repo):
330 331 """List markers over pushkey"""
331 332 if not repo.obsstore:
332 333 return {}
333 334 keys = {}
334 335 parts = []
335 336 currentlen = _maxpayload * 2 # ensure we create a new part
336 337 for marker in repo.obsstore:
337 338 nextdata = _encodeonemarker(marker)
338 339 if (len(nextdata) + currentlen > _maxpayload):
339 340 currentpart = []
340 341 currentlen = 0
341 342 parts.append(currentpart)
342 343 currentpart.append(nextdata)
343 344 currentlen += len(nextdata)
344 345 for idx, part in enumerate(reversed(parts)):
345 346 data = ''.join([_pack('>B', _fmversion)] + part)
346 347 keys['dump%i' % idx] = base85.b85encode(data)
347 348 return keys
348 349
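A hedged sketch of the consumer side of the payload built above: each pushkey value decodes to a version header plus raw marker records, which is what mergemarkers() expects; `repo` is an assumption.

    import base85    # mercurial.base85
    for key, value in listmarkers(repo).items():
        data = base85.b85decode(value)     # version byte + marker records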
349 350 def pushmarker(repo, key, old, new):
350 351 """Push markers over pushkey"""
351 352 if not key.startswith('dump'):
352 353 repo.ui.warn(_('unknown key: %r') % key)
353 354 return 0
354 355 if old:
355 356 repo.ui.warn(_('unexpected old value for %r') % key)
356 357 return 0
357 358 data = base85.b85decode(new)
358 359 lock = repo.lock()
359 360 try:
360 361 tr = repo.transaction('pushkey: obsolete markers')
361 362 try:
362 363 repo.obsstore.mergemarkers(tr, data)
363 364 tr.close()
364 365 return 1
365 366 finally:
366 367 tr.release()
367 368 finally:
368 369 lock.release()
369 370
370 371 def allmarkers(repo):
371 372 """all obsolete markers known in a repository"""
372 373 for markerdata in repo.obsstore:
373 374 yield marker(repo, markerdata)
374 375
375 376 def precursormarkers(ctx):
376 377 """obsolete marker marking this changeset as a successors"""
377 378 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
378 379 yield marker(ctx._repo, data)
379 380
380 381 def successormarkers(ctx):
381 382 """obsolete marker making this changeset obsolete"""
382 383 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
383 384 yield marker(ctx._repo, data)
384 385
385 386 def allsuccessors(obsstore, nodes, ignoreflags=0):
386 387 """Yield node for every successor of <nodes>.
387 388
388 389 Some successors may be unknown locally.
389 390
390 391 This is a linear yield unsuited to detecting split changesets."""
391 392 remaining = set(nodes)
392 393 seen = set(remaining)
393 394 while remaining:
394 395 current = remaining.pop()
395 396 yield current
396 397 for mark in obsstore.successors.get(current, ()):
397 398 # ignore markers flagged with the specified flag
398 399 if mark[2] & ignoreflags:
399 400 continue
400 401 for suc in mark[1]:
401 402 if suc not in seen:
402 403 seen.add(suc)
403 404 remaining.add(suc)
404 405
405 406 def successorssets(repo, initialnode, cache=None):
406 407 """Return all set of successors of initial nodes
407 408
408 409 The successors set of changeset A is a group of revisions that succeed A.
409 410 They succeed A as a consistent whole, each revision being only a partial
410 411 replacement. A successors set contains non-obsolete changesets only.
411 412
412 413 In most cases a changeset A has zero (changeset pruned) or a single
413 414 successors set that contains a single successor (changeset A replaced by
414 415 A')
415 416
416 417 When a changeset is split, it results in a successors set containing more than
417 418 a single element. Divergent rewriting will result in multiple successors
418 419 sets.
419 420
420 421 They are returned as a list of tuples containing all valid successors sets.
421 422
422 423 Final successors unknown locally are considered plain prune (obsoleted
423 424 without successors).
424 425
425 426 The optional `cache` parameter is a dictionary that may contain
426 427 precomputed successors sets. It is meant to reuse the computation of
427 428 previous calls to `successorssets` when multiple calls are made at the same
428 429 time. The cache dictionary is updated in place. The caller is responsible
429 430 for its life span. Code that makes multiple calls to `successorssets`
430 431 *must* use this cache mechanism or suffer terrible performance."""
431 432
432 433 succmarkers = repo.obsstore.successors
433 434
434 435 # Stack of nodes we search successors sets for
435 436 toproceed = [initialnode]
436 437 # set version of above list for fast loop detection
437 438 # element added to "toproceed" must be added here
438 439 stackedset = set(toproceed)
439 440 if cache is None:
440 441 cache = {}
441 442
442 443 # This while loop is the flattened version of a recursive search for
443 444 # successors sets
444 445 #
445 446 # def successorssets(x):
446 447 # successors = directsuccessors(x)
447 448 # ss = [[]]
448 449 # for succ in directsuccessors(x):
449 450 # # product as in itertools cartesian product
450 451 # ss = product(ss, successorssets(succ))
451 452 # return ss
452 453 #
453 454 # But we can not use plain recursive calls here:
454 455 # - that would blow the python call stack
455 456 # - obsolescence markers may have cycles, we need to handle them.
456 457 #
457 458 # The `toproceed` list acts as our call stack. Every node we search
458 459 # successors sets for is stacked there.
459 460 #
460 461 # The `stackedset` is the set version of this stack, used to check if a
461 462 # node is already stacked. This check is used to detect cycles and prevent
462 463 # infinite loops.
463 464 #
464 465 # successors sets of all nodes are stored in the `cache` dictionary.
465 466 #
466 467 # After this while loop ends we use the cache to return the successors sets
467 468 # for the node requested by the caller.
468 469 while toproceed:
469 470 # Every iteration tries to compute the successors sets of the topmost
470 471 # node of the stack: CURRENT.
471 472 #
472 473 # There are four possible outcomes:
473 474 #
474 475 # 1) We already know the successors sets of CURRENT:
475 476 # -> mission accomplished, pop it from the stack.
476 477 # 2) Node is not obsolete:
477 478 # -> the node is its own successors sets. Add it to the cache.
478 479 # 3) We do not know successors set of direct successors of CURRENT:
479 480 # -> We add those successors to the stack.
480 481 # 4) We know successors sets of all direct successors of CURRENT:
481 482 # -> We can compute CURRENT successors set and add it to the
482 483 # cache.
483 484 #
484 485 current = toproceed[-1]
485 486 if current in cache:
486 487 # case (1): We already know the successors sets
487 488 stackedset.remove(toproceed.pop())
488 489 elif current not in succmarkers:
489 490 # case (2): The node is not obsolete.
490 491 if current in repo:
491 492 # We have a valid last successor.
492 493 cache[current] = [(current,)]
493 494 else:
494 495 # Final obsolete version is unknown locally.
495 496 # Do not count that as a valid successor.
496 497 cache[current] = []
497 498 else:
498 499 # cases (3) and (4)
499 500 #
500 501 # We proceed in two phases. Phase 1 aims to distinguish case (3)
501 502 # from case (4):
502 503 #
503 504 # For each direct successor of CURRENT, we check whether its
504 505 # successors sets are known. If they are not, we stack the
505 506 # unknown node and proceed to the next iteration of the while
506 507 # loop. (case 3)
507 508 #
508 509 # During this step, we may detect obsolescence cycles: a node
509 510 # with unknown successors sets but already in the call stack.
510 511 # In such a situation, we arbitrarily set the successors sets of
511 512 # the node to nothing (node pruned) to break the cycle.
512 513 #
513 # If no break was encountered we proceeed to phase 2.
514 # If no break was encountered we proceed to phase 2.
514 515 #
515 516 # Phase 2 computes successors sets of CURRENT (case 4); see details
516 517 # in phase 2 itself.
517 518 #
518 519 # Note the two levels of iteration in each phase.
519 520 # - The first one handles obsolescence markers using CURRENT as
520 521 # precursor (successors markers of CURRENT).
521 522 #
522 523 # Having multiple entries here means divergence.
523 524 #
524 525 # - The second one handles successors defined in each marker.
525 526 #
526 527 # Having none means a pruned node, multiple successors means a split,
527 528 # a single successor is a standard replacement.
528 529 #
529 530 for mark in sorted(succmarkers[current]):
530 531 for suc in mark[1]:
531 532 if suc not in cache:
532 533 if suc in stackedset:
533 534 # cycle breaking
534 535 cache[suc] = []
535 536 else:
536 537 # case (3) If we have not computed successors sets
537 538 # of one of those successors we add it to the
538 539 # `toproceed` stack and stop all work for this
539 540 # iteration.
540 541 toproceed.append(suc)
541 542 stackedset.add(suc)
542 543 break
543 544 else:
544 545 continue
545 546 break
546 547 else:
547 548 # case (4): we know all successors sets of all direct
548 549 # successors
549 550 #
550 551 # The successors set contributed by each marker depends on the
551 552 # successors sets of all its "successors" nodes.
552 553 #
553 554 # Each different marker is a divergence in the obsolescence
554 # history. It contributes successors sets dictinct from other
555 # history. It contributes successors sets distinct from other
555 556 # markers.
556 557 #
557 558 # Within a marker, a successor may have divergent successors
558 559 # sets. In such a case, the marker will contribute multiple
559 560 # divergent successors sets. If multiple successors have
560 # divergents successors sets, a cartesian product is used.
561 # divergent successors sets, a cartesian product is used.
561 562 #
562 563 # At the end we post-process successors sets to remove
563 564 # duplicated entries and successors sets that are strict subsets of
564 565 # another one.
565 566 succssets = []
566 567 for mark in sorted(succmarkers[current]):
567 568 # successors sets contributed by this marker
568 569 markss = [[]]
569 570 for suc in mark[1]:
570 571 # cartesian product with previous successors
571 572 productresult = []
572 573 for prefix in markss:
573 574 for suffix in cache[suc]:
574 575 newss = list(prefix)
575 576 for part in suffix:
576 577 # do not duplicate entries in the successors set;
577 578 # first entry wins.
578 579 if part not in newss:
579 580 newss.append(part)
580 581 productresult.append(newss)
581 582 markss = productresult
582 583 succssets.extend(markss)
583 584 # remove duplicates and subsets
584 585 seen = []
585 586 final = []
586 587 candidate = sorted(((set(s), s) for s in succssets if s),
587 588 key=lambda x: len(x[1]), reverse=True)
588 589 for setversion, listversion in candidate:
589 590 for seenset in seen:
590 591 if setversion.issubset(seenset):
591 592 break
592 593 else:
593 594 final.append(listversion)
594 595 seen.append(setversion)
595 596 final.reverse() # put small successors set first
596 597 cache[current] = final
597 598 return cache[initialnode]
598 599
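To summarize the cases handled above, a sketch of the result shapes (node names are placeholders) and of the shared-cache calling convention the docstring requires; `repo` and `nodes` are assumptions.

    # pruned:    successorssets(repo, A) == []
    # replaced:  successorssets(repo, A) == [(Aprime,)]
    # split:     successorssets(repo, A) == [(B, C)]
    # divergent: successorssets(repo, A) == [(B,), (C,)]
    cache = {}                     # reused across calls, updated in place
    for node in nodes:
        sets = successorssets(repo, node, cache)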
599 600 def _knownrevs(repo, nodes):
600 601 """yield revision numbers of known nodes passed in parameters
601 602
602 603 Unknown revisions are silently ignored."""
603 604 torev = repo.changelog.nodemap.get
604 605 for n in nodes:
605 606 rev = torev(n)
606 607 if rev is not None:
607 608 yield rev
608 609
609 610 # mapping of 'set-name' -> <function to compute this set>
610 611 cachefuncs = {}
611 612 def cachefor(name):
612 613 """Decorator to register a function as computing the cache for a set"""
613 614 def decorator(func):
614 615 assert name not in cachefuncs
615 616 cachefuncs[name] = func
616 617 return func
617 618 return decorator
618 619
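A hypothetical example of registering an additional cached set with the decorator above; the name 'example' and the body are illustrative only.

    @cachefor('example')
    def _computeexampleset(repo):
        # any repo-derived set could be memoized this way via getrevs()
        return set()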
619 620 def getrevs(repo, name):
620 621 """Return the set of revision that belong to the <name> set
621 622
622 623 Such access may compute the set and cache it for future use"""
623 624 repo = repo.unfiltered()
624 625 if not repo.obsstore:
625 626 return ()
626 627 if name not in repo.obsstore.caches:
627 628 repo.obsstore.caches[name] = cachefuncs[name](repo)
628 629 return repo.obsstore.caches[name]
629 630
630 631 # To be simple we need to invalidate the obsolescence cache when:
631 632 #
632 633 # - a new changeset is added
633 634 # - the public phase is changed
634 635 # - obsolescence markers are added
635 636 # - strip is used on a repo
636 637 def clearobscaches(repo):
637 638 """Remove all obsolescence related cache from a repo
638 639
639 640 This remove all cache in obsstore is the obsstore already exist on the
640 641 repo.
641 642
642 643 (We could be smarter here given the exact event that trigger the cache
643 644 clearing)"""
644 645 # only clear cache is there is obsstore data in this repo
645 646 if 'obsstore' in repo._filecache:
646 647 repo.obsstore.caches.clear()
647 648
648 649 @cachefor('obsolete')
649 650 def _computeobsoleteset(repo):
650 651 """the set of obsolete revisions"""
651 652 obs = set()
652 653 getrev = repo.changelog.nodemap.get
653 654 getphase = repo._phasecache.phase
654 655 for node in repo.obsstore.successors:
655 656 rev = getrev(node)
656 657 if rev is not None and getphase(repo, rev):
657 658 obs.add(rev)
658 659 return obs
659 660
660 661 @cachefor('unstable')
661 662 def _computeunstableset(repo):
662 663 """the set of non obsolete revisions with obsolete parents"""
663 664 # revset is not efficient enough here
664 665 # we do (obsolete()::) - obsolete() by hand
665 666 obs = getrevs(repo, 'obsolete')
666 667 if not obs:
667 668 return set()
668 669 cl = repo.changelog
669 670 return set(r for r in cl.descendants(obs) if r not in obs)
670 671
671 672 @cachefor('suspended')
672 673 def _computesuspendedset(repo):
673 674 """the set of obsolete parents with non obsolete descendants"""
674 675 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
675 676 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
676 677
677 678 @cachefor('extinct')
678 679 def _computeextinctset(repo):
679 680 """the set of obsolete parents without non obsolete descendants"""
680 681 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
681 682
682 683
683 684 @cachefor('bumped')
684 685 def _computebumpedset(repo):
685 686 """the set of revs trying to obsolete public revisions"""
686 687 # get all possible bumped changesets
687 688 tonode = repo.changelog.node
688 689 publicnodes = (tonode(r) for r in repo.revs('public()'))
689 690 successors = allsuccessors(repo.obsstore, publicnodes,
690 691 ignoreflags=bumpedfix)
691 692 # revisions public or already obsolete don't count as bumped
692 693 query = '%ld - obsolete() - public()'
693 694 return set(repo.revs(query, _knownrevs(repo, successors)))
694 695
695 696 @cachefor('divergent')
696 697 def _computedivergentset(repo):
697 698 """the set of rev that compete to be the final successors of some revision.
698 699 """
699 700 divergent = set()
700 701 obsstore = repo.obsstore
701 702 newermap = {}
702 703 for ctx in repo.set('(not public()) - obsolete()'):
703 704 mark = obsstore.precursors.get(ctx.node(), ())
704 705 toprocess = set(mark)
705 706 while toprocess:
706 707 prec = toprocess.pop()[0]
707 708 if prec not in newermap:
708 709 successorssets(repo, prec, newermap)
709 710 newer = [n for n in newermap[prec] if n]
710 711 if len(newer) > 1:
711 712 divergent.add(ctx.rev())
712 713 break
713 714 toprocess.update(obsstore.precursors.get(prec, ()))
714 715 return divergent
715 716
716 717
717 718 def createmarkers(repo, relations, flag=0, metadata=None):
718 719 """Add obsolete markers between changesets in a repo
719 720
720 721 <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
721 722 `old` and `news` are changectx objects.
722 723
723 724 Trying to obsolete a public changeset will raise an exception.
724 725
725 726 Current user and date are used except if specified otherwise in the
726 727 metadata attribute.
727 728
728 729 This function operates within a transaction of its own, but does
729 730 not take any lock on the repo.
730 731 """
731 732 # prepare metadata
732 733 if metadata is None:
733 734 metadata = {}
734 735 if 'date' not in metadata:
735 736 metadata['date'] = '%i %i' % util.makedate()
736 737 if 'user' not in metadata:
737 738 metadata['user'] = repo.ui.username()
738 739 tr = repo.transaction('add-obsolescence-marker')
739 740 try:
740 741 for prec, sucs in relations:
741 742 if not prec.mutable():
742 743 raise util.Abort("cannot obsolete immutable changeset: %s"
743 744 % prec)
744 745 nprec = prec.node()
745 746 nsucs = tuple(s.node() for s in sucs)
746 747 if nprec in nsucs:
747 748 raise util.Abort("changeset %s cannot obsolete itself" % prec)
748 749 repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
749 750 repo.filteredrevcache.clear()
750 751 tr.close()
751 752 finally:
752 753 tr.release()
@@ -1,219 +1,219 b''
1 1 # repoview.py - Filtered view of a localrepo object
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import copy
10 10 import phases
11 11 import util
12 12 import obsolete, revset
13 13
14 14
15 15 def hideablerevs(repo):
16 16 """Revisions candidates to be hidden
17 17
18 18 This is a standalone function to help extensions to wrap it."""
19 19 return obsolete.getrevs(repo, 'obsolete')
20 20
21 21 def computehidden(repo):
22 22 """compute the set of hidden revision to filter
23 23
24 24 During most operation hidden should be filtered."""
25 25 assert not repo.changelog.filteredrevs
26 26 hideable = hideablerevs(repo)
27 27 if hideable:
28 28 cl = repo.changelog
29 29 firsthideable = min(hideable)
30 30 revs = cl.revs(start=firsthideable)
31 31 blockers = [r for r in revset._children(repo, revs, hideable)
32 32 if r not in hideable]
33 33 for par in repo[None].parents():
34 34 blockers.append(par.rev())
35 35 for bm in repo._bookmarks.values():
36 36 blockers.append(repo[bm].rev())
37 37 blocked = cl.ancestors(blockers, inclusive=True)
38 38 return frozenset(r for r in hideable if r not in blocked)
39 39 return frozenset()
40 40
41 41 def computeunserved(repo):
42 42 """compute the set of revision that should be filtered when used a server
43 43
44 44 Secret and hidden changeset should not pretend to be here."""
45 45 assert not repo.changelog.filteredrevs
46 46 # fast path in simple case to avoid impact of non optimised code
47 47 hiddens = filterrevs(repo, 'visible')
48 48 if phases.hassecret(repo):
49 49 cl = repo.changelog
50 50 secret = phases.secret
51 51 getphase = repo._phasecache.phase
52 52 first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])
53 53 revs = cl.revs(start=first)
54 54 secrets = set(r for r in revs if getphase(repo, r) >= secret)
55 55 return frozenset(hiddens | secrets)
56 56 else:
57 57 return hiddens
58 58 return frozenset()
59 59
60 60 def computemutable(repo):
61 61 """compute the set of revision that should be filtered when used a server
62 62
63 63 Secret and hidden changeset should not pretend to be here."""
64 64 assert not repo.changelog.filteredrevs
65 65 # fast check to avoid revset call on huge repo
66 66 if util.any(repo._phasecache.phaseroots[1:]):
67 67 getphase = repo._phasecache.phase
68 68 maymutable = filterrevs(repo, 'base')
69 69 return frozenset(r for r in maymutable if getphase(repo, r))
70 70 return frozenset()
71 71
72 72 def computeimpactable(repo):
73 73 """Everything impactable by mutable revision
74 74
75 75 The immutable filter still have some chance to get invalidated. This will
76 76 happen when:
77 77
78 78 - you garbage collect hidden changeset,
79 79 - public phase is moved backward,
80 80 - something is changed in the filtering (this could be fixed)
81 81
82 82 This filter out any mutable changeset and any public changeset that may be
83 83 impacted by something happening to a mutable revision.
84 84
85 85 This is achieved by filtered everything with a revision number egal or
86 86 higher than the first mutable changeset is filtered."""
87 87 assert not repo.changelog.filteredrevs
88 88 cl = repo.changelog
89 89 firstmutable = len(cl)
90 90 for roots in repo._phasecache.phaseroots[1:]:
91 91 if roots:
92 92 firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
93 93 # protect from nullrev root
94 94 firstmutable = max(0, firstmutable)
95 95 return frozenset(xrange(firstmutable, len(cl)))
96 96
97 97 # function to compute filtered set
98 98 filtertable = {'visible': computehidden,
99 99 'served': computeunserved,
100 100 'immutable': computemutable,
101 101 'base': computeimpactable}
102 102 ### Nearest subset relation
103 103 # Nearest subset of filter X is a filter Y so that:
104 104 # * Y is included in X,
105 105 # * X - Y is as small as possible.
106 106 # This creates an ordering used for branchmap purposes.
107 107 # the ordering may be partial
108 108 subsettable = {None: 'visible',
109 109 'visible': 'served',
110 110 'served': 'immutable',
111 111 'immutable': 'base'}
112 112
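A hedged sketch of querying the filters registered above: filterrevs() returns (and memoizes) the set of revisions each filter hides; `repo` is an assumption.

    for name in ('visible', 'served', 'immutable', 'base'):
        hidden = filterrevs(repo, name)
        print '%-9s hides %d revision(s)' % (name, len(hidden))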
113 113 def filterrevs(repo, filtername):
114 114 """returns set of filtered revision for this filter name"""
115 115 if filtername not in repo.filteredrevcache:
116 116 func = filtertable[filtername]
117 117 repo.filteredrevcache[filtername] = func(repo.unfiltered())
118 118 return repo.filteredrevcache[filtername]
119 119
120 120 class repoview(object):
121 121 """Provide a read/write view of a repo through a filtered changelog
122 122
123 123 This object is used to access a filtered version of a repository without
124 124 altering the original repository object itself. We can not alter the
125 125 original object for two main reasons:
126 126 - It prevents the use of a repo with multiple filters at the same time. In
127 127 particular when multiple threads are involved.
128 128 - It makes the scope of the filtering harder to control.
129 129
130 130 This object behaves very much like the original repository. All attribute
131 131 operations are done on the original repository:
132 132 - An access to `repoview.someattr` actually returns `repo.someattr`,
133 133 - A write to `repoview.someattr` actually sets value of `repo.someattr`,
134 134 - A deletion of `repoview.someattr` actually drops `someattr`
135 135 from `repo.__dict__`.
136 136
137 137 The only exception is the `changelog` property. It is overridden to return
138 138 a (surface) copy of `repo.changelog` with some revisions filtered. The
139 139 `filtername` attribute of the view controls the revisions that need to be
140 140 filtered. (the fact that the changelog is copied is an implementation detail).
141 141
142 142 Unlike attributes, this object intercepts all method calls. This means that
143 143 all methods are run on the `repoview` object with the filtered `changelog`
144 144 property. For this purpose the simple `repoview` class must be mixed with
145 145 the actual class of the repository. This ensures that the resulting
146 146 `repoview` object has the very same methods as the repo object. This
147 147 leads to the property below.
148 148
149 149 repoview.method() --> repo.__class__.method(repoview)
150 150
151 151 The inheritance has to be done dynamically because `repo` can be of any
152 subclasses of `localrepo`. Eg: `bundlerepo` or `httprepo`.
152 subclass of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
153 153 """
154 154
155 155 def __init__(self, repo, filtername):
156 156 object.__setattr__(self, '_unfilteredrepo', repo)
157 157 object.__setattr__(self, 'filtername', filtername)
158 158 object.__setattr__(self, '_clcachekey', None)
159 159 object.__setattr__(self, '_clcache', None)
160 160
161 # not a cacheproperty on purpose we shall implement a proper cache later
161 # not a propertycache on purpose; we shall implement a proper cache later
162 162 @property
163 163 def changelog(self):
164 164 """return a filtered version of the changeset
165 165
166 166 this changelog must not be used for writing"""
167 167 # some cache may be implemented later
168 168 unfi = self._unfilteredrepo
169 169 unfichangelog = unfi.changelog
170 170 revs = filterrevs(unfi, self.filtername)
171 171 cl = self._clcache
172 172 newkey = (len(unfichangelog), unfichangelog.tip(), hash(revs))
173 173 if cl is not None:
174 174 # we need to check curkey too for some obscure reason.
175 175 # MQ tests show a corruption of the underlying repo (in _clcache)
176 176 # without change in the cachekey.
177 177 oldfilter = cl.filteredrevs
178 178 try:
179 179 cl.filteredrevs = () # disable filtering for tip
180 180 curkey = (len(cl), cl.tip(), hash(oldfilter))
181 181 finally:
182 182 cl.filteredrevs = oldfilter
183 183 if newkey != self._clcachekey or newkey != curkey:
184 184 cl = None
185 185 # could have been made None by the previous if
186 186 if cl is None:
187 187 cl = copy.copy(unfichangelog)
188 188 cl.filteredrevs = revs
189 189 object.__setattr__(self, '_clcache', cl)
190 190 object.__setattr__(self, '_clcachekey', newkey)
191 191 return cl
192 192
193 193 def unfiltered(self):
194 194 """Return an unfiltered version of a repo"""
195 195 return self._unfilteredrepo
196 196
197 197 def filtered(self, name):
198 198 """Return a filtered version of a repository"""
199 199 if name == self.filtername:
200 200 return self
201 201 return self.unfiltered().filtered(name)
202 202
203 203 # all attribute accesses are forwarded to the proxied repo
204 204 def __getattr__(self, attr):
205 205 return getattr(self._unfilteredrepo, attr)
206 206
207 207 def __setattr__(self, attr, value):
208 208 return setattr(self._unfilteredrepo, attr, value)
209 209
210 210 def __delattr__(self, attr):
211 211 return delattr(self._unfilteredrepo, attr)
212 212
213 # The `requirement` attribut is initialiazed during __init__. But
213 # The `requirements` attribute is initialized during __init__. But
214 214 # __getattr__ won't be called as it also exists on the class. We need
215 215 # explicit forwarding to main repo here
216 216 @property
217 217 def requirements(self):
218 218 return self._unfilteredrepo.requirements
219 219
@@ -1,447 +1,447 b''
1 1 Test file dedicated to testing the divergent troubles from obsolete changesets.
2 2
3 This is the most complexe troubles from far so we isolate it in a dedicated
3 These are the most complex troubles by far, so we isolate them in a dedicated
4 4 file.
5 5
6 6 Enable obsolete
7 7
8 8 $ cat > obs.py << EOF
9 9 > import mercurial.obsolete
10 10 > mercurial.obsolete._enabled = True
11 11 > EOF
12 12 $ cat >> $HGRCPATH << EOF
13 13 > [ui]
14 14 > logtemplate = {rev}:{node|short} {desc}\n
15 15 > [extensions]
16 16 > obs=${TESTTMP}/obs.py
17 17 > [alias]
18 18 > debugobsolete = debugobsolete -d '0 0'
19 19 > [phases]
20 20 > publish=False
21 21 > EOF
22 22
23 23
24 24 $ mkcommit() {
25 25 > echo "$1" > "$1"
26 26 > hg add "$1"
27 27 > hg ci -m "$1"
28 28 > }
29 29 $ getid() {
30 30 > hg id --debug --hidden -ir "desc('$1')"
31 31 > }
32 32
33 33 setup repo
34 34
35 35 $ hg init reference
36 36 $ cd reference
37 37 $ mkcommit base
38 38 $ mkcommit A_0
39 39 $ hg up 0
40 40 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
41 41 $ mkcommit A_1
42 42 created new head
43 43 $ hg up 0
44 44 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
45 45 $ mkcommit A_2
46 46 created new head
47 47 $ hg up 0
48 48 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
49 49 $ cd ..
50 50
51 51
52 52 $ newcase() {
53 53 > hg clone -u 0 -q reference $1
54 54 > cd $1
55 55 > }
56 56
57 57 direct divergence
58 58 -----------------
59 59
60 60 A_0 has two direct and divergent successors, A_1 and A_2
61 61
62 62 $ newcase direct
63 63 $ hg debugobsolete `getid A_0` `getid A_1`
64 64 $ hg debugobsolete `getid A_0` `getid A_2`
65 65 invalid branchheads cache (served): tip differs
66 66 $ hg log -G --hidden
67 67 o 3:392fd25390da A_2
68 68 |
69 69 | o 2:82623d38b9ba A_1
70 70 |/
71 71 | x 1:007dc284c1f8 A_0
72 72 |/
73 73 @ 0:d20a80d4def3 base
74 74
75 75 $ hg debugsuccessorssets --hidden 'all()'
76 76 d20a80d4def3
77 77 d20a80d4def3
78 78 007dc284c1f8
79 79 82623d38b9ba
80 80 392fd25390da
81 81 82623d38b9ba
82 82 82623d38b9ba
83 83 392fd25390da
84 84 392fd25390da
85 85 $ hg log -r 'divergent()'
86 86 2:82623d38b9ba A_1
87 87 3:392fd25390da A_2
88 88
89 89 check that mercurial refuses to push
90 90
91 91 $ hg init ../other
92 92 $ hg push ../other
93 93 pushing to ../other
94 94 searching for changes
95 95 abort: push includes divergent changeset: 392fd25390da!
96 96 [255]
97 97
98 98 $ cd ..
99 99
100 100
101 101 indirect divergence with known changeset
102 102 -------------------------------------------
103 103
104 104 $ newcase indirect_known
105 105 $ hg debugobsolete `getid A_0` `getid A_1`
106 106 $ hg debugobsolete `getid A_0` `getid A_2`
107 107 invalid branchheads cache (served): tip differs
108 108 $ mkcommit A_3
109 109 created new head
110 110 $ hg debugobsolete `getid A_2` `getid A_3`
111 111 $ hg log -G --hidden
112 112 @ 4:01f36c5a8fda A_3
113 113 |
114 114 | x 3:392fd25390da A_2
115 115 |/
116 116 | o 2:82623d38b9ba A_1
117 117 |/
118 118 | x 1:007dc284c1f8 A_0
119 119 |/
120 120 o 0:d20a80d4def3 base
121 121
122 122 $ hg debugsuccessorssets --hidden 'all()'
123 123 d20a80d4def3
124 124 d20a80d4def3
125 125 007dc284c1f8
126 126 82623d38b9ba
127 127 01f36c5a8fda
128 128 82623d38b9ba
129 129 82623d38b9ba
130 130 392fd25390da
131 131 01f36c5a8fda
132 132 01f36c5a8fda
133 133 01f36c5a8fda
134 134 $ hg log -r 'divergent()'
135 135 2:82623d38b9ba A_1
136 136 4:01f36c5a8fda A_3
137 137 $ cd ..
138 138
139 139
140 140 indirect divergence with unknown changeset
141 141 -------------------------------------------
142 142
143 143 $ newcase indirect_unknown
144 144 $ hg debugobsolete `getid A_0` aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
145 145 $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid A_1`
146 146 invalid branchheads cache (served): tip differs
147 147 $ hg debugobsolete `getid A_0` `getid A_2`
148 148 $ hg log -G --hidden
149 149 o 3:392fd25390da A_2
150 150 |
151 151 | o 2:82623d38b9ba A_1
152 152 |/
153 153 | x 1:007dc284c1f8 A_0
154 154 |/
155 155 @ 0:d20a80d4def3 base
156 156
157 157 $ hg debugsuccessorssets --hidden 'all()'
158 158 d20a80d4def3
159 159 d20a80d4def3
160 160 007dc284c1f8
161 161 82623d38b9ba
162 162 392fd25390da
163 163 82623d38b9ba
164 164 82623d38b9ba
165 165 392fd25390da
166 166 392fd25390da
167 167 $ hg log -r 'divergent()'
168 168 2:82623d38b9ba A_1
169 169 3:392fd25390da A_2
170 170 $ cd ..
171 171
172 172 do not take unknown nodes into account if they are final
173 173 -----------------------------------------------------
174 174
175 175 $ newcase final-unknown
176 176 $ hg debugobsolete `getid A_0` `getid A_1`
177 177 $ hg debugobsolete `getid A_1` `getid A_2`
178 178 invalid branchheads cache (served): tip differs
179 179 $ hg debugobsolete `getid A_0` bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
180 180 $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb cccccccccccccccccccccccccccccccccccccccc
181 181 $ hg debugobsolete `getid A_1` dddddddddddddddddddddddddddddddddddddddd
182 182
183 183 $ hg debugsuccessorssets --hidden 'desc('A_0')'
184 184 007dc284c1f8
185 185 392fd25390da
186 186
187 187 $ cd ..
188 188
189 189 divergence that converges again is not divergence anymore
190 190 -----------------------------------------------------
191 191
192 192 $ newcase converged_divergence
193 193 $ hg debugobsolete `getid A_0` `getid A_1`
194 194 $ hg debugobsolete `getid A_0` `getid A_2`
195 195 invalid branchheads cache (served): tip differs
196 196 $ mkcommit A_3
197 197 created new head
198 198 $ hg debugobsolete `getid A_1` `getid A_3`
199 199 $ hg debugobsolete `getid A_2` `getid A_3`
200 200 $ hg log -G --hidden
201 201 @ 4:01f36c5a8fda A_3
202 202 |
203 203 | x 3:392fd25390da A_2
204 204 |/
205 205 | x 2:82623d38b9ba A_1
206 206 |/
207 207 | x 1:007dc284c1f8 A_0
208 208 |/
209 209 o 0:d20a80d4def3 base
210 210
211 211 $ hg debugsuccessorssets --hidden 'all()'
212 212 d20a80d4def3
213 213 d20a80d4def3
214 214 007dc284c1f8
215 215 01f36c5a8fda
216 216 82623d38b9ba
217 217 01f36c5a8fda
218 218 392fd25390da
219 219 01f36c5a8fda
220 220 01f36c5a8fda
221 221 01f36c5a8fda
222 222 $ hg log -r 'divergent()'
223 223 $ cd ..
224 224
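Continuing the earlier sketch with hypothetical marker data for the converged case above: once both rewrite chains end at the same node, deduplication leaves a single successors-set, so nothing is divergent.

    markers = {
        'A_0': [('A_1',), ('A_2',)],
        'A_1': [('A_3',)],
        'A_2': [('A_3',)],
    }
    # both candidate sets collapse to ('A_3',); after deduplication only
    # one successors-set remains, hence no divergence
    assert set(successorssets('A_0')) == {('A_3',)}
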
225 225 split is not divergence
226 226 -----------------------------
227 227
228 228 $ newcase split
229 229 $ hg debugobsolete `getid A_0` `getid A_1` `getid A_2`
230 230 $ hg log -G --hidden
231 231 o 3:392fd25390da A_2
232 232 |
233 233 | o 2:82623d38b9ba A_1
234 234 |/
235 235 | x 1:007dc284c1f8 A_0
236 236 |/
237 237 @ 0:d20a80d4def3 base
238 238
239 239 $ hg debugsuccessorssets --hidden 'all()'
240 240 d20a80d4def3
241 241 d20a80d4def3
242 242 007dc284c1f8
243 243 82623d38b9ba 392fd25390da
244 244 82623d38b9ba
245 245 82623d38b9ba
246 246 392fd25390da
247 247 392fd25390da
248 248 $ hg log -r 'divergent()'
249 249
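In the same sketch, a split is a single marker with several successors: it yields one successors-set with multiple members rather than multiple sets, which is why divergent() stays empty here.

    markers = {'A_0': [('A_1', 'A_2')]}  # one marker, two successors: a split
    assert successorssets('A_0') == [('A_1', 'A_2')]  # one set -> no divergence
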
250 250 Even when subsequent rewriting happens
251 251
252 252 $ mkcommit A_3
253 253 created new head
254 254 $ hg debugobsolete `getid A_1` `getid A_3`
255 255 $ hg up 0
256 256 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
257 257 $ mkcommit A_4
258 258 created new head
259 259 $ hg debugobsolete `getid A_2` `getid A_4`
260 260 $ hg up 0
261 261 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
262 262 $ mkcommit A_5
263 263 created new head
264 264 $ hg debugobsolete `getid A_4` `getid A_5`
265 265 $ hg log -G --hidden
266 266 @ 6:e442cfc57690 A_5
267 267 |
268 268 | x 5:6a411f0d7a0a A_4
269 269 |/
270 270 | o 4:01f36c5a8fda A_3
271 271 |/
272 272 | x 3:392fd25390da A_2
273 273 |/
274 274 | x 2:82623d38b9ba A_1
275 275 |/
276 276 | x 1:007dc284c1f8 A_0
277 277 |/
278 278 o 0:d20a80d4def3 base
279 279
280 280 $ hg debugsuccessorssets --hidden 'all()'
281 281 d20a80d4def3
282 282 d20a80d4def3
283 283 007dc284c1f8
284 284 01f36c5a8fda e442cfc57690
285 285 82623d38b9ba
286 286 01f36c5a8fda
287 287 392fd25390da
288 288 e442cfc57690
289 289 01f36c5a8fda
290 290 01f36c5a8fda
291 291 6a411f0d7a0a
292 292 e442cfc57690
293 293 e442cfc57690
294 294 e442cfc57690
295 295 $ hg log -r 'divergent()'
296 296
297 Check more complexe obsolescence graft (with divergence)
297 Check more complex obsolescence graft (with divergence)
298 298
299 299 $ mkcommit B_0; hg up 0
300 300 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
301 301 $ hg debugobsolete `getid B_0` `getid A_2`
302 302 $ mkcommit A_7; hg up 0
303 303 created new head
304 304 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
305 305 $ mkcommit A_8; hg up 0
306 306 created new head
307 307 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
308 308 $ hg debugobsolete `getid A_5` `getid A_7` `getid A_8`
309 309 $ mkcommit A_9; hg up 0
310 310 created new head
311 311 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
312 312 $ hg debugobsolete `getid A_5` `getid A_9`
313 313 $ hg log -G --hidden
314 314 o 10:bed64f5d2f5a A_9
315 315 |
316 316 | o 9:14608b260df8 A_8
317 317 |/
318 318 | o 8:7ae126973a96 A_7
319 319 |/
320 320 | x 7:3750ebee865d B_0
321 321 | |
322 322 | x 6:e442cfc57690 A_5
323 323 |/
324 324 | x 5:6a411f0d7a0a A_4
325 325 |/
326 326 | o 4:01f36c5a8fda A_3
327 327 |/
328 328 | x 3:392fd25390da A_2
329 329 |/
330 330 | x 2:82623d38b9ba A_1
331 331 |/
332 332 | x 1:007dc284c1f8 A_0
333 333 |/
334 334 @ 0:d20a80d4def3 base
335 335
336 336 $ hg debugsuccessorssets --hidden 'all()'
337 337 d20a80d4def3
338 338 d20a80d4def3
339 339 007dc284c1f8
340 340 01f36c5a8fda bed64f5d2f5a
341 341 01f36c5a8fda 7ae126973a96 14608b260df8
342 342 82623d38b9ba
343 343 01f36c5a8fda
344 344 392fd25390da
345 345 bed64f5d2f5a
346 346 7ae126973a96 14608b260df8
347 347 01f36c5a8fda
348 348 01f36c5a8fda
349 349 6a411f0d7a0a
350 350 bed64f5d2f5a
351 351 7ae126973a96 14608b260df8
352 352 e442cfc57690
353 353 bed64f5d2f5a
354 354 7ae126973a96 14608b260df8
355 355 3750ebee865d
356 356 bed64f5d2f5a
357 357 7ae126973a96 14608b260df8
358 358 7ae126973a96
359 359 7ae126973a96
360 360 14608b260df8
361 361 14608b260df8
362 362 bed64f5d2f5a
363 363 bed64f5d2f5a
364 364 $ hg log -r 'divergent()'
365 365 4:01f36c5a8fda A_3
366 366 8:7ae126973a96 A_7
367 367 9:14608b260df8 A_8
368 368 10:bed64f5d2f5a A_9
369 369
370 370 fix the divergence
371 371
372 372 $ mkcommit A_A; hg up 0
373 373 created new head
374 374 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
375 375 $ hg debugobsolete `getid A_9` `getid A_A`
376 376 $ hg debugobsolete `getid A_7` `getid A_A`
377 377 $ hg debugobsolete `getid A_8` `getid A_A`
378 378 $ hg log -G --hidden
379 379 o 11:a139f71be9da A_A
380 380 |
381 381 | x 10:bed64f5d2f5a A_9
382 382 |/
383 383 | x 9:14608b260df8 A_8
384 384 |/
385 385 | x 8:7ae126973a96 A_7
386 386 |/
387 387 | x 7:3750ebee865d B_0
388 388 | |
389 389 | x 6:e442cfc57690 A_5
390 390 |/
391 391 | x 5:6a411f0d7a0a A_4
392 392 |/
393 393 | o 4:01f36c5a8fda A_3
394 394 |/
395 395 | x 3:392fd25390da A_2
396 396 |/
397 397 | x 2:82623d38b9ba A_1
398 398 |/
399 399 | x 1:007dc284c1f8 A_0
400 400 |/
401 401 @ 0:d20a80d4def3 base
402 402
403 403 $ hg debugsuccessorssets --hidden 'all()'
404 404 d20a80d4def3
405 405 d20a80d4def3
406 406 007dc284c1f8
407 407 01f36c5a8fda a139f71be9da
408 408 82623d38b9ba
409 409 01f36c5a8fda
410 410 392fd25390da
411 411 a139f71be9da
412 412 01f36c5a8fda
413 413 01f36c5a8fda
414 414 6a411f0d7a0a
415 415 a139f71be9da
416 416 e442cfc57690
417 417 a139f71be9da
418 418 3750ebee865d
419 419 a139f71be9da
420 420 7ae126973a96
421 421 a139f71be9da
422 422 14608b260df8
423 423 a139f71be9da
424 424 bed64f5d2f5a
425 425 a139f71be9da
426 426 a139f71be9da
427 427 a139f71be9da
428 428 $ hg log -r 'divergent()'
429 429
430 430 $ cd ..
431 431
432 432
433 433 Subset does not diverge
434 434 ------------------------------
435 435
436 436 Do not report a divergent successors-set when it is a subset of another
437 437 successors-set (report [A,B], not [A] + [A,B])
438 438
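One plausible way to express that rule in Python (a sketch under the same simplified model, not mercurial's actual code):

    def filtersubsets(sets):
        # keep a successors-set only when it is not a proper subset of
        # another reported set
        return [s for s in sets
                if not any(set(s) < set(other) for other in sets)]

    print(filtersubsets([('A_2',), ('A_1', 'A_2')]))  # [('A_1', 'A_2')]
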
439 439 $ newcase subset
440 440 $ hg debugobsolete `getid A_0` `getid A_2`
441 441 $ hg debugobsolete `getid A_0` `getid A_1` `getid A_2`
442 442 invalid branchheads cache (served): tip differs
443 443 $ hg debugsuccessorssets --hidden 'desc('A_0')'
444 444 007dc284c1f8
445 445 82623d38b9ba 392fd25390da
446 446
447 447 $ cd ..
@@ -1,649 +1,649 b''
1 1 $ cat >> $HGRCPATH <<EOF
2 2 > [extensions]
3 3 > graphlog=
4 4 > rebase=
5 5 >
6 6 > [phases]
7 7 > publish=False
8 8 >
9 9 > [alias]
10 10 > tglog = log -G --template "{rev}: '{desc}' {branches}\n"
11 11 > EOF
12 12
13 13
14 14 $ hg init a
15 15 $ cd a
16 16 $ hg unbundle "$TESTDIR/bundles/rebase.hg"
17 17 adding changesets
18 18 adding manifests
19 19 adding file changes
20 20 added 8 changesets with 7 changes to 7 files (+2 heads)
21 21 (run 'hg heads' to see heads, 'hg merge' to merge)
22 22 $ hg up tip
23 23 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ cd ..
25 25
26 26
27 27 Rebasing
28 28 D onto H - simple rebase:
29 29
30 30 $ hg clone -q -u . a a1
31 31 $ cd a1
32 32
33 33 $ hg tglog
34 34 @ 7: 'H'
35 35 |
36 36 | o 6: 'G'
37 37 |/|
38 38 o | 5: 'F'
39 39 | |
40 40 | o 4: 'E'
41 41 |/
42 42 | o 3: 'D'
43 43 | |
44 44 | o 2: 'C'
45 45 | |
46 46 | o 1: 'B'
47 47 |/
48 48 o 0: 'A'
49 49
50 50
51 51 $ hg rebase -s 3 -d 7
52 52 saved backup bundle to $TESTTMP/a1/.hg/strip-backup/*-backup.hg (glob)
53 53
54 54 $ hg tglog
55 55 @ 7: 'D'
56 56 |
57 57 o 6: 'H'
58 58 |
59 59 | o 5: 'G'
60 60 |/|
61 61 o | 4: 'F'
62 62 | |
63 63 | o 3: 'E'
64 64 |/
65 65 | o 2: 'C'
66 66 | |
67 67 | o 1: 'B'
68 68 |/
69 69 o 0: 'A'
70 70
71 71 $ cd ..
72 72
73 73
74 74 D onto F - intermediate point:
75 75
76 76 $ hg clone -q -u . a a2
77 77 $ cd a2
78 78
79 79 $ hg rebase -s 3 -d 5
80 80 saved backup bundle to $TESTTMP/a2/.hg/strip-backup/*-backup.hg (glob)
81 81
82 82 $ hg tglog
83 83 @ 7: 'D'
84 84 |
85 85 | o 6: 'H'
86 86 |/
87 87 | o 5: 'G'
88 88 |/|
89 89 o | 4: 'F'
90 90 | |
91 91 | o 3: 'E'
92 92 |/
93 93 | o 2: 'C'
94 94 | |
95 95 | o 1: 'B'
96 96 |/
97 97 o 0: 'A'
98 98
99 99 $ cd ..
100 100
101 101
102 102 E onto H - skipping G:
103 103
104 104 $ hg clone -q -u . a a3
105 105 $ cd a3
106 106
107 107 $ hg rebase -s 4 -d 7
108 108 saved backup bundle to $TESTTMP/a3/.hg/strip-backup/*-backup.hg (glob)
109 109
110 110 $ hg tglog
111 111 @ 6: 'E'
112 112 |
113 113 o 5: 'H'
114 114 |
115 115 o 4: 'F'
116 116 |
117 117 | o 3: 'D'
118 118 | |
119 119 | o 2: 'C'
120 120 | |
121 121 | o 1: 'B'
122 122 |/
123 123 o 0: 'A'
124 124
125 125 $ cd ..
126 126
127 127
128 128 F onto E - rebase of a branching point (skip G):
129 129
130 130 $ hg clone -q -u . a a4
131 131 $ cd a4
132 132
133 133 $ hg rebase -s 5 -d 4
134 134 saved backup bundle to $TESTTMP/a4/.hg/strip-backup/*-backup.hg (glob)
135 135
136 136 $ hg tglog
137 137 @ 6: 'H'
138 138 |
139 139 o 5: 'F'
140 140 |
141 141 o 4: 'E'
142 142 |
143 143 | o 3: 'D'
144 144 | |
145 145 | o 2: 'C'
146 146 | |
147 147 | o 1: 'B'
148 148 |/
149 149 o 0: 'A'
150 150
151 151 $ cd ..
152 152
153 153
154 154 G onto H - merge revision having a parent in the ancestors of the target:
155 155
156 156 $ hg clone -q -u . a a5
157 157 $ cd a5
158 158
159 159 $ hg rebase -s 6 -d 7
160 160 saved backup bundle to $TESTTMP/a5/.hg/strip-backup/*-backup.hg (glob)
161 161
162 162 $ hg tglog
163 163 @ 7: 'G'
164 164 |\
165 165 | o 6: 'H'
166 166 | |
167 167 | o 5: 'F'
168 168 | |
169 169 o | 4: 'E'
170 170 |/
171 171 | o 3: 'D'
172 172 | |
173 173 | o 2: 'C'
174 174 | |
175 175 | o 1: 'B'
176 176 |/
177 177 o 0: 'A'
178 178
179 179 $ cd ..
180 180
181 181
182 182 F onto B - G maintains E as parent:
183 183
184 184 $ hg clone -q -u . a a6
185 185 $ cd a6
186 186
187 187 $ hg rebase -s 5 -d 1
188 188 saved backup bundle to $TESTTMP/a6/.hg/strip-backup/*-backup.hg (glob)
189 189
190 190 $ hg tglog
191 191 @ 7: 'H'
192 192 |
193 193 | o 6: 'G'
194 194 |/|
195 195 o | 5: 'F'
196 196 | |
197 197 | o 4: 'E'
198 198 | |
199 199 | | o 3: 'D'
200 200 | | |
201 201 +---o 2: 'C'
202 202 | |
203 203 o | 1: 'B'
204 204 |/
205 205 o 0: 'A'
206 206
207 207 $ cd ..
208 208
209 209
210 210 These will fail (using --source):
211 211
212 212 G onto F - rebase onto an ancestor:
213 213
214 214 $ hg clone -q -u . a a7
215 215 $ cd a7
216 216
217 217 $ hg rebase -s 6 -d 5
218 218 nothing to rebase
219 219 [1]
220 220
221 221 F onto G - rebase onto a descendant:
222 222
223 223 $ hg rebase -s 5 -d 6
224 224 abort: source is ancestor of destination
225 225 [255]
226 226
227 227 G onto B - merge revision with both parents not in ancestors of target:
228 228
229 229 $ hg rebase -s 6 -d 1
230 230 abort: cannot use revision 6 as base, result would have 3 parents
231 231 [255]
232 232
233 233
234 234 These will abort gracefully (using --base):
235 235
236 236 G onto G - rebase onto same changeset:
237 237
238 238 $ hg rebase -b 6 -d 6
239 239 nothing to rebase
240 240 [1]
241 241
242 242 G onto F - rebase onto an ancestor:
243 243
244 244 $ hg rebase -b 6 -d 5
245 245 nothing to rebase
246 246 [1]
247 247
248 248 F onto G - rebase onto a descendant:
249 249
250 250 $ hg rebase -b 5 -d 6
251 251 nothing to rebase
252 252 [1]
253 253
254 254 C onto A - rebase onto an ancestor:
255 255
256 256 $ hg rebase -d 0 -s 2
257 257 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/5fddd98957c8-backup.hg (glob)
258 258 $ hg tglog
259 259 @ 7: 'D'
260 260 |
261 261 o 6: 'C'
262 262 |
263 263 | o 5: 'H'
264 264 | |
265 265 | | o 4: 'G'
266 266 | |/|
267 267 | o | 3: 'F'
268 268 |/ /
269 269 | o 2: 'E'
270 270 |/
271 271 | o 1: 'B'
272 272 |/
273 273 o 0: 'A'
274 274
275 275
276 276 Check rebasing public changeset
277 277
278 278 $ hg pull --config phases.publish=True -q -r 6 . # update phase of 6
279 279 $ hg rebase -d 5 -b 6
280 280 abort: can't rebase immutable changeset e1c4361dd923
281 281 (see hg help phases for details)
282 282 [255]
283 283
284 284 $ hg rebase -d 5 -b 6 --keep
285 285
286 286 Check rebasing mutable changesets
287 287 Source phase greater than or equal to destination phase: the new changeset gets the phase of the source:
288 288 $ hg rebase -s9 -d0
289 289 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/2b23e52411f4-backup.hg (glob)
290 290 $ hg log --template "{phase}\n" -r 9
291 291 draft
292 292 $ hg rebase -s9 -d1
293 293 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/2cb10d0cfc6c-backup.hg (glob)
294 294 $ hg log --template "{phase}\n" -r 9
295 295 draft
296 296 $ hg phase --force --secret 9
297 297 $ hg rebase -s9 -d0
298 298 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/c5b12b67163a-backup.hg (glob)
299 299 $ hg log --template "{phase}\n" -r 9
300 300 secret
301 301 $ hg rebase -s9 -d1
302 302 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/2a0524f868ac-backup.hg (glob)
303 303 $ hg log --template "{phase}\n" -r 9
304 304 secret
305 305 Source phase lower than destination phase: the new changeset gets the phase of the destination:
306 306 $ hg rebase -s8 -d9
307 307 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/6d4f22462821-backup.hg (glob)
308 308 $ hg log --template "{phase}\n" -r 'rev(9)'
309 309 secret
310 310
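The rule these transcripts exercise can be stated in one line; this is a sketch, with the phase ordering public < draft < secret as defined by mercurial's phases module:

    PUBLIC, DRAFT, SECRET = 0, 1, 2   # mirrors mercurial.phases ordering

    def rebased_phase(source, destination):
        # the rebased changeset takes the higher of the two phases
        return max(source, destination)

    assert rebased_phase(SECRET, PUBLIC) == SECRET  # source phase kept
    assert rebased_phase(DRAFT, SECRET) == SECRET   # destination phase wins
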
311 311 $ cd ..
312 312
313 313 Tests for revsets
314 314
315 315 We need a slightly different graph
316 316 All destinations are B
317 317
318 318 $ hg init ah
319 319 $ cd ah
320 320 $ hg unbundle "$TESTDIR/bundles/rebase-revset.hg"
321 321 adding changesets
322 322 adding manifests
323 323 adding file changes
324 324 added 9 changesets with 9 changes to 9 files (+2 heads)
325 325 (run 'hg heads' to see heads, 'hg merge' to merge)
326 326 $ hg tglog
327 327 o 8: 'I'
328 328 |
329 329 o 7: 'H'
330 330 |
331 331 o 6: 'G'
332 332 |
333 333 | o 5: 'F'
334 334 | |
335 335 | o 4: 'E'
336 336 |/
337 337 o 3: 'D'
338 338 |
339 339 o 2: 'C'
340 340 |
341 341 | o 1: 'B'
342 342 |/
343 343 o 0: 'A'
344 344
345 345 $ cd ..
346 346
347 347
348 348 Simple case with keep:
349 349
350 350 Source has two descendant heads but we ask for only one
351 351
352 352 $ hg clone -q -u . ah ah1
353 353 $ cd ah1
354 354 $ hg rebase -r '2::8' -d 1
355 355 abort: can't remove original changesets with unrebased descendants
356 356 (use --keep to keep original changesets)
357 357 [255]
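
The abort enforces a simple invariant, sketched below with a hypothetical helper (not rebase's actual code): the rebased originals can only be stripped when every descendant of the rebased set was itself rebased; otherwise --keep is required.

    def must_keep(rebaseset, descendants_of_rebaseset):
        # stripping the originals would orphan any unrebased descendant
        return not descendants_of_rebaseset.issubset(rebaseset)

    # 2::8 selects {2, 3, 6, 7, 8}, but 4 and 5 also descend from 3
    print(must_keep({2, 3, 6, 7, 8}, {3, 4, 5, 6, 7, 8}))  # True
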
358 358 $ hg rebase -r '2::8' -d 1 --keep
359 359 $ hg tglog
360 360 @ 13: 'I'
361 361 |
362 362 o 12: 'H'
363 363 |
364 364 o 11: 'G'
365 365 |
366 366 o 10: 'D'
367 367 |
368 368 o 9: 'C'
369 369 |
370 370 | o 8: 'I'
371 371 | |
372 372 | o 7: 'H'
373 373 | |
374 374 | o 6: 'G'
375 375 | |
376 376 | | o 5: 'F'
377 377 | | |
378 378 | | o 4: 'E'
379 379 | |/
380 380 | o 3: 'D'
381 381 | |
382 382 | o 2: 'C'
383 383 | |
384 384 o | 1: 'B'
385 385 |/
386 386 o 0: 'A'
387 387
388 388
389 389 $ cd ..
390 390
391 391 Base has one descendant head that we ask for, but the common ancestor has two
392 392
393 393 $ hg clone -q -u . ah ah2
394 394 $ cd ah2
395 395 $ hg rebase -r '3::8' -d 1
396 396 abort: can't remove original changesets with unrebased descendants
397 397 (use --keep to keep original changesets)
398 398 [255]
399 399 $ hg rebase -r '3::8' -d 1 --keep
400 400 $ hg tglog
401 401 @ 12: 'I'
402 402 |
403 403 o 11: 'H'
404 404 |
405 405 o 10: 'G'
406 406 |
407 407 o 9: 'D'
408 408 |
409 409 | o 8: 'I'
410 410 | |
411 411 | o 7: 'H'
412 412 | |
413 413 | o 6: 'G'
414 414 | |
415 415 | | o 5: 'F'
416 416 | | |
417 417 | | o 4: 'E'
418 418 | |/
419 419 | o 3: 'D'
420 420 | |
421 421 | o 2: 'C'
422 422 | |
423 423 o | 1: 'B'
424 424 |/
425 425 o 0: 'A'
426 426
427 427
428 428 $ cd ..
429 429
430 430 rebase subset
431 431
432 432 $ hg clone -q -u . ah ah3
433 433 $ cd ah3
434 434 $ hg rebase -r '3::7' -d 1
435 435 abort: can't remove original changesets with unrebased descendants
436 436 (use --keep to keep original changesets)
437 437 [255]
438 438 $ hg rebase -r '3::7' -d 1 --keep
439 439 $ hg tglog
440 440 @ 11: 'H'
441 441 |
442 442 o 10: 'G'
443 443 |
444 444 o 9: 'D'
445 445 |
446 446 | o 8: 'I'
447 447 | |
448 448 | o 7: 'H'
449 449 | |
450 450 | o 6: 'G'
451 451 | |
452 452 | | o 5: 'F'
453 453 | | |
454 454 | | o 4: 'E'
455 455 | |/
456 456 | o 3: 'D'
457 457 | |
458 458 | o 2: 'C'
459 459 | |
460 460 o | 1: 'B'
461 461 |/
462 462 o 0: 'A'
463 463
464 464
465 465 $ cd ..
466 466
467 467 rebase subset with multiple heads
468 468
469 469 $ hg clone -q -u . ah ah4
470 470 $ cd ah4
471 471 $ hg rebase -r '3::(7+5)' -d 1
472 472 abort: can't remove original changesets with unrebased descendants
473 473 (use --keep to keep original changesets)
474 474 [255]
475 475 $ hg rebase -r '3::(7+5)' -d 1 --keep
476 476 $ hg tglog
477 477 @ 13: 'H'
478 478 |
479 479 o 12: 'G'
480 480 |
481 481 | o 11: 'F'
482 482 | |
483 483 | o 10: 'E'
484 484 |/
485 485 o 9: 'D'
486 486 |
487 487 | o 8: 'I'
488 488 | |
489 489 | o 7: 'H'
490 490 | |
491 491 | o 6: 'G'
492 492 | |
493 493 | | o 5: 'F'
494 494 | | |
495 495 | | o 4: 'E'
496 496 | |/
497 497 | o 3: 'D'
498 498 | |
499 499 | o 2: 'C'
500 500 | |
501 501 o | 1: 'B'
502 502 |/
503 503 o 0: 'A'
504 504
505 505
506 506 $ cd ..
507 507
508 508 More advanced tests
509 509
510 510 rebase on ancestor with revset
511 511
512 512 $ hg clone -q -u . ah ah5
513 513 $ cd ah5
514 514 $ hg rebase -r '6::' -d 2
515 515 saved backup bundle to $TESTTMP/ah5/.hg/strip-backup/3d8a618087a7-backup.hg (glob)
516 516 $ hg tglog
517 517 @ 8: 'I'
518 518 |
519 519 o 7: 'H'
520 520 |
521 521 o 6: 'G'
522 522 |
523 523 | o 5: 'F'
524 524 | |
525 525 | o 4: 'E'
526 526 | |
527 527 | o 3: 'D'
528 528 |/
529 529 o 2: 'C'
530 530 |
531 531 | o 1: 'B'
532 532 |/
533 533 o 0: 'A'
534 534
535 535 $ cd ..
536 536
537 537
538 538 rebase with multiple roots.
539 539 We rebase E and G onto B
540 540 We would expect the heads to be I and F if this were supported
541 541
542 542 $ hg clone -q -u . ah ah6
543 543 $ cd ah6
544 544 $ hg rebase -r '(4+6)::' -d 1
545 545 saved backup bundle to $TESTTMP/ah6/.hg/strip-backup/3d8a618087a7-backup.hg (glob)
546 546 $ hg tglog
547 547 @ 8: 'I'
548 548 |
549 549 o 7: 'H'
550 550 |
551 551 o 6: 'G'
552 552 |
553 553 | o 5: 'F'
554 554 | |
555 555 | o 4: 'E'
556 556 |/
557 557 | o 3: 'D'
558 558 | |
559 559 | o 2: 'C'
560 560 | |
561 561 o | 1: 'B'
562 562 |/
563 563 o 0: 'A'
564 564
565 565 $ cd ..
566 566
567 More complexe rebase with multiple roots
567 More complex rebase with multiple roots
568 568 each root has a different common ancestor with the destination, and this is a detach
569 569
570 570 (setup)
571 571
572 572 $ hg clone -q -u . a a8
573 573 $ cd a8
574 574 $ echo I > I
575 575 $ hg add I
576 576 $ hg commit -m I
577 577 $ hg up 4
578 578 1 files updated, 0 files merged, 3 files removed, 0 files unresolved
579 579 $ echo I > J
580 580 $ hg add J
581 581 $ hg commit -m J
582 582 created new head
583 583 $ echo I > K
584 584 $ hg add K
585 585 $ hg commit -m K
586 586 $ hg tglog
587 587 @ 10: 'K'
588 588 |
589 589 o 9: 'J'
590 590 |
591 591 | o 8: 'I'
592 592 | |
593 593 | o 7: 'H'
594 594 | |
595 595 +---o 6: 'G'
596 596 | |/
597 597 | o 5: 'F'
598 598 | |
599 599 o | 4: 'E'
600 600 |/
601 601 | o 3: 'D'
602 602 | |
603 603 | o 2: 'C'
604 604 | |
605 605 | o 1: 'B'
606 606 |/
607 607 o 0: 'A'
608 608
609 609 (actual test)
610 610
611 611 $ hg rebase --dest 'desc(G)' --rev 'desc(K) + desc(I)'
612 612 saved backup bundle to $TESTTMP/a8/.hg/strip-backup/23a4ace37988-backup.hg (glob)
613 613 $ hg log --rev 'children(desc(G))'
614 614 changeset: 9:adb617877056
615 615 parent: 6:eea13746799a
616 616 user: test
617 617 date: Thu Jan 01 00:00:00 1970 +0000
618 618 summary: I
619 619
620 620 changeset: 10:882431a34a0e
621 621 tag: tip
622 622 parent: 6:eea13746799a
623 623 user: test
624 624 date: Thu Jan 01 00:00:00 1970 +0000
625 625 summary: K
626 626
627 627 $ hg tglog
628 628 @ 10: 'K'
629 629 |
630 630 | o 9: 'I'
631 631 |/
632 632 | o 8: 'J'
633 633 | |
634 634 | | o 7: 'H'
635 635 | | |
636 636 o---+ 6: 'G'
637 637 |/ /
638 638 | o 5: 'F'
639 639 | |
640 640 o | 4: 'E'
641 641 |/
642 642 | o 3: 'D'
643 643 | |
644 644 | o 2: 'C'
645 645 | |
646 646 | o 1: 'B'
647 647 |/
648 648 o 0: 'A'
649 649