##// END OF EJS Templates
merge: restate calculateupdates in terms of a matcher...
Augie Fackler -
r27345:98266b1d default
parent child Browse files
Show More
@@ -1,682 +1,681 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 from mercurial import cmdutil, scmutil, util, commands, obsolete
4 from mercurial import cmdutil, scmutil, util, commands, obsolete
5 from mercurial import repoview, branchmap, merge, copies, error
5 from mercurial import repoview, branchmap, merge, copies, error
6 import time, os, sys
6 import time, os, sys
7 import random
7 import random
8 import functools
8 import functools
9
9
10 formatteropts = commands.formatteropts
10 formatteropts = commands.formatteropts
11
11
12 cmdtable = {}
12 cmdtable = {}
13 command = cmdutil.command(cmdtable)
13 command = cmdutil.command(cmdtable)
14
14
15 def getlen(ui):
15 def getlen(ui):
16 if ui.configbool("perf", "stub"):
16 if ui.configbool("perf", "stub"):
17 return lambda x: 1
17 return lambda x: 1
18 return len
18 return len
19
19
20 def gettimer(ui, opts=None):
20 def gettimer(ui, opts=None):
21 """return a timer function and formatter: (timer, formatter)
21 """return a timer function and formatter: (timer, formatter)
22
22
23 This function exists to gather the creation of formatter in a single
23 This function exists to gather the creation of formatter in a single
24 place instead of duplicating it in all performance commands."""
24 place instead of duplicating it in all performance commands."""
25
25
26 # enforce an idle period before execution to counteract power management
26 # enforce an idle period before execution to counteract power management
27 # experimental config: perf.presleep
27 # experimental config: perf.presleep
28 time.sleep(ui.configint("perf", "presleep", 1))
28 time.sleep(ui.configint("perf", "presleep", 1))
29
29
30 if opts is None:
30 if opts is None:
31 opts = {}
31 opts = {}
32 # redirect all to stderr
32 # redirect all to stderr
33 ui = ui.copy()
33 ui = ui.copy()
34 ui.fout = ui.ferr
34 ui.fout = ui.ferr
35 # get a formatter
35 # get a formatter
36 fm = ui.formatter('perf', opts)
36 fm = ui.formatter('perf', opts)
37 # stub function, runs code only once instead of in a loop
37 # stub function, runs code only once instead of in a loop
38 # experimental config: perf.stub
38 # experimental config: perf.stub
39 if ui.configbool("perf", "stub"):
39 if ui.configbool("perf", "stub"):
40 return functools.partial(stub_timer, fm), fm
40 return functools.partial(stub_timer, fm), fm
41 return functools.partial(_timer, fm), fm
41 return functools.partial(_timer, fm), fm
42
42
43 def stub_timer(fm, func, title=None):
43 def stub_timer(fm, func, title=None):
44 func()
44 func()
45
45
46 def _timer(fm, func, title=None):
46 def _timer(fm, func, title=None):
47 results = []
47 results = []
48 begin = time.time()
48 begin = time.time()
49 count = 0
49 count = 0
50 while True:
50 while True:
51 ostart = os.times()
51 ostart = os.times()
52 cstart = time.time()
52 cstart = time.time()
53 r = func()
53 r = func()
54 cstop = time.time()
54 cstop = time.time()
55 ostop = os.times()
55 ostop = os.times()
56 count += 1
56 count += 1
57 a, b = ostart, ostop
57 a, b = ostart, ostop
58 results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
58 results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
59 if cstop - begin > 3 and count >= 100:
59 if cstop - begin > 3 and count >= 100:
60 break
60 break
61 if cstop - begin > 10 and count >= 3:
61 if cstop - begin > 10 and count >= 3:
62 break
62 break
63
63
64 fm.startitem()
64 fm.startitem()
65
65
66 if title:
66 if title:
67 fm.write('title', '! %s\n', title)
67 fm.write('title', '! %s\n', title)
68 if r:
68 if r:
69 fm.write('result', '! result: %s\n', r)
69 fm.write('result', '! result: %s\n', r)
70 m = min(results)
70 m = min(results)
71 fm.plain('!')
71 fm.plain('!')
72 fm.write('wall', ' wall %f', m[0])
72 fm.write('wall', ' wall %f', m[0])
73 fm.write('comb', ' comb %f', m[1] + m[2])
73 fm.write('comb', ' comb %f', m[1] + m[2])
74 fm.write('user', ' user %f', m[1])
74 fm.write('user', ' user %f', m[1])
75 fm.write('sys', ' sys %f', m[2])
75 fm.write('sys', ' sys %f', m[2])
76 fm.write('count', ' (best of %d)', count)
76 fm.write('count', ' (best of %d)', count)
77 fm.plain('\n')
77 fm.plain('\n')
78
78
79 @command('perfwalk', formatteropts)
79 @command('perfwalk', formatteropts)
80 def perfwalk(ui, repo, *pats, **opts):
80 def perfwalk(ui, repo, *pats, **opts):
81 timer, fm = gettimer(ui, opts)
81 timer, fm = gettimer(ui, opts)
82 try:
82 try:
83 m = scmutil.match(repo[None], pats, {})
83 m = scmutil.match(repo[None], pats, {})
84 timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
84 timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
85 except Exception:
85 except Exception:
86 try:
86 try:
87 m = scmutil.match(repo[None], pats, {})
87 m = scmutil.match(repo[None], pats, {})
88 timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
88 timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
89 except Exception:
89 except Exception:
90 timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
90 timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
91 fm.end()
91 fm.end()
92
92
93 @command('perfannotate', formatteropts)
93 @command('perfannotate', formatteropts)
94 def perfannotate(ui, repo, f, **opts):
94 def perfannotate(ui, repo, f, **opts):
95 timer, fm = gettimer(ui, opts)
95 timer, fm = gettimer(ui, opts)
96 fc = repo['.'][f]
96 fc = repo['.'][f]
97 timer(lambda: len(fc.annotate(True)))
97 timer(lambda: len(fc.annotate(True)))
98 fm.end()
98 fm.end()
99
99
100 @command('perfstatus',
100 @command('perfstatus',
101 [('u', 'unknown', False,
101 [('u', 'unknown', False,
102 'ask status to look for unknown files')] + formatteropts)
102 'ask status to look for unknown files')] + formatteropts)
103 def perfstatus(ui, repo, **opts):
103 def perfstatus(ui, repo, **opts):
104 #m = match.always(repo.root, repo.getcwd())
104 #m = match.always(repo.root, repo.getcwd())
105 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
105 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
106 # False))))
106 # False))))
107 timer, fm = gettimer(ui, opts)
107 timer, fm = gettimer(ui, opts)
108 timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
108 timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
109 fm.end()
109 fm.end()
110
110
111 @command('perfaddremove', formatteropts)
111 @command('perfaddremove', formatteropts)
112 def perfaddremove(ui, repo, **opts):
112 def perfaddremove(ui, repo, **opts):
113 timer, fm = gettimer(ui, opts)
113 timer, fm = gettimer(ui, opts)
114 try:
114 try:
115 oldquiet = repo.ui.quiet
115 oldquiet = repo.ui.quiet
116 repo.ui.quiet = True
116 repo.ui.quiet = True
117 matcher = scmutil.match(repo[None])
117 matcher = scmutil.match(repo[None])
118 timer(lambda: scmutil.addremove(repo, matcher, "", dry_run=True))
118 timer(lambda: scmutil.addremove(repo, matcher, "", dry_run=True))
119 finally:
119 finally:
120 repo.ui.quiet = oldquiet
120 repo.ui.quiet = oldquiet
121 fm.end()
121 fm.end()
122
122
123 def clearcaches(cl):
123 def clearcaches(cl):
124 # behave somewhat consistently across internal API changes
124 # behave somewhat consistently across internal API changes
125 if util.safehasattr(cl, 'clearcaches'):
125 if util.safehasattr(cl, 'clearcaches'):
126 cl.clearcaches()
126 cl.clearcaches()
127 elif util.safehasattr(cl, '_nodecache'):
127 elif util.safehasattr(cl, '_nodecache'):
128 from mercurial.node import nullid, nullrev
128 from mercurial.node import nullid, nullrev
129 cl._nodecache = {nullid: nullrev}
129 cl._nodecache = {nullid: nullrev}
130 cl._nodepos = None
130 cl._nodepos = None
131
131
132 @command('perfheads', formatteropts)
132 @command('perfheads', formatteropts)
133 def perfheads(ui, repo, **opts):
133 def perfheads(ui, repo, **opts):
134 timer, fm = gettimer(ui, opts)
134 timer, fm = gettimer(ui, opts)
135 cl = repo.changelog
135 cl = repo.changelog
136 def d():
136 def d():
137 len(cl.headrevs())
137 len(cl.headrevs())
138 clearcaches(cl)
138 clearcaches(cl)
139 timer(d)
139 timer(d)
140 fm.end()
140 fm.end()
141
141
142 @command('perftags', formatteropts)
142 @command('perftags', formatteropts)
143 def perftags(ui, repo, **opts):
143 def perftags(ui, repo, **opts):
144 import mercurial.changelog
144 import mercurial.changelog
145 import mercurial.manifest
145 import mercurial.manifest
146 timer, fm = gettimer(ui, opts)
146 timer, fm = gettimer(ui, opts)
147 def t():
147 def t():
148 repo.changelog = mercurial.changelog.changelog(repo.svfs)
148 repo.changelog = mercurial.changelog.changelog(repo.svfs)
149 repo.manifest = mercurial.manifest.manifest(repo.svfs)
149 repo.manifest = mercurial.manifest.manifest(repo.svfs)
150 repo._tags = None
150 repo._tags = None
151 return len(repo.tags())
151 return len(repo.tags())
152 timer(t)
152 timer(t)
153 fm.end()
153 fm.end()
154
154
155 @command('perfancestors', formatteropts)
155 @command('perfancestors', formatteropts)
156 def perfancestors(ui, repo, **opts):
156 def perfancestors(ui, repo, **opts):
157 timer, fm = gettimer(ui, opts)
157 timer, fm = gettimer(ui, opts)
158 heads = repo.changelog.headrevs()
158 heads = repo.changelog.headrevs()
159 def d():
159 def d():
160 for a in repo.changelog.ancestors(heads):
160 for a in repo.changelog.ancestors(heads):
161 pass
161 pass
162 timer(d)
162 timer(d)
163 fm.end()
163 fm.end()
164
164
165 @command('perfancestorset', formatteropts)
165 @command('perfancestorset', formatteropts)
166 def perfancestorset(ui, repo, revset, **opts):
166 def perfancestorset(ui, repo, revset, **opts):
167 timer, fm = gettimer(ui, opts)
167 timer, fm = gettimer(ui, opts)
168 revs = repo.revs(revset)
168 revs = repo.revs(revset)
169 heads = repo.changelog.headrevs()
169 heads = repo.changelog.headrevs()
170 def d():
170 def d():
171 s = repo.changelog.ancestors(heads)
171 s = repo.changelog.ancestors(heads)
172 for rev in revs:
172 for rev in revs:
173 rev in s
173 rev in s
174 timer(d)
174 timer(d)
175 fm.end()
175 fm.end()
176
176
177 @command('perfdirs', formatteropts)
177 @command('perfdirs', formatteropts)
178 def perfdirs(ui, repo, **opts):
178 def perfdirs(ui, repo, **opts):
179 timer, fm = gettimer(ui, opts)
179 timer, fm = gettimer(ui, opts)
180 dirstate = repo.dirstate
180 dirstate = repo.dirstate
181 'a' in dirstate
181 'a' in dirstate
182 def d():
182 def d():
183 dirstate.dirs()
183 dirstate.dirs()
184 del dirstate._dirs
184 del dirstate._dirs
185 timer(d)
185 timer(d)
186 fm.end()
186 fm.end()
187
187
188 @command('perfdirstate', formatteropts)
188 @command('perfdirstate', formatteropts)
189 def perfdirstate(ui, repo, **opts):
189 def perfdirstate(ui, repo, **opts):
190 timer, fm = gettimer(ui, opts)
190 timer, fm = gettimer(ui, opts)
191 "a" in repo.dirstate
191 "a" in repo.dirstate
192 def d():
192 def d():
193 repo.dirstate.invalidate()
193 repo.dirstate.invalidate()
194 "a" in repo.dirstate
194 "a" in repo.dirstate
195 timer(d)
195 timer(d)
196 fm.end()
196 fm.end()
197
197
198 @command('perfdirstatedirs', formatteropts)
198 @command('perfdirstatedirs', formatteropts)
199 def perfdirstatedirs(ui, repo, **opts):
199 def perfdirstatedirs(ui, repo, **opts):
200 timer, fm = gettimer(ui, opts)
200 timer, fm = gettimer(ui, opts)
201 "a" in repo.dirstate
201 "a" in repo.dirstate
202 def d():
202 def d():
203 "a" in repo.dirstate._dirs
203 "a" in repo.dirstate._dirs
204 del repo.dirstate._dirs
204 del repo.dirstate._dirs
205 timer(d)
205 timer(d)
206 fm.end()
206 fm.end()
207
207
208 @command('perfdirstatefoldmap', formatteropts)
208 @command('perfdirstatefoldmap', formatteropts)
209 def perfdirstatefoldmap(ui, repo, **opts):
209 def perfdirstatefoldmap(ui, repo, **opts):
210 timer, fm = gettimer(ui, opts)
210 timer, fm = gettimer(ui, opts)
211 dirstate = repo.dirstate
211 dirstate = repo.dirstate
212 'a' in dirstate
212 'a' in dirstate
213 def d():
213 def d():
214 dirstate._filefoldmap.get('a')
214 dirstate._filefoldmap.get('a')
215 del dirstate._filefoldmap
215 del dirstate._filefoldmap
216 timer(d)
216 timer(d)
217 fm.end()
217 fm.end()
218
218
219 @command('perfdirfoldmap', formatteropts)
219 @command('perfdirfoldmap', formatteropts)
220 def perfdirfoldmap(ui, repo, **opts):
220 def perfdirfoldmap(ui, repo, **opts):
221 timer, fm = gettimer(ui, opts)
221 timer, fm = gettimer(ui, opts)
222 dirstate = repo.dirstate
222 dirstate = repo.dirstate
223 'a' in dirstate
223 'a' in dirstate
224 def d():
224 def d():
225 dirstate._dirfoldmap.get('a')
225 dirstate._dirfoldmap.get('a')
226 del dirstate._dirfoldmap
226 del dirstate._dirfoldmap
227 del dirstate._dirs
227 del dirstate._dirs
228 timer(d)
228 timer(d)
229 fm.end()
229 fm.end()
230
230
231 @command('perfdirstatewrite', formatteropts)
231 @command('perfdirstatewrite', formatteropts)
232 def perfdirstatewrite(ui, repo, **opts):
232 def perfdirstatewrite(ui, repo, **opts):
233 timer, fm = gettimer(ui, opts)
233 timer, fm = gettimer(ui, opts)
234 ds = repo.dirstate
234 ds = repo.dirstate
235 "a" in ds
235 "a" in ds
236 def d():
236 def d():
237 ds._dirty = True
237 ds._dirty = True
238 ds.write(repo.currenttransaction())
238 ds.write(repo.currenttransaction())
239 timer(d)
239 timer(d)
240 fm.end()
240 fm.end()
241
241
242 @command('perfmergecalculate',
242 @command('perfmergecalculate',
243 [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
243 [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
244 def perfmergecalculate(ui, repo, rev, **opts):
244 def perfmergecalculate(ui, repo, rev, **opts):
245 timer, fm = gettimer(ui, opts)
245 timer, fm = gettimer(ui, opts)
246 wctx = repo[None]
246 wctx = repo[None]
247 rctx = scmutil.revsingle(repo, rev, rev)
247 rctx = scmutil.revsingle(repo, rev, rev)
248 ancestor = wctx.ancestor(rctx)
248 ancestor = wctx.ancestor(rctx)
249 # we don't want working dir files to be stat'd in the benchmark, so prime
249 # we don't want working dir files to be stat'd in the benchmark, so prime
250 # that cache
250 # that cache
251 wctx.dirty()
251 wctx.dirty()
252 def d():
252 def d():
253 # acceptremote is True because we don't want prompts in the middle of
253 # acceptremote is True because we don't want prompts in the middle of
254 # our benchmark
254 # our benchmark
255 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
255 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
256 False, acceptremote=True, followcopies=True)
256 acceptremote=True, followcopies=True)
257 timer(d)
257 timer(d)
258 fm.end()
258 fm.end()
259
259
260 @command('perfpathcopies', [], "REV REV")
260 @command('perfpathcopies', [], "REV REV")
261 def perfpathcopies(ui, repo, rev1, rev2, **opts):
261 def perfpathcopies(ui, repo, rev1, rev2, **opts):
262 timer, fm = gettimer(ui, opts)
262 timer, fm = gettimer(ui, opts)
263 ctx1 = scmutil.revsingle(repo, rev1, rev1)
263 ctx1 = scmutil.revsingle(repo, rev1, rev1)
264 ctx2 = scmutil.revsingle(repo, rev2, rev2)
264 ctx2 = scmutil.revsingle(repo, rev2, rev2)
265 def d():
265 def d():
266 copies.pathcopies(ctx1, ctx2)
266 copies.pathcopies(ctx1, ctx2)
267 timer(d)
267 timer(d)
268 fm.end()
268 fm.end()
269
269
270 @command('perfmanifest', [], 'REV')
270 @command('perfmanifest', [], 'REV')
271 def perfmanifest(ui, repo, rev, **opts):
271 def perfmanifest(ui, repo, rev, **opts):
272 timer, fm = gettimer(ui, opts)
272 timer, fm = gettimer(ui, opts)
273 ctx = scmutil.revsingle(repo, rev, rev)
273 ctx = scmutil.revsingle(repo, rev, rev)
274 t = ctx.manifestnode()
274 t = ctx.manifestnode()
275 def d():
275 def d():
276 repo.manifest._mancache.clear()
276 repo.manifest._mancache.clear()
277 repo.manifest._cache = None
277 repo.manifest._cache = None
278 repo.manifest.read(t)
278 repo.manifest.read(t)
279 timer(d)
279 timer(d)
280 fm.end()
280 fm.end()
281
281
282 @command('perfchangeset', formatteropts)
282 @command('perfchangeset', formatteropts)
283 def perfchangeset(ui, repo, rev, **opts):
283 def perfchangeset(ui, repo, rev, **opts):
284 timer, fm = gettimer(ui, opts)
284 timer, fm = gettimer(ui, opts)
285 n = repo[rev].node()
285 n = repo[rev].node()
286 def d():
286 def d():
287 repo.changelog.read(n)
287 repo.changelog.read(n)
288 #repo.changelog._cache = None
288 #repo.changelog._cache = None
289 timer(d)
289 timer(d)
290 fm.end()
290 fm.end()
291
291
292 @command('perfindex', formatteropts)
292 @command('perfindex', formatteropts)
293 def perfindex(ui, repo, **opts):
293 def perfindex(ui, repo, **opts):
294 import mercurial.revlog
294 import mercurial.revlog
295 timer, fm = gettimer(ui, opts)
295 timer, fm = gettimer(ui, opts)
296 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
296 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
297 n = repo["tip"].node()
297 n = repo["tip"].node()
298 def d():
298 def d():
299 cl = mercurial.revlog.revlog(repo.svfs, "00changelog.i")
299 cl = mercurial.revlog.revlog(repo.svfs, "00changelog.i")
300 cl.rev(n)
300 cl.rev(n)
301 timer(d)
301 timer(d)
302 fm.end()
302 fm.end()
303
303
304 @command('perfstartup', formatteropts)
304 @command('perfstartup', formatteropts)
305 def perfstartup(ui, repo, **opts):
305 def perfstartup(ui, repo, **opts):
306 timer, fm = gettimer(ui, opts)
306 timer, fm = gettimer(ui, opts)
307 cmd = sys.argv[0]
307 cmd = sys.argv[0]
308 def d():
308 def d():
309 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
309 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
310 timer(d)
310 timer(d)
311 fm.end()
311 fm.end()
312
312
313 @command('perfparents', formatteropts)
313 @command('perfparents', formatteropts)
314 def perfparents(ui, repo, **opts):
314 def perfparents(ui, repo, **opts):
315 timer, fm = gettimer(ui, opts)
315 timer, fm = gettimer(ui, opts)
316 # control the number of commits perfparents iterates over
316 # control the number of commits perfparents iterates over
317 # experimental config: perf.parentscount
317 # experimental config: perf.parentscount
318 count = ui.configint("perf", "parentscount", 1000)
318 count = ui.configint("perf", "parentscount", 1000)
319 if len(repo.changelog) < count:
319 if len(repo.changelog) < count:
320 raise error.Abort("repo needs %d commits for this test" % count)
320 raise error.Abort("repo needs %d commits for this test" % count)
321 repo = repo.unfiltered()
321 repo = repo.unfiltered()
322 nl = [repo.changelog.node(i) for i in xrange(count)]
322 nl = [repo.changelog.node(i) for i in xrange(count)]
323 def d():
323 def d():
324 for n in nl:
324 for n in nl:
325 repo.changelog.parents(n)
325 repo.changelog.parents(n)
326 timer(d)
326 timer(d)
327 fm.end()
327 fm.end()
328
328
329 @command('perfctxfiles', formatteropts)
329 @command('perfctxfiles', formatteropts)
330 def perfctxfiles(ui, repo, x, **opts):
330 def perfctxfiles(ui, repo, x, **opts):
331 x = int(x)
331 x = int(x)
332 timer, fm = gettimer(ui, opts)
332 timer, fm = gettimer(ui, opts)
333 def d():
333 def d():
334 len(repo[x].files())
334 len(repo[x].files())
335 timer(d)
335 timer(d)
336 fm.end()
336 fm.end()
337
337
338 @command('perfrawfiles', formatteropts)
338 @command('perfrawfiles', formatteropts)
339 def perfrawfiles(ui, repo, x, **opts):
339 def perfrawfiles(ui, repo, x, **opts):
340 x = int(x)
340 x = int(x)
341 timer, fm = gettimer(ui, opts)
341 timer, fm = gettimer(ui, opts)
342 cl = repo.changelog
342 cl = repo.changelog
343 def d():
343 def d():
344 len(cl.read(x)[3])
344 len(cl.read(x)[3])
345 timer(d)
345 timer(d)
346 fm.end()
346 fm.end()
347
347
348 @command('perflookup', formatteropts)
348 @command('perflookup', formatteropts)
349 def perflookup(ui, repo, rev, **opts):
349 def perflookup(ui, repo, rev, **opts):
350 timer, fm = gettimer(ui, opts)
350 timer, fm = gettimer(ui, opts)
351 timer(lambda: len(repo.lookup(rev)))
351 timer(lambda: len(repo.lookup(rev)))
352 fm.end()
352 fm.end()
353
353
354 @command('perfrevrange', formatteropts)
354 @command('perfrevrange', formatteropts)
355 def perfrevrange(ui, repo, *specs, **opts):
355 def perfrevrange(ui, repo, *specs, **opts):
356 timer, fm = gettimer(ui, opts)
356 timer, fm = gettimer(ui, opts)
357 revrange = scmutil.revrange
357 revrange = scmutil.revrange
358 timer(lambda: len(revrange(repo, specs)))
358 timer(lambda: len(revrange(repo, specs)))
359 fm.end()
359 fm.end()
360
360
361 @command('perfnodelookup', formatteropts)
361 @command('perfnodelookup', formatteropts)
362 def perfnodelookup(ui, repo, rev, **opts):
362 def perfnodelookup(ui, repo, rev, **opts):
363 timer, fm = gettimer(ui, opts)
363 timer, fm = gettimer(ui, opts)
364 import mercurial.revlog
364 import mercurial.revlog
365 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
365 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
366 n = repo[rev].node()
366 n = repo[rev].node()
367 cl = mercurial.revlog.revlog(repo.svfs, "00changelog.i")
367 cl = mercurial.revlog.revlog(repo.svfs, "00changelog.i")
368 def d():
368 def d():
369 cl.rev(n)
369 cl.rev(n)
370 clearcaches(cl)
370 clearcaches(cl)
371 timer(d)
371 timer(d)
372 fm.end()
372 fm.end()
373
373
374 @command('perflog',
374 @command('perflog',
375 [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
375 [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
376 def perflog(ui, repo, rev=None, **opts):
376 def perflog(ui, repo, rev=None, **opts):
377 if rev is None:
377 if rev is None:
378 rev=[]
378 rev=[]
379 timer, fm = gettimer(ui, opts)
379 timer, fm = gettimer(ui, opts)
380 ui.pushbuffer()
380 ui.pushbuffer()
381 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
381 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
382 copies=opts.get('rename')))
382 copies=opts.get('rename')))
383 ui.popbuffer()
383 ui.popbuffer()
384 fm.end()
384 fm.end()
385
385
386 @command('perfmoonwalk', formatteropts)
386 @command('perfmoonwalk', formatteropts)
387 def perfmoonwalk(ui, repo, **opts):
387 def perfmoonwalk(ui, repo, **opts):
388 """benchmark walking the changelog backwards
388 """benchmark walking the changelog backwards
389
389
390 This also loads the changelog data for each revision in the changelog.
390 This also loads the changelog data for each revision in the changelog.
391 """
391 """
392 timer, fm = gettimer(ui, opts)
392 timer, fm = gettimer(ui, opts)
393 def moonwalk():
393 def moonwalk():
394 for i in xrange(len(repo), -1, -1):
394 for i in xrange(len(repo), -1, -1):
395 ctx = repo[i]
395 ctx = repo[i]
396 ctx.branch() # read changelog data (in addition to the index)
396 ctx.branch() # read changelog data (in addition to the index)
397 timer(moonwalk)
397 timer(moonwalk)
398 fm.end()
398 fm.end()
399
399
400 @command('perftemplating', formatteropts)
400 @command('perftemplating', formatteropts)
401 def perftemplating(ui, repo, rev=None, **opts):
401 def perftemplating(ui, repo, rev=None, **opts):
402 if rev is None:
402 if rev is None:
403 rev=[]
403 rev=[]
404 timer, fm = gettimer(ui, opts)
404 timer, fm = gettimer(ui, opts)
405 ui.pushbuffer()
405 ui.pushbuffer()
406 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
406 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
407 template='{date|shortdate} [{rev}:{node|short}]'
407 template='{date|shortdate} [{rev}:{node|short}]'
408 ' {author|person}: {desc|firstline}\n'))
408 ' {author|person}: {desc|firstline}\n'))
409 ui.popbuffer()
409 ui.popbuffer()
410 fm.end()
410 fm.end()
411
411
412 @command('perfcca', formatteropts)
412 @command('perfcca', formatteropts)
413 def perfcca(ui, repo, **opts):
413 def perfcca(ui, repo, **opts):
414 timer, fm = gettimer(ui, opts)
414 timer, fm = gettimer(ui, opts)
415 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
415 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
416 fm.end()
416 fm.end()
417
417
418 @command('perffncacheload', formatteropts)
418 @command('perffncacheload', formatteropts)
419 def perffncacheload(ui, repo, **opts):
419 def perffncacheload(ui, repo, **opts):
420 timer, fm = gettimer(ui, opts)
420 timer, fm = gettimer(ui, opts)
421 s = repo.store
421 s = repo.store
422 def d():
422 def d():
423 s.fncache._load()
423 s.fncache._load()
424 timer(d)
424 timer(d)
425 fm.end()
425 fm.end()
426
426
427 @command('perffncachewrite', formatteropts)
427 @command('perffncachewrite', formatteropts)
428 def perffncachewrite(ui, repo, **opts):
428 def perffncachewrite(ui, repo, **opts):
429 timer, fm = gettimer(ui, opts)
429 timer, fm = gettimer(ui, opts)
430 s = repo.store
430 s = repo.store
431 s.fncache._load()
431 s.fncache._load()
432 lock = repo.lock()
432 lock = repo.lock()
433 tr = repo.transaction('perffncachewrite')
433 tr = repo.transaction('perffncachewrite')
434 def d():
434 def d():
435 s.fncache._dirty = True
435 s.fncache._dirty = True
436 s.fncache.write(tr)
436 s.fncache.write(tr)
437 timer(d)
437 timer(d)
438 lock.release()
438 lock.release()
439 fm.end()
439 fm.end()
440
440
441 @command('perffncacheencode', formatteropts)
441 @command('perffncacheencode', formatteropts)
442 def perffncacheencode(ui, repo, **opts):
442 def perffncacheencode(ui, repo, **opts):
443 timer, fm = gettimer(ui, opts)
443 timer, fm = gettimer(ui, opts)
444 s = repo.store
444 s = repo.store
445 s.fncache._load()
445 s.fncache._load()
446 def d():
446 def d():
447 for p in s.fncache.entries:
447 for p in s.fncache.entries:
448 s.encode(p)
448 s.encode(p)
449 timer(d)
449 timer(d)
450 fm.end()
450 fm.end()
451
451
452 @command('perfdiffwd', formatteropts)
452 @command('perfdiffwd', formatteropts)
453 def perfdiffwd(ui, repo, **opts):
453 def perfdiffwd(ui, repo, **opts):
454 """Profile diff of working directory changes"""
454 """Profile diff of working directory changes"""
455 timer, fm = gettimer(ui, opts)
455 timer, fm = gettimer(ui, opts)
456 options = {
456 options = {
457 'w': 'ignore_all_space',
457 'w': 'ignore_all_space',
458 'b': 'ignore_space_change',
458 'b': 'ignore_space_change',
459 'B': 'ignore_blank_lines',
459 'B': 'ignore_blank_lines',
460 }
460 }
461
461
462 for diffopt in ('', 'w', 'b', 'B', 'wB'):
462 for diffopt in ('', 'w', 'b', 'B', 'wB'):
463 opts = dict((options[c], '1') for c in diffopt)
463 opts = dict((options[c], '1') for c in diffopt)
464 def d():
464 def d():
465 ui.pushbuffer()
465 ui.pushbuffer()
466 commands.diff(ui, repo, **opts)
466 commands.diff(ui, repo, **opts)
467 ui.popbuffer()
467 ui.popbuffer()
468 title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
468 title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
469 timer(d, title)
469 timer(d, title)
470 fm.end()
470 fm.end()
471
471
472 @command('perfrevlog',
472 @command('perfrevlog',
473 [('d', 'dist', 100, 'distance between the revisions')] + formatteropts,
473 [('d', 'dist', 100, 'distance between the revisions')] + formatteropts,
474 "[INDEXFILE]")
474 "[INDEXFILE]")
475 def perfrevlog(ui, repo, file_, **opts):
475 def perfrevlog(ui, repo, file_, **opts):
476 timer, fm = gettimer(ui, opts)
476 timer, fm = gettimer(ui, opts)
477 from mercurial import revlog
477 from mercurial import revlog
478 dist = opts['dist']
478 dist = opts['dist']
479 _len = getlen(ui)
479 _len = getlen(ui)
480 def d():
480 def d():
481 r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)
481 r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)
482 for x in xrange(0, _len(r), dist):
482 for x in xrange(0, _len(r), dist):
483 r.revision(r.node(x))
483 r.revision(r.node(x))
484
484
485 timer(d)
485 timer(d)
486 fm.end()
486 fm.end()
487
487
488 @command('perfrevset',
488 @command('perfrevset',
489 [('C', 'clear', False, 'clear volatile cache between each call.'),
489 [('C', 'clear', False, 'clear volatile cache between each call.'),
490 ('', 'contexts', False, 'obtain changectx for each revision')]
490 ('', 'contexts', False, 'obtain changectx for each revision')]
491 + formatteropts, "REVSET")
491 + formatteropts, "REVSET")
492 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
492 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
493 """benchmark the execution time of a revset
493 """benchmark the execution time of a revset
494
494
495 Use the --clean option if need to evaluate the impact of build volatile
495 Use the --clean option if need to evaluate the impact of build volatile
496 revisions set cache on the revset execution. Volatile cache hold filtered
496 revisions set cache on the revset execution. Volatile cache hold filtered
497 and obsolete related cache."""
497 and obsolete related cache."""
498 timer, fm = gettimer(ui, opts)
498 timer, fm = gettimer(ui, opts)
499 def d():
499 def d():
500 if clear:
500 if clear:
501 repo.invalidatevolatilesets()
501 repo.invalidatevolatilesets()
502 if contexts:
502 if contexts:
503 for ctx in repo.set(expr): pass
503 for ctx in repo.set(expr): pass
504 else:
504 else:
505 for r in repo.revs(expr): pass
505 for r in repo.revs(expr): pass
506 timer(d)
506 timer(d)
507 fm.end()
507 fm.end()
508
508
509 @command('perfvolatilesets', formatteropts)
509 @command('perfvolatilesets', formatteropts)
510 def perfvolatilesets(ui, repo, *names, **opts):
510 def perfvolatilesets(ui, repo, *names, **opts):
511 """benchmark the computation of various volatile set
511 """benchmark the computation of various volatile set
512
512
513 Volatile set computes element related to filtering and obsolescence."""
513 Volatile set computes element related to filtering and obsolescence."""
514 timer, fm = gettimer(ui, opts)
514 timer, fm = gettimer(ui, opts)
515 repo = repo.unfiltered()
515 repo = repo.unfiltered()
516
516
517 def getobs(name):
517 def getobs(name):
518 def d():
518 def d():
519 repo.invalidatevolatilesets()
519 repo.invalidatevolatilesets()
520 obsolete.getrevs(repo, name)
520 obsolete.getrevs(repo, name)
521 return d
521 return d
522
522
523 allobs = sorted(obsolete.cachefuncs)
523 allobs = sorted(obsolete.cachefuncs)
524 if names:
524 if names:
525 allobs = [n for n in allobs if n in names]
525 allobs = [n for n in allobs if n in names]
526
526
527 for name in allobs:
527 for name in allobs:
528 timer(getobs(name), title=name)
528 timer(getobs(name), title=name)
529
529
530 def getfiltered(name):
530 def getfiltered(name):
531 def d():
531 def d():
532 repo.invalidatevolatilesets()
532 repo.invalidatevolatilesets()
533 repoview.filterrevs(repo, name)
533 repoview.filterrevs(repo, name)
534 return d
534 return d
535
535
536 allfilter = sorted(repoview.filtertable)
536 allfilter = sorted(repoview.filtertable)
537 if names:
537 if names:
538 allfilter = [n for n in allfilter if n in names]
538 allfilter = [n for n in allfilter if n in names]
539
539
540 for name in allfilter:
540 for name in allfilter:
541 timer(getfiltered(name), title=name)
541 timer(getfiltered(name), title=name)
542 fm.end()
542 fm.end()
543
543
544 @command('perfbranchmap',
544 @command('perfbranchmap',
545 [('f', 'full', False,
545 [('f', 'full', False,
546 'Includes build time of subset'),
546 'Includes build time of subset'),
547 ] + formatteropts)
547 ] + formatteropts)
548 def perfbranchmap(ui, repo, full=False, **opts):
548 def perfbranchmap(ui, repo, full=False, **opts):
549 """benchmark the update of a branchmap
549 """benchmark the update of a branchmap
550
550
551 This benchmarks the full repo.branchmap() call with read and write disabled
551 This benchmarks the full repo.branchmap() call with read and write disabled
552 """
552 """
553 timer, fm = gettimer(ui, opts)
553 timer, fm = gettimer(ui, opts)
554 def getbranchmap(filtername):
554 def getbranchmap(filtername):
555 """generate a benchmark function for the filtername"""
555 """generate a benchmark function for the filtername"""
556 if filtername is None:
556 if filtername is None:
557 view = repo
557 view = repo
558 else:
558 else:
559 view = repo.filtered(filtername)
559 view = repo.filtered(filtername)
560 def d():
560 def d():
561 if full:
561 if full:
562 view._branchcaches.clear()
562 view._branchcaches.clear()
563 else:
563 else:
564 view._branchcaches.pop(filtername, None)
564 view._branchcaches.pop(filtername, None)
565 view.branchmap()
565 view.branchmap()
566 return d
566 return d
567 # add filter in smaller subset to bigger subset
567 # add filter in smaller subset to bigger subset
568 possiblefilters = set(repoview.filtertable)
568 possiblefilters = set(repoview.filtertable)
569 allfilters = []
569 allfilters = []
570 while possiblefilters:
570 while possiblefilters:
571 for name in possiblefilters:
571 for name in possiblefilters:
572 subset = branchmap.subsettable.get(name)
572 subset = branchmap.subsettable.get(name)
573 if subset not in possiblefilters:
573 if subset not in possiblefilters:
574 break
574 break
575 else:
575 else:
576 assert False, 'subset cycle %s!' % possiblefilters
576 assert False, 'subset cycle %s!' % possiblefilters
577 allfilters.append(name)
577 allfilters.append(name)
578 possiblefilters.remove(name)
578 possiblefilters.remove(name)
579
579
580 # warm the cache
580 # warm the cache
581 if not full:
581 if not full:
582 for name in allfilters:
582 for name in allfilters:
583 repo.filtered(name).branchmap()
583 repo.filtered(name).branchmap()
584 # add unfiltered
584 # add unfiltered
585 allfilters.append(None)
585 allfilters.append(None)
586 oldread = branchmap.read
586 oldread = branchmap.read
587 oldwrite = branchmap.branchcache.write
587 oldwrite = branchmap.branchcache.write
588 try:
588 try:
589 branchmap.read = lambda repo: None
589 branchmap.read = lambda repo: None
590 branchmap.write = lambda repo: None
590 branchmap.write = lambda repo: None
591 for name in allfilters:
591 for name in allfilters:
592 timer(getbranchmap(name), title=str(name))
592 timer(getbranchmap(name), title=str(name))
593 finally:
593 finally:
594 branchmap.read = oldread
594 branchmap.read = oldread
595 branchmap.branchcache.write = oldwrite
595 branchmap.branchcache.write = oldwrite
596 fm.end()
596 fm.end()
597
597
598 @command('perfloadmarkers')
598 @command('perfloadmarkers')
599 def perfloadmarkers(ui, repo):
599 def perfloadmarkers(ui, repo):
600 """benchmark the time to parse the on-disk markers for a repo
600 """benchmark the time to parse the on-disk markers for a repo
601
601
602 Result is the number of markers in the repo."""
602 Result is the number of markers in the repo."""
603 timer, fm = gettimer(ui)
603 timer, fm = gettimer(ui)
604 timer(lambda: len(obsolete.obsstore(repo.svfs)))
604 timer(lambda: len(obsolete.obsstore(repo.svfs)))
605 fm.end()
605 fm.end()
606
606
607 @command('perflrucachedict', formatteropts +
607 @command('perflrucachedict', formatteropts +
608 [('', 'size', 4, 'size of cache'),
608 [('', 'size', 4, 'size of cache'),
609 ('', 'gets', 10000, 'number of key lookups'),
609 ('', 'gets', 10000, 'number of key lookups'),
610 ('', 'sets', 10000, 'number of key sets'),
610 ('', 'sets', 10000, 'number of key sets'),
611 ('', 'mixed', 10000, 'number of mixed mode operations'),
611 ('', 'mixed', 10000, 'number of mixed mode operations'),
612 ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
612 ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
613 norepo=True)
613 norepo=True)
614 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
614 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
615 mixedgetfreq=50, **opts):
615 mixedgetfreq=50, **opts):
616 def doinit():
616 def doinit():
617 for i in xrange(10000):
617 for i in xrange(10000):
618 util.lrucachedict(size)
618 util.lrucachedict(size)
619
619
620 values = []
620 values = []
621 for i in xrange(size):
621 for i in xrange(size):
622 values.append(random.randint(0, sys.maxint))
622 values.append(random.randint(0, sys.maxint))
623
623
624 # Get mode fills the cache and tests raw lookup performance with no
624 # Get mode fills the cache and tests raw lookup performance with no
625 # eviction.
625 # eviction.
626 getseq = []
626 getseq = []
627 for i in xrange(gets):
627 for i in xrange(gets):
628 getseq.append(random.choice(values))
628 getseq.append(random.choice(values))
629
629
630 def dogets():
630 def dogets():
631 d = util.lrucachedict(size)
631 d = util.lrucachedict(size)
632 for v in values:
632 for v in values:
633 d[v] = v
633 d[v] = v
634 for key in getseq:
634 for key in getseq:
635 value = d[key]
635 value = d[key]
636 value # silence pyflakes warning
636 value # silence pyflakes warning
637
637
638 # Set mode tests insertion speed with cache eviction.
638 # Set mode tests insertion speed with cache eviction.
639 setseq = []
639 setseq = []
640 for i in xrange(sets):
640 for i in xrange(sets):
641 setseq.append(random.randint(0, sys.maxint))
641 setseq.append(random.randint(0, sys.maxint))
642
642
643 def dosets():
643 def dosets():
644 d = util.lrucachedict(size)
644 d = util.lrucachedict(size)
645 for v in setseq:
645 for v in setseq:
646 d[v] = v
646 d[v] = v
647
647
648 # Mixed mode randomly performs gets and sets with eviction.
648 # Mixed mode randomly performs gets and sets with eviction.
649 mixedops = []
649 mixedops = []
650 for i in xrange(mixed):
650 for i in xrange(mixed):
651 r = random.randint(0, 100)
651 r = random.randint(0, 100)
652 if r < mixedgetfreq:
652 if r < mixedgetfreq:
653 op = 0
653 op = 0
654 else:
654 else:
655 op = 1
655 op = 1
656
656
657 mixedops.append((op, random.randint(0, size * 2)))
657 mixedops.append((op, random.randint(0, size * 2)))
658
658
659 def domixed():
659 def domixed():
660 d = util.lrucachedict(size)
660 d = util.lrucachedict(size)
661
661
662 for op, v in mixedops:
662 for op, v in mixedops:
663 if op == 0:
663 if op == 0:
664 try:
664 try:
665 d[v]
665 d[v]
666 except KeyError:
666 except KeyError:
667 pass
667 pass
668 else:
668 else:
669 d[v] = v
669 d[v] = v
670
670
671 benches = [
671 benches = [
672 (doinit, 'init'),
672 (doinit, 'init'),
673 (dogets, 'gets'),
673 (dogets, 'gets'),
674 (dosets, 'sets'),
674 (dosets, 'sets'),
675 (domixed, 'mixed')
675 (domixed, 'mixed')
676 ]
676 ]
677
677
678 for fn, title in benches:
678 for fn, title in benches:
679 timer, fm = gettimer(ui, opts)
679 timer, fm = gettimer(ui, opts)
680 timer(fn, title=title)
680 timer(fn, title=title)
681 fm.end()
681 fm.end()
682
@@ -1,626 +1,625 b''
1 # hg.py - hg backend for convert extension
1 # hg.py - hg backend for convert extension
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # Notes for hg->hg conversion:
8 # Notes for hg->hg conversion:
9 #
9 #
10 # * Old versions of Mercurial didn't trim the whitespace from the ends
10 # * Old versions of Mercurial didn't trim the whitespace from the ends
11 # of commit messages, but new versions do. Changesets created by
11 # of commit messages, but new versions do. Changesets created by
12 # those older versions, then converted, may thus have different
12 # those older versions, then converted, may thus have different
13 # hashes for changesets that are otherwise identical.
13 # hashes for changesets that are otherwise identical.
14 #
14 #
15 # * Using "--config convert.hg.saverev=true" will make the source
15 # * Using "--config convert.hg.saverev=true" will make the source
16 # identifier to be stored in the converted revision. This will cause
16 # identifier to be stored in the converted revision. This will cause
17 # the converted revision to have a different identity than the
17 # the converted revision to have a different identity than the
18 # source.
18 # source.
19
19
20
20
21 import os, time, cStringIO
21 import os, time, cStringIO
22 from mercurial.i18n import _
22 from mercurial.i18n import _
23 from mercurial.node import bin, hex, nullid
23 from mercurial.node import bin, hex, nullid
24 from mercurial import hg, util, context, bookmarks, error, scmutil, exchange
24 from mercurial import hg, util, context, bookmarks, error, scmutil, exchange
25 from mercurial import phases
25 from mercurial import phases
26 from mercurial import lock as lockmod
26 from mercurial import lock as lockmod
27 from mercurial import merge as mergemod
27 from mercurial import merge as mergemod
28
28
29 from common import NoRepo, commit, converter_source, converter_sink, mapfile
29 from common import NoRepo, commit, converter_source, converter_sink, mapfile
30
30
31 import re
31 import re
32 sha1re = re.compile(r'\b[0-9a-f]{12,40}\b')
32 sha1re = re.compile(r'\b[0-9a-f]{12,40}\b')
33
33
34 class mercurial_sink(converter_sink):
34 class mercurial_sink(converter_sink):
35 def __init__(self, ui, path):
35 def __init__(self, ui, path):
36 converter_sink.__init__(self, ui, path)
36 converter_sink.__init__(self, ui, path)
37 self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
37 self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
38 self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
38 self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
39 self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
39 self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
40 self.lastbranch = None
40 self.lastbranch = None
41 if os.path.isdir(path) and len(os.listdir(path)) > 0:
41 if os.path.isdir(path) and len(os.listdir(path)) > 0:
42 try:
42 try:
43 self.repo = hg.repository(self.ui, path)
43 self.repo = hg.repository(self.ui, path)
44 if not self.repo.local():
44 if not self.repo.local():
45 raise NoRepo(_('%s is not a local Mercurial repository')
45 raise NoRepo(_('%s is not a local Mercurial repository')
46 % path)
46 % path)
47 except error.RepoError as err:
47 except error.RepoError as err:
48 ui.traceback()
48 ui.traceback()
49 raise NoRepo(err.args[0])
49 raise NoRepo(err.args[0])
50 else:
50 else:
51 try:
51 try:
52 ui.status(_('initializing destination %s repository\n') % path)
52 ui.status(_('initializing destination %s repository\n') % path)
53 self.repo = hg.repository(self.ui, path, create=True)
53 self.repo = hg.repository(self.ui, path, create=True)
54 if not self.repo.local():
54 if not self.repo.local():
55 raise NoRepo(_('%s is not a local Mercurial repository')
55 raise NoRepo(_('%s is not a local Mercurial repository')
56 % path)
56 % path)
57 self.created.append(path)
57 self.created.append(path)
58 except error.RepoError:
58 except error.RepoError:
59 ui.traceback()
59 ui.traceback()
60 raise NoRepo(_("could not create hg repository %s as sink")
60 raise NoRepo(_("could not create hg repository %s as sink")
61 % path)
61 % path)
62 self.lock = None
62 self.lock = None
63 self.wlock = None
63 self.wlock = None
64 self.filemapmode = False
64 self.filemapmode = False
65 self.subrevmaps = {}
65 self.subrevmaps = {}
66
66
67 def before(self):
67 def before(self):
68 self.ui.debug('run hg sink pre-conversion action\n')
68 self.ui.debug('run hg sink pre-conversion action\n')
69 self.wlock = self.repo.wlock()
69 self.wlock = self.repo.wlock()
70 self.lock = self.repo.lock()
70 self.lock = self.repo.lock()
71
71
72 def after(self):
72 def after(self):
73 self.ui.debug('run hg sink post-conversion action\n')
73 self.ui.debug('run hg sink post-conversion action\n')
74 if self.lock:
74 if self.lock:
75 self.lock.release()
75 self.lock.release()
76 if self.wlock:
76 if self.wlock:
77 self.wlock.release()
77 self.wlock.release()
78
78
79 def revmapfile(self):
79 def revmapfile(self):
80 return self.repo.join("shamap")
80 return self.repo.join("shamap")
81
81
82 def authorfile(self):
82 def authorfile(self):
83 return self.repo.join("authormap")
83 return self.repo.join("authormap")
84
84
85 def setbranch(self, branch, pbranches):
85 def setbranch(self, branch, pbranches):
86 if not self.clonebranches:
86 if not self.clonebranches:
87 return
87 return
88
88
89 setbranch = (branch != self.lastbranch)
89 setbranch = (branch != self.lastbranch)
90 self.lastbranch = branch
90 self.lastbranch = branch
91 if not branch:
91 if not branch:
92 branch = 'default'
92 branch = 'default'
93 pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
93 pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
94 if pbranches:
94 if pbranches:
95 pbranch = pbranches[0][1]
95 pbranch = pbranches[0][1]
96 else:
96 else:
97 pbranch = 'default'
97 pbranch = 'default'
98
98
99 branchpath = os.path.join(self.path, branch)
99 branchpath = os.path.join(self.path, branch)
100 if setbranch:
100 if setbranch:
101 self.after()
101 self.after()
102 try:
102 try:
103 self.repo = hg.repository(self.ui, branchpath)
103 self.repo = hg.repository(self.ui, branchpath)
104 except Exception:
104 except Exception:
105 self.repo = hg.repository(self.ui, branchpath, create=True)
105 self.repo = hg.repository(self.ui, branchpath, create=True)
106 self.before()
106 self.before()
107
107
108 # pbranches may bring revisions from other branches (merge parents)
108 # pbranches may bring revisions from other branches (merge parents)
109 # Make sure we have them, or pull them.
109 # Make sure we have them, or pull them.
110 missings = {}
110 missings = {}
111 for b in pbranches:
111 for b in pbranches:
112 try:
112 try:
113 self.repo.lookup(b[0])
113 self.repo.lookup(b[0])
114 except Exception:
114 except Exception:
115 missings.setdefault(b[1], []).append(b[0])
115 missings.setdefault(b[1], []).append(b[0])
116
116
117 if missings:
117 if missings:
118 self.after()
118 self.after()
119 for pbranch, heads in sorted(missings.iteritems()):
119 for pbranch, heads in sorted(missings.iteritems()):
120 pbranchpath = os.path.join(self.path, pbranch)
120 pbranchpath = os.path.join(self.path, pbranch)
121 prepo = hg.peer(self.ui, {}, pbranchpath)
121 prepo = hg.peer(self.ui, {}, pbranchpath)
122 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
122 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
123 exchange.pull(self.repo, prepo,
123 exchange.pull(self.repo, prepo,
124 [prepo.lookup(h) for h in heads])
124 [prepo.lookup(h) for h in heads])
125 self.before()
125 self.before()
126
126
127 def _rewritetags(self, source, revmap, data):
127 def _rewritetags(self, source, revmap, data):
128 fp = cStringIO.StringIO()
128 fp = cStringIO.StringIO()
129 for line in data.splitlines():
129 for line in data.splitlines():
130 s = line.split(' ', 1)
130 s = line.split(' ', 1)
131 if len(s) != 2:
131 if len(s) != 2:
132 continue
132 continue
133 revid = revmap.get(source.lookuprev(s[0]))
133 revid = revmap.get(source.lookuprev(s[0]))
134 if not revid:
134 if not revid:
135 if s[0] == hex(nullid):
135 if s[0] == hex(nullid):
136 revid = s[0]
136 revid = s[0]
137 else:
137 else:
138 continue
138 continue
139 fp.write('%s %s\n' % (revid, s[1]))
139 fp.write('%s %s\n' % (revid, s[1]))
140 return fp.getvalue()
140 return fp.getvalue()
141
141
142 def _rewritesubstate(self, source, data):
142 def _rewritesubstate(self, source, data):
143 fp = cStringIO.StringIO()
143 fp = cStringIO.StringIO()
144 for line in data.splitlines():
144 for line in data.splitlines():
145 s = line.split(' ', 1)
145 s = line.split(' ', 1)
146 if len(s) != 2:
146 if len(s) != 2:
147 continue
147 continue
148
148
149 revid = s[0]
149 revid = s[0]
150 subpath = s[1]
150 subpath = s[1]
151 if revid != hex(nullid):
151 if revid != hex(nullid):
152 revmap = self.subrevmaps.get(subpath)
152 revmap = self.subrevmaps.get(subpath)
153 if revmap is None:
153 if revmap is None:
154 revmap = mapfile(self.ui,
154 revmap = mapfile(self.ui,
155 self.repo.wjoin(subpath, '.hg/shamap'))
155 self.repo.wjoin(subpath, '.hg/shamap'))
156 self.subrevmaps[subpath] = revmap
156 self.subrevmaps[subpath] = revmap
157
157
158 # It is reasonable that one or more of the subrepos don't
158 # It is reasonable that one or more of the subrepos don't
159 # need to be converted, in which case they can be cloned
159 # need to be converted, in which case they can be cloned
160 # into place instead of converted. Therefore, only warn
160 # into place instead of converted. Therefore, only warn
161 # once.
161 # once.
162 msg = _('no ".hgsubstate" updates will be made for "%s"\n')
162 msg = _('no ".hgsubstate" updates will be made for "%s"\n')
163 if len(revmap) == 0:
163 if len(revmap) == 0:
164 sub = self.repo.wvfs.reljoin(subpath, '.hg')
164 sub = self.repo.wvfs.reljoin(subpath, '.hg')
165
165
166 if self.repo.wvfs.exists(sub):
166 if self.repo.wvfs.exists(sub):
167 self.ui.warn(msg % subpath)
167 self.ui.warn(msg % subpath)
168
168
169 newid = revmap.get(revid)
169 newid = revmap.get(revid)
170 if not newid:
170 if not newid:
171 if len(revmap) > 0:
171 if len(revmap) > 0:
172 self.ui.warn(_("%s is missing from %s/.hg/shamap\n") %
172 self.ui.warn(_("%s is missing from %s/.hg/shamap\n") %
173 (revid, subpath))
173 (revid, subpath))
174 else:
174 else:
175 revid = newid
175 revid = newid
176
176
177 fp.write('%s %s\n' % (revid, subpath))
177 fp.write('%s %s\n' % (revid, subpath))
178
178
179 return fp.getvalue()
179 return fp.getvalue()
180
180
181 def _calculatemergedfiles(self, source, p1ctx, p2ctx):
181 def _calculatemergedfiles(self, source, p1ctx, p2ctx):
182 """Calculates the files from p2 that we need to pull in when merging p1
182 """Calculates the files from p2 that we need to pull in when merging p1
183 and p2, given that the merge is coming from the given source.
183 and p2, given that the merge is coming from the given source.
184
184
185 This prevents us from losing files that only exist in the target p2 and
185 This prevents us from losing files that only exist in the target p2 and
186 that don't come from the source repo (like if you're merging multiple
186 that don't come from the source repo (like if you're merging multiple
187 repositories together).
187 repositories together).
188 """
188 """
189 anc = [p1ctx.ancestor(p2ctx)]
189 anc = [p1ctx.ancestor(p2ctx)]
190 # Calculate what files are coming from p2
190 # Calculate what files are coming from p2
191 actions, diverge, rename = mergemod.calculateupdates(
191 actions, diverge, rename = mergemod.calculateupdates(
192 self.repo, p1ctx, p2ctx, anc,
192 self.repo, p1ctx, p2ctx, anc,
193 True, # branchmerge
193 True, # branchmerge
194 True, # force
194 True, # force
195 False, # partial
196 False, # acceptremote
195 False, # acceptremote
197 False, # followcopies
196 False, # followcopies
198 )
197 )
199
198
200 for file, (action, info, msg) in actions.iteritems():
199 for file, (action, info, msg) in actions.iteritems():
201 if source.targetfilebelongstosource(file):
200 if source.targetfilebelongstosource(file):
202 # If the file belongs to the source repo, ignore the p2
201 # If the file belongs to the source repo, ignore the p2
203 # since it will be covered by the existing fileset.
202 # since it will be covered by the existing fileset.
204 continue
203 continue
205
204
206 # If the file requires actual merging, abort. We don't have enough
205 # If the file requires actual merging, abort. We don't have enough
207 # context to resolve merges correctly.
206 # context to resolve merges correctly.
208 if action in ['m', 'dm', 'cd', 'dc']:
207 if action in ['m', 'dm', 'cd', 'dc']:
209 raise error.Abort(_("unable to convert merge commit "
208 raise error.Abort(_("unable to convert merge commit "
210 "since target parents do not merge cleanly (file "
209 "since target parents do not merge cleanly (file "
211 "%s, parents %s and %s)") % (file, p1ctx,
210 "%s, parents %s and %s)") % (file, p1ctx,
212 p2ctx))
211 p2ctx))
213 elif action == 'k':
212 elif action == 'k':
214 # 'keep' means nothing changed from p1
213 # 'keep' means nothing changed from p1
215 continue
214 continue
216 else:
215 else:
217 # Any other change means we want to take the p2 version
216 # Any other change means we want to take the p2 version
218 yield file
217 yield file
219
218
220 def putcommit(self, files, copies, parents, commit, source, revmap, full,
219 def putcommit(self, files, copies, parents, commit, source, revmap, full,
221 cleanp2):
220 cleanp2):
222 files = dict(files)
221 files = dict(files)
223
222
224 def getfilectx(repo, memctx, f):
223 def getfilectx(repo, memctx, f):
225 if p2ctx and f in p2files and f not in copies:
224 if p2ctx and f in p2files and f not in copies:
226 self.ui.debug('reusing %s from p2\n' % f)
225 self.ui.debug('reusing %s from p2\n' % f)
227 try:
226 try:
228 return p2ctx[f]
227 return p2ctx[f]
229 except error.ManifestLookupError:
228 except error.ManifestLookupError:
230 # If the file doesn't exist in p2, then we're syncing a
229 # If the file doesn't exist in p2, then we're syncing a
231 # delete, so just return None.
230 # delete, so just return None.
232 return None
231 return None
233 try:
232 try:
234 v = files[f]
233 v = files[f]
235 except KeyError:
234 except KeyError:
236 return None
235 return None
237 data, mode = source.getfile(f, v)
236 data, mode = source.getfile(f, v)
238 if data is None:
237 if data is None:
239 return None
238 return None
240 if f == '.hgtags':
239 if f == '.hgtags':
241 data = self._rewritetags(source, revmap, data)
240 data = self._rewritetags(source, revmap, data)
242 if f == '.hgsubstate':
241 if f == '.hgsubstate':
243 data = self._rewritesubstate(source, data)
242 data = self._rewritesubstate(source, data)
244 return context.memfilectx(self.repo, f, data, 'l' in mode,
243 return context.memfilectx(self.repo, f, data, 'l' in mode,
245 'x' in mode, copies.get(f))
244 'x' in mode, copies.get(f))
246
245
247 pl = []
246 pl = []
248 for p in parents:
247 for p in parents:
249 if p not in pl:
248 if p not in pl:
250 pl.append(p)
249 pl.append(p)
251 parents = pl
250 parents = pl
252 nparents = len(parents)
251 nparents = len(parents)
253 if self.filemapmode and nparents == 1:
252 if self.filemapmode and nparents == 1:
254 m1node = self.repo.changelog.read(bin(parents[0]))[0]
253 m1node = self.repo.changelog.read(bin(parents[0]))[0]
255 parent = parents[0]
254 parent = parents[0]
256
255
257 if len(parents) < 2:
256 if len(parents) < 2:
258 parents.append(nullid)
257 parents.append(nullid)
259 if len(parents) < 2:
258 if len(parents) < 2:
260 parents.append(nullid)
259 parents.append(nullid)
261 p2 = parents.pop(0)
260 p2 = parents.pop(0)
262
261
263 text = commit.desc
262 text = commit.desc
264
263
265 sha1s = re.findall(sha1re, text)
264 sha1s = re.findall(sha1re, text)
266 for sha1 in sha1s:
265 for sha1 in sha1s:
267 oldrev = source.lookuprev(sha1)
266 oldrev = source.lookuprev(sha1)
268 newrev = revmap.get(oldrev)
267 newrev = revmap.get(oldrev)
269 if newrev is not None:
268 if newrev is not None:
270 text = text.replace(sha1, newrev[:len(sha1)])
269 text = text.replace(sha1, newrev[:len(sha1)])
271
270
272 extra = commit.extra.copy()
271 extra = commit.extra.copy()
273
272
274 sourcename = self.repo.ui.config('convert', 'hg.sourcename')
273 sourcename = self.repo.ui.config('convert', 'hg.sourcename')
275 if sourcename:
274 if sourcename:
276 extra['convert_source'] = sourcename
275 extra['convert_source'] = sourcename
277
276
278 for label in ('source', 'transplant_source', 'rebase_source',
277 for label in ('source', 'transplant_source', 'rebase_source',
279 'intermediate-source'):
278 'intermediate-source'):
280 node = extra.get(label)
279 node = extra.get(label)
281
280
282 if node is None:
281 if node is None:
283 continue
282 continue
284
283
285 # Only transplant stores its reference in binary
284 # Only transplant stores its reference in binary
286 if label == 'transplant_source':
285 if label == 'transplant_source':
287 node = hex(node)
286 node = hex(node)
288
287
289 newrev = revmap.get(node)
288 newrev = revmap.get(node)
290 if newrev is not None:
289 if newrev is not None:
291 if label == 'transplant_source':
290 if label == 'transplant_source':
292 newrev = bin(newrev)
291 newrev = bin(newrev)
293
292
294 extra[label] = newrev
293 extra[label] = newrev
295
294
296 if self.branchnames and commit.branch:
295 if self.branchnames and commit.branch:
297 extra['branch'] = commit.branch
296 extra['branch'] = commit.branch
298 if commit.rev and commit.saverev:
297 if commit.rev and commit.saverev:
299 extra['convert_revision'] = commit.rev
298 extra['convert_revision'] = commit.rev
300
299
301 while parents:
300 while parents:
302 p1 = p2
301 p1 = p2
303 p2 = parents.pop(0)
302 p2 = parents.pop(0)
304 p1ctx = self.repo[p1]
303 p1ctx = self.repo[p1]
305 p2ctx = None
304 p2ctx = None
306 if p2 != nullid:
305 if p2 != nullid:
307 p2ctx = self.repo[p2]
306 p2ctx = self.repo[p2]
308 fileset = set(files)
307 fileset = set(files)
309 if full:
308 if full:
310 fileset.update(self.repo[p1])
309 fileset.update(self.repo[p1])
311 fileset.update(self.repo[p2])
310 fileset.update(self.repo[p2])
312
311
313 if p2ctx:
312 if p2ctx:
314 p2files = set(cleanp2)
313 p2files = set(cleanp2)
315 for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
314 for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
316 p2files.add(file)
315 p2files.add(file)
317 fileset.add(file)
316 fileset.add(file)
318
317
319 ctx = context.memctx(self.repo, (p1, p2), text, fileset,
318 ctx = context.memctx(self.repo, (p1, p2), text, fileset,
320 getfilectx, commit.author, commit.date, extra)
319 getfilectx, commit.author, commit.date, extra)
321
320
322 # We won't know if the conversion changes the node until after the
321 # We won't know if the conversion changes the node until after the
323 # commit, so copy the source's phase for now.
322 # commit, so copy the source's phase for now.
324 self.repo.ui.setconfig('phases', 'new-commit',
323 self.repo.ui.setconfig('phases', 'new-commit',
325 phases.phasenames[commit.phase], 'convert')
324 phases.phasenames[commit.phase], 'convert')
326
325
327 tr = self.repo.transaction("convert")
326 tr = self.repo.transaction("convert")
328
327
329 try:
328 try:
330 node = hex(self.repo.commitctx(ctx))
329 node = hex(self.repo.commitctx(ctx))
331
330
332 # If the node value has changed, but the phase is lower than
331 # If the node value has changed, but the phase is lower than
333 # draft, set it back to draft since it hasn't been exposed
332 # draft, set it back to draft since it hasn't been exposed
334 # anywhere.
333 # anywhere.
335 if commit.rev != node:
334 if commit.rev != node:
336 ctx = self.repo[node]
335 ctx = self.repo[node]
337 if ctx.phase() < phases.draft:
336 if ctx.phase() < phases.draft:
338 phases.retractboundary(self.repo, tr, phases.draft,
337 phases.retractboundary(self.repo, tr, phases.draft,
339 [ctx.node()])
338 [ctx.node()])
340 tr.close()
339 tr.close()
341 finally:
340 finally:
342 tr.release()
341 tr.release()
343
342
344 text = "(octopus merge fixup)\n"
343 text = "(octopus merge fixup)\n"
345 p2 = node
344 p2 = node
346
345
347 if self.filemapmode and nparents == 1:
346 if self.filemapmode and nparents == 1:
348 man = self.repo.manifest
347 man = self.repo.manifest
349 mnode = self.repo.changelog.read(bin(p2))[0]
348 mnode = self.repo.changelog.read(bin(p2))[0]
350 closed = 'close' in commit.extra
349 closed = 'close' in commit.extra
351 if not closed and not man.cmp(m1node, man.revision(mnode)):
350 if not closed and not man.cmp(m1node, man.revision(mnode)):
352 self.ui.status(_("filtering out empty revision\n"))
351 self.ui.status(_("filtering out empty revision\n"))
353 self.repo.rollback(force=True)
352 self.repo.rollback(force=True)
354 return parent
353 return parent
355 return p2
354 return p2
356
355
357 def puttags(self, tags):
356 def puttags(self, tags):
358 try:
357 try:
359 parentctx = self.repo[self.tagsbranch]
358 parentctx = self.repo[self.tagsbranch]
360 tagparent = parentctx.node()
359 tagparent = parentctx.node()
361 except error.RepoError:
360 except error.RepoError:
362 parentctx = None
361 parentctx = None
363 tagparent = nullid
362 tagparent = nullid
364
363
365 oldlines = set()
364 oldlines = set()
366 for branch, heads in self.repo.branchmap().iteritems():
365 for branch, heads in self.repo.branchmap().iteritems():
367 for h in heads:
366 for h in heads:
368 if '.hgtags' in self.repo[h]:
367 if '.hgtags' in self.repo[h]:
369 oldlines.update(
368 oldlines.update(
370 set(self.repo[h]['.hgtags'].data().splitlines(True)))
369 set(self.repo[h]['.hgtags'].data().splitlines(True)))
371 oldlines = sorted(list(oldlines))
370 oldlines = sorted(list(oldlines))
372
371
373 newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
372 newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
374 if newlines == oldlines:
373 if newlines == oldlines:
375 return None, None
374 return None, None
376
375
377 # if the old and new tags match, then there is nothing to update
376 # if the old and new tags match, then there is nothing to update
378 oldtags = set()
377 oldtags = set()
379 newtags = set()
378 newtags = set()
380 for line in oldlines:
379 for line in oldlines:
381 s = line.strip().split(' ', 1)
380 s = line.strip().split(' ', 1)
382 if len(s) != 2:
381 if len(s) != 2:
383 continue
382 continue
384 oldtags.add(s[1])
383 oldtags.add(s[1])
385 for line in newlines:
384 for line in newlines:
386 s = line.strip().split(' ', 1)
385 s = line.strip().split(' ', 1)
387 if len(s) != 2:
386 if len(s) != 2:
388 continue
387 continue
389 if s[1] not in oldtags:
388 if s[1] not in oldtags:
390 newtags.add(s[1].strip())
389 newtags.add(s[1].strip())
391
390
392 if not newtags:
391 if not newtags:
393 return None, None
392 return None, None
394
393
395 data = "".join(newlines)
394 data = "".join(newlines)
396 def getfilectx(repo, memctx, f):
395 def getfilectx(repo, memctx, f):
397 return context.memfilectx(repo, f, data, False, False, None)
396 return context.memfilectx(repo, f, data, False, False, None)
398
397
399 self.ui.status(_("updating tags\n"))
398 self.ui.status(_("updating tags\n"))
400 date = "%s 0" % int(time.mktime(time.gmtime()))
399 date = "%s 0" % int(time.mktime(time.gmtime()))
401 extra = {'branch': self.tagsbranch}
400 extra = {'branch': self.tagsbranch}
402 ctx = context.memctx(self.repo, (tagparent, None), "update tags",
401 ctx = context.memctx(self.repo, (tagparent, None), "update tags",
403 [".hgtags"], getfilectx, "convert-repo", date,
402 [".hgtags"], getfilectx, "convert-repo", date,
404 extra)
403 extra)
405 node = self.repo.commitctx(ctx)
404 node = self.repo.commitctx(ctx)
406 return hex(node), hex(tagparent)
405 return hex(node), hex(tagparent)
407
406
408 def setfilemapmode(self, active):
407 def setfilemapmode(self, active):
409 self.filemapmode = active
408 self.filemapmode = active
410
409
411 def putbookmarks(self, updatedbookmark):
410 def putbookmarks(self, updatedbookmark):
412 if not len(updatedbookmark):
411 if not len(updatedbookmark):
413 return
412 return
414 wlock = lock = tr = None
413 wlock = lock = tr = None
415 try:
414 try:
416 wlock = self.repo.wlock()
415 wlock = self.repo.wlock()
417 lock = self.repo.lock()
416 lock = self.repo.lock()
418 tr = self.repo.transaction('bookmark')
417 tr = self.repo.transaction('bookmark')
419 self.ui.status(_("updating bookmarks\n"))
418 self.ui.status(_("updating bookmarks\n"))
420 destmarks = self.repo._bookmarks
419 destmarks = self.repo._bookmarks
421 for bookmark in updatedbookmark:
420 for bookmark in updatedbookmark:
422 destmarks[bookmark] = bin(updatedbookmark[bookmark])
421 destmarks[bookmark] = bin(updatedbookmark[bookmark])
423 destmarks.recordchange(tr)
422 destmarks.recordchange(tr)
424 tr.close()
423 tr.close()
425 finally:
424 finally:
426 lockmod.release(lock, wlock, tr)
425 lockmod.release(lock, wlock, tr)
427
426
428 def hascommitfrommap(self, rev):
427 def hascommitfrommap(self, rev):
429 # the exact semantics of clonebranches is unclear so we can't say no
428 # the exact semantics of clonebranches is unclear so we can't say no
430 return rev in self.repo or self.clonebranches
429 return rev in self.repo or self.clonebranches
431
430
432 def hascommitforsplicemap(self, rev):
431 def hascommitforsplicemap(self, rev):
433 if rev not in self.repo and self.clonebranches:
432 if rev not in self.repo and self.clonebranches:
434 raise error.Abort(_('revision %s not found in destination '
433 raise error.Abort(_('revision %s not found in destination '
435 'repository (lookups with clonebranches=true '
434 'repository (lookups with clonebranches=true '
436 'are not implemented)') % rev)
435 'are not implemented)') % rev)
437 return rev in self.repo
436 return rev in self.repo
438
437
439 class mercurial_source(converter_source):
438 class mercurial_source(converter_source):
440 def __init__(self, ui, path, revs=None):
439 def __init__(self, ui, path, revs=None):
441 converter_source.__init__(self, ui, path, revs)
440 converter_source.__init__(self, ui, path, revs)
442 self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
441 self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
443 self.ignored = set()
442 self.ignored = set()
444 self.saverev = ui.configbool('convert', 'hg.saverev', False)
443 self.saverev = ui.configbool('convert', 'hg.saverev', False)
445 try:
444 try:
446 self.repo = hg.repository(self.ui, path)
445 self.repo = hg.repository(self.ui, path)
447 # try to provoke an exception if this isn't really a hg
446 # try to provoke an exception if this isn't really a hg
448 # repo, but some other bogus compatible-looking url
447 # repo, but some other bogus compatible-looking url
449 if not self.repo.local():
448 if not self.repo.local():
450 raise error.RepoError
449 raise error.RepoError
451 except error.RepoError:
450 except error.RepoError:
452 ui.traceback()
451 ui.traceback()
453 raise NoRepo(_("%s is not a local Mercurial repository") % path)
452 raise NoRepo(_("%s is not a local Mercurial repository") % path)
454 self.lastrev = None
453 self.lastrev = None
455 self.lastctx = None
454 self.lastctx = None
456 self._changescache = None, None
455 self._changescache = None, None
457 self.convertfp = None
456 self.convertfp = None
458 # Restrict converted revisions to startrev descendants
457 # Restrict converted revisions to startrev descendants
459 startnode = ui.config('convert', 'hg.startrev')
458 startnode = ui.config('convert', 'hg.startrev')
460 hgrevs = ui.config('convert', 'hg.revs')
459 hgrevs = ui.config('convert', 'hg.revs')
461 if hgrevs is None:
460 if hgrevs is None:
462 if startnode is not None:
461 if startnode is not None:
463 try:
462 try:
464 startnode = self.repo.lookup(startnode)
463 startnode = self.repo.lookup(startnode)
465 except error.RepoError:
464 except error.RepoError:
466 raise error.Abort(_('%s is not a valid start revision')
465 raise error.Abort(_('%s is not a valid start revision')
467 % startnode)
466 % startnode)
468 startrev = self.repo.changelog.rev(startnode)
467 startrev = self.repo.changelog.rev(startnode)
469 children = {startnode: 1}
468 children = {startnode: 1}
470 for r in self.repo.changelog.descendants([startrev]):
469 for r in self.repo.changelog.descendants([startrev]):
471 children[self.repo.changelog.node(r)] = 1
470 children[self.repo.changelog.node(r)] = 1
472 self.keep = children.__contains__
471 self.keep = children.__contains__
473 else:
472 else:
474 self.keep = util.always
473 self.keep = util.always
475 if revs:
474 if revs:
476 self._heads = [self.repo[r].node() for r in revs]
475 self._heads = [self.repo[r].node() for r in revs]
477 else:
476 else:
478 self._heads = self.repo.heads()
477 self._heads = self.repo.heads()
479 else:
478 else:
480 if revs or startnode is not None:
479 if revs or startnode is not None:
481 raise error.Abort(_('hg.revs cannot be combined with '
480 raise error.Abort(_('hg.revs cannot be combined with '
482 'hg.startrev or --rev'))
481 'hg.startrev or --rev'))
483 nodes = set()
482 nodes = set()
484 parents = set()
483 parents = set()
485 for r in scmutil.revrange(self.repo, [hgrevs]):
484 for r in scmutil.revrange(self.repo, [hgrevs]):
486 ctx = self.repo[r]
485 ctx = self.repo[r]
487 nodes.add(ctx.node())
486 nodes.add(ctx.node())
488 parents.update(p.node() for p in ctx.parents())
487 parents.update(p.node() for p in ctx.parents())
489 self.keep = nodes.__contains__
488 self.keep = nodes.__contains__
490 self._heads = nodes - parents
489 self._heads = nodes - parents
491
490
492 def changectx(self, rev):
491 def changectx(self, rev):
493 if self.lastrev != rev:
492 if self.lastrev != rev:
494 self.lastctx = self.repo[rev]
493 self.lastctx = self.repo[rev]
495 self.lastrev = rev
494 self.lastrev = rev
496 return self.lastctx
495 return self.lastctx
497
496
498 def parents(self, ctx):
497 def parents(self, ctx):
499 return [p for p in ctx.parents() if p and self.keep(p.node())]
498 return [p for p in ctx.parents() if p and self.keep(p.node())]
500
499
501 def getheads(self):
500 def getheads(self):
502 return [hex(h) for h in self._heads if self.keep(h)]
501 return [hex(h) for h in self._heads if self.keep(h)]
503
502
504 def getfile(self, name, rev):
503 def getfile(self, name, rev):
505 try:
504 try:
506 fctx = self.changectx(rev)[name]
505 fctx = self.changectx(rev)[name]
507 return fctx.data(), fctx.flags()
506 return fctx.data(), fctx.flags()
508 except error.LookupError:
507 except error.LookupError:
509 return None, None
508 return None, None
510
509
511 def getchanges(self, rev, full):
510 def getchanges(self, rev, full):
512 ctx = self.changectx(rev)
511 ctx = self.changectx(rev)
513 parents = self.parents(ctx)
512 parents = self.parents(ctx)
514 if full or not parents:
513 if full or not parents:
515 files = copyfiles = ctx.manifest()
514 files = copyfiles = ctx.manifest()
516 if parents:
515 if parents:
517 if self._changescache[0] == rev:
516 if self._changescache[0] == rev:
518 m, a, r = self._changescache[1]
517 m, a, r = self._changescache[1]
519 else:
518 else:
520 m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3]
519 m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3]
521 if not full:
520 if not full:
522 files = m + a + r
521 files = m + a + r
523 copyfiles = m + a
522 copyfiles = m + a
524 # getcopies() is also run for roots and before filtering so missing
523 # getcopies() is also run for roots and before filtering so missing
525 # revlogs are detected early
524 # revlogs are detected early
526 copies = self.getcopies(ctx, parents, copyfiles)
525 copies = self.getcopies(ctx, parents, copyfiles)
527 cleanp2 = set()
526 cleanp2 = set()
528 if len(parents) == 2:
527 if len(parents) == 2:
529 cleanp2.update(self.repo.status(parents[1].node(), ctx.node(),
528 cleanp2.update(self.repo.status(parents[1].node(), ctx.node(),
530 clean=True).clean)
529 clean=True).clean)
531 changes = [(f, rev) for f in files if f not in self.ignored]
530 changes = [(f, rev) for f in files if f not in self.ignored]
532 changes.sort()
531 changes.sort()
533 return changes, copies, cleanp2
532 return changes, copies, cleanp2
534
533
535 def getcopies(self, ctx, parents, files):
534 def getcopies(self, ctx, parents, files):
536 copies = {}
535 copies = {}
537 for name in files:
536 for name in files:
538 if name in self.ignored:
537 if name in self.ignored:
539 continue
538 continue
540 try:
539 try:
541 copysource, _copynode = ctx.filectx(name).renamed()
540 copysource, _copynode = ctx.filectx(name).renamed()
542 if copysource in self.ignored:
541 if copysource in self.ignored:
543 continue
542 continue
544 # Ignore copy sources not in parent revisions
543 # Ignore copy sources not in parent revisions
545 found = False
544 found = False
546 for p in parents:
545 for p in parents:
547 if copysource in p:
546 if copysource in p:
548 found = True
547 found = True
549 break
548 break
550 if not found:
549 if not found:
551 continue
550 continue
552 copies[name] = copysource
551 copies[name] = copysource
553 except TypeError:
552 except TypeError:
554 pass
553 pass
555 except error.LookupError as e:
554 except error.LookupError as e:
556 if not self.ignoreerrors:
555 if not self.ignoreerrors:
557 raise
556 raise
558 self.ignored.add(name)
557 self.ignored.add(name)
559 self.ui.warn(_('ignoring: %s\n') % e)
558 self.ui.warn(_('ignoring: %s\n') % e)
560 return copies
559 return copies
561
560
562 def getcommit(self, rev):
561 def getcommit(self, rev):
563 ctx = self.changectx(rev)
562 ctx = self.changectx(rev)
564 parents = [p.hex() for p in self.parents(ctx)]
563 parents = [p.hex() for p in self.parents(ctx)]
565 crev = rev
564 crev = rev
566
565
567 return commit(author=ctx.user(),
566 return commit(author=ctx.user(),
568 date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'),
567 date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'),
569 desc=ctx.description(), rev=crev, parents=parents,
568 desc=ctx.description(), rev=crev, parents=parents,
570 branch=ctx.branch(), extra=ctx.extra(),
569 branch=ctx.branch(), extra=ctx.extra(),
571 sortkey=ctx.rev(), saverev=self.saverev,
570 sortkey=ctx.rev(), saverev=self.saverev,
572 phase=ctx.phase())
571 phase=ctx.phase())
573
572
574 def gettags(self):
573 def gettags(self):
575 # This will get written to .hgtags, filter non global tags out.
574 # This will get written to .hgtags, filter non global tags out.
576 tags = [t for t in self.repo.tagslist()
575 tags = [t for t in self.repo.tagslist()
577 if self.repo.tagtype(t[0]) == 'global']
576 if self.repo.tagtype(t[0]) == 'global']
578 return dict([(name, hex(node)) for name, node in tags
577 return dict([(name, hex(node)) for name, node in tags
579 if self.keep(node)])
578 if self.keep(node)])
580
579
581 def getchangedfiles(self, rev, i):
580 def getchangedfiles(self, rev, i):
582 ctx = self.changectx(rev)
581 ctx = self.changectx(rev)
583 parents = self.parents(ctx)
582 parents = self.parents(ctx)
584 if not parents and i is None:
583 if not parents and i is None:
585 i = 0
584 i = 0
586 changes = [], ctx.manifest().keys(), []
585 changes = [], ctx.manifest().keys(), []
587 else:
586 else:
588 i = i or 0
587 i = i or 0
589 changes = self.repo.status(parents[i].node(), ctx.node())[:3]
588 changes = self.repo.status(parents[i].node(), ctx.node())[:3]
590 changes = [[f for f in l if f not in self.ignored] for l in changes]
589 changes = [[f for f in l if f not in self.ignored] for l in changes]
591
590
592 if i == 0:
591 if i == 0:
593 self._changescache = (rev, changes)
592 self._changescache = (rev, changes)
594
593
595 return changes[0] + changes[1] + changes[2]
594 return changes[0] + changes[1] + changes[2]
596
595
597 def converted(self, rev, destrev):
596 def converted(self, rev, destrev):
598 if self.convertfp is None:
597 if self.convertfp is None:
599 self.convertfp = open(self.repo.join('shamap'), 'a')
598 self.convertfp = open(self.repo.join('shamap'), 'a')
600 self.convertfp.write('%s %s\n' % (destrev, rev))
599 self.convertfp.write('%s %s\n' % (destrev, rev))
601 self.convertfp.flush()
600 self.convertfp.flush()
602
601
603 def before(self):
602 def before(self):
604 self.ui.debug('run hg source pre-conversion action\n')
603 self.ui.debug('run hg source pre-conversion action\n')
605
604
606 def after(self):
605 def after(self):
607 self.ui.debug('run hg source post-conversion action\n')
606 self.ui.debug('run hg source post-conversion action\n')
608
607
609 def hasnativeorder(self):
608 def hasnativeorder(self):
610 return True
609 return True
611
610
612 def hasnativeclose(self):
611 def hasnativeclose(self):
613 return True
612 return True
614
613
615 def lookuprev(self, rev):
614 def lookuprev(self, rev):
616 try:
615 try:
617 return hex(self.repo.lookup(rev))
616 return hex(self.repo.lookup(rev))
618 except (error.RepoError, error.LookupError):
617 except (error.RepoError, error.LookupError):
619 return None
618 return None
620
619
621 def getbookmarks(self):
620 def getbookmarks(self):
622 return bookmarks.listbookmarks(self.repo)
621 return bookmarks.listbookmarks(self.repo)
623
622
624 def checkrevformat(self, revstr, mapname='splicemap'):
623 def checkrevformat(self, revstr, mapname='splicemap'):
625 """ Mercurial, revision string is a 40 byte hex """
624 """ Mercurial, revision string is a 40 byte hex """
626 self.checkhexformat(revstr, mapname)
625 self.checkhexformat(revstr, mapname)
@@ -1,1433 +1,1433 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset, error
15 archival, pathutil, revset, error
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfutil
18 import lfutil
19 import lfcommands
19 import lfcommands
20 import basestore
20 import basestore
21
21
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23
23
24 def composelargefilematcher(match, manifest):
24 def composelargefilematcher(match, manifest):
25 '''create a matcher that matches only the largefiles in the original
25 '''create a matcher that matches only the largefiles in the original
26 matcher'''
26 matcher'''
27 m = copy.copy(match)
27 m = copy.copy(match)
28 lfile = lambda f: lfutil.standin(f) in manifest
28 lfile = lambda f: lfutil.standin(f) in manifest
29 m._files = filter(lfile, m._files)
29 m._files = filter(lfile, m._files)
30 m._fileroots = set(m._files)
30 m._fileroots = set(m._files)
31 m._always = False
31 m._always = False
32 origmatchfn = m.matchfn
32 origmatchfn = m.matchfn
33 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
33 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
34 return m
34 return m
35
35
36 def composenormalfilematcher(match, manifest, exclude=None):
36 def composenormalfilematcher(match, manifest, exclude=None):
37 excluded = set()
37 excluded = set()
38 if exclude is not None:
38 if exclude is not None:
39 excluded.update(exclude)
39 excluded.update(exclude)
40
40
41 m = copy.copy(match)
41 m = copy.copy(match)
42 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
42 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
43 manifest or f in excluded)
43 manifest or f in excluded)
44 m._files = filter(notlfile, m._files)
44 m._files = filter(notlfile, m._files)
45 m._fileroots = set(m._files)
45 m._fileroots = set(m._files)
46 m._always = False
46 m._always = False
47 origmatchfn = m.matchfn
47 origmatchfn = m.matchfn
48 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
48 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
49 return m
49 return m
50
50
51 def installnormalfilesmatchfn(manifest):
51 def installnormalfilesmatchfn(manifest):
52 '''installmatchfn with a matchfn that ignores all largefiles'''
52 '''installmatchfn with a matchfn that ignores all largefiles'''
53 def overridematch(ctx, pats=(), opts=None, globbed=False,
53 def overridematch(ctx, pats=(), opts=None, globbed=False,
54 default='relpath', badfn=None):
54 default='relpath', badfn=None):
55 if opts is None:
55 if opts is None:
56 opts = {}
56 opts = {}
57 match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
57 match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
58 return composenormalfilematcher(match, manifest)
58 return composenormalfilematcher(match, manifest)
59 oldmatch = installmatchfn(overridematch)
59 oldmatch = installmatchfn(overridematch)
60
60
61 def installmatchfn(f):
61 def installmatchfn(f):
62 '''monkey patch the scmutil module with a custom match function.
62 '''monkey patch the scmutil module with a custom match function.
63 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
63 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
64 oldmatch = scmutil.match
64 oldmatch = scmutil.match
65 setattr(f, 'oldmatch', oldmatch)
65 setattr(f, 'oldmatch', oldmatch)
66 scmutil.match = f
66 scmutil.match = f
67 return oldmatch
67 return oldmatch
68
68
69 def restorematchfn():
69 def restorematchfn():
70 '''restores scmutil.match to what it was before installmatchfn
70 '''restores scmutil.match to what it was before installmatchfn
71 was called. no-op if scmutil.match is its original function.
71 was called. no-op if scmutil.match is its original function.
72
72
73 Note that n calls to installmatchfn will require n calls to
73 Note that n calls to installmatchfn will require n calls to
74 restore the original matchfn.'''
74 restore the original matchfn.'''
75 scmutil.match = getattr(scmutil.match, 'oldmatch')
75 scmutil.match = getattr(scmutil.match, 'oldmatch')
76
76
77 def installmatchandpatsfn(f):
77 def installmatchandpatsfn(f):
78 oldmatchandpats = scmutil.matchandpats
78 oldmatchandpats = scmutil.matchandpats
79 setattr(f, 'oldmatchandpats', oldmatchandpats)
79 setattr(f, 'oldmatchandpats', oldmatchandpats)
80 scmutil.matchandpats = f
80 scmutil.matchandpats = f
81 return oldmatchandpats
81 return oldmatchandpats
82
82
83 def restorematchandpatsfn():
83 def restorematchandpatsfn():
84 '''restores scmutil.matchandpats to what it was before
84 '''restores scmutil.matchandpats to what it was before
85 installmatchandpatsfn was called. No-op if scmutil.matchandpats
85 installmatchandpatsfn was called. No-op if scmutil.matchandpats
86 is its original function.
86 is its original function.
87
87
88 Note that n calls to installmatchandpatsfn will require n calls
88 Note that n calls to installmatchandpatsfn will require n calls
89 to restore the original matchfn.'''
89 to restore the original matchfn.'''
90 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
90 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
91 scmutil.matchandpats)
91 scmutil.matchandpats)
92
92
93 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
93 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
94 large = opts.get('large')
94 large = opts.get('large')
95 lfsize = lfutil.getminsize(
95 lfsize = lfutil.getminsize(
96 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
96 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
97
97
98 lfmatcher = None
98 lfmatcher = None
99 if lfutil.islfilesrepo(repo):
99 if lfutil.islfilesrepo(repo):
100 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
100 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
101 if lfpats:
101 if lfpats:
102 lfmatcher = match_.match(repo.root, '', list(lfpats))
102 lfmatcher = match_.match(repo.root, '', list(lfpats))
103
103
104 lfnames = []
104 lfnames = []
105 m = matcher
105 m = matcher
106
106
107 wctx = repo[None]
107 wctx = repo[None]
108 for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
108 for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
109 exact = m.exact(f)
109 exact = m.exact(f)
110 lfile = lfutil.standin(f) in wctx
110 lfile = lfutil.standin(f) in wctx
111 nfile = f in wctx
111 nfile = f in wctx
112 exists = lfile or nfile
112 exists = lfile or nfile
113
113
114 # addremove in core gets fancy with the name, add doesn't
114 # addremove in core gets fancy with the name, add doesn't
115 if isaddremove:
115 if isaddremove:
116 name = m.uipath(f)
116 name = m.uipath(f)
117 else:
117 else:
118 name = m.rel(f)
118 name = m.rel(f)
119
119
120 # Don't warn the user when they attempt to add a normal tracked file.
120 # Don't warn the user when they attempt to add a normal tracked file.
121 # The normal add code will do that for us.
121 # The normal add code will do that for us.
122 if exact and exists:
122 if exact and exists:
123 if lfile:
123 if lfile:
124 ui.warn(_('%s already a largefile\n') % name)
124 ui.warn(_('%s already a largefile\n') % name)
125 continue
125 continue
126
126
127 if (exact or not exists) and not lfutil.isstandin(f):
127 if (exact or not exists) and not lfutil.isstandin(f):
128 # In case the file was removed previously, but not committed
128 # In case the file was removed previously, but not committed
129 # (issue3507)
129 # (issue3507)
130 if not repo.wvfs.exists(f):
130 if not repo.wvfs.exists(f):
131 continue
131 continue
132
132
133 abovemin = (lfsize and
133 abovemin = (lfsize and
134 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
134 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
135 if large or abovemin or (lfmatcher and lfmatcher(f)):
135 if large or abovemin or (lfmatcher and lfmatcher(f)):
136 lfnames.append(f)
136 lfnames.append(f)
137 if ui.verbose or not exact:
137 if ui.verbose or not exact:
138 ui.status(_('adding %s as a largefile\n') % name)
138 ui.status(_('adding %s as a largefile\n') % name)
139
139
140 bad = []
140 bad = []
141
141
142 # Need to lock, otherwise there could be a race condition between
142 # Need to lock, otherwise there could be a race condition between
143 # when standins are created and added to the repo.
143 # when standins are created and added to the repo.
144 wlock = repo.wlock()
144 wlock = repo.wlock()
145 try:
145 try:
146 if not opts.get('dry_run'):
146 if not opts.get('dry_run'):
147 standins = []
147 standins = []
148 lfdirstate = lfutil.openlfdirstate(ui, repo)
148 lfdirstate = lfutil.openlfdirstate(ui, repo)
149 for f in lfnames:
149 for f in lfnames:
150 standinname = lfutil.standin(f)
150 standinname = lfutil.standin(f)
151 lfutil.writestandin(repo, standinname, hash='',
151 lfutil.writestandin(repo, standinname, hash='',
152 executable=lfutil.getexecutable(repo.wjoin(f)))
152 executable=lfutil.getexecutable(repo.wjoin(f)))
153 standins.append(standinname)
153 standins.append(standinname)
154 if lfdirstate[f] == 'r':
154 if lfdirstate[f] == 'r':
155 lfdirstate.normallookup(f)
155 lfdirstate.normallookup(f)
156 else:
156 else:
157 lfdirstate.add(f)
157 lfdirstate.add(f)
158 lfdirstate.write()
158 lfdirstate.write()
159 bad += [lfutil.splitstandin(f)
159 bad += [lfutil.splitstandin(f)
160 for f in repo[None].add(standins)
160 for f in repo[None].add(standins)
161 if f in m.files()]
161 if f in m.files()]
162
162
163 added = [f for f in lfnames if f not in bad]
163 added = [f for f in lfnames if f not in bad]
164 finally:
164 finally:
165 wlock.release()
165 wlock.release()
166 return added, bad
166 return added, bad
167
167
168 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
168 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
169 after = opts.get('after')
169 after = opts.get('after')
170 m = composelargefilematcher(matcher, repo[None].manifest())
170 m = composelargefilematcher(matcher, repo[None].manifest())
171 try:
171 try:
172 repo.lfstatus = True
172 repo.lfstatus = True
173 s = repo.status(match=m, clean=not isaddremove)
173 s = repo.status(match=m, clean=not isaddremove)
174 finally:
174 finally:
175 repo.lfstatus = False
175 repo.lfstatus = False
176 manifest = repo[None].manifest()
176 manifest = repo[None].manifest()
177 modified, added, deleted, clean = [[f for f in list
177 modified, added, deleted, clean = [[f for f in list
178 if lfutil.standin(f) in manifest]
178 if lfutil.standin(f) in manifest]
179 for list in (s.modified, s.added,
179 for list in (s.modified, s.added,
180 s.deleted, s.clean)]
180 s.deleted, s.clean)]
181
181
182 def warn(files, msg):
182 def warn(files, msg):
183 for f in files:
183 for f in files:
184 ui.warn(msg % m.rel(f))
184 ui.warn(msg % m.rel(f))
185 return int(len(files) > 0)
185 return int(len(files) > 0)
186
186
187 result = 0
187 result = 0
188
188
189 if after:
189 if after:
190 remove = deleted
190 remove = deleted
191 result = warn(modified + added + clean,
191 result = warn(modified + added + clean,
192 _('not removing %s: file still exists\n'))
192 _('not removing %s: file still exists\n'))
193 else:
193 else:
194 remove = deleted + clean
194 remove = deleted + clean
195 result = warn(modified, _('not removing %s: file is modified (use -f'
195 result = warn(modified, _('not removing %s: file is modified (use -f'
196 ' to force removal)\n'))
196 ' to force removal)\n'))
197 result = warn(added, _('not removing %s: file has been marked for add'
197 result = warn(added, _('not removing %s: file has been marked for add'
198 ' (use forget to undo)\n')) or result
198 ' (use forget to undo)\n')) or result
199
199
200 # Need to lock because standin files are deleted then removed from the
200 # Need to lock because standin files are deleted then removed from the
201 # repository and we could race in-between.
201 # repository and we could race in-between.
202 wlock = repo.wlock()
202 wlock = repo.wlock()
203 try:
203 try:
204 lfdirstate = lfutil.openlfdirstate(ui, repo)
204 lfdirstate = lfutil.openlfdirstate(ui, repo)
205 for f in sorted(remove):
205 for f in sorted(remove):
206 if ui.verbose or not m.exact(f):
206 if ui.verbose or not m.exact(f):
207 # addremove in core gets fancy with the name, remove doesn't
207 # addremove in core gets fancy with the name, remove doesn't
208 if isaddremove:
208 if isaddremove:
209 name = m.uipath(f)
209 name = m.uipath(f)
210 else:
210 else:
211 name = m.rel(f)
211 name = m.rel(f)
212 ui.status(_('removing %s\n') % name)
212 ui.status(_('removing %s\n') % name)
213
213
214 if not opts.get('dry_run'):
214 if not opts.get('dry_run'):
215 if not after:
215 if not after:
216 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
216 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
217
217
218 if opts.get('dry_run'):
218 if opts.get('dry_run'):
219 return result
219 return result
220
220
221 remove = [lfutil.standin(f) for f in remove]
221 remove = [lfutil.standin(f) for f in remove]
222 # If this is being called by addremove, let the original addremove
222 # If this is being called by addremove, let the original addremove
223 # function handle this.
223 # function handle this.
224 if not isaddremove:
224 if not isaddremove:
225 for f in remove:
225 for f in remove:
226 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
226 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
227 repo[None].forget(remove)
227 repo[None].forget(remove)
228
228
229 for f in remove:
229 for f in remove:
230 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
230 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
231 False)
231 False)
232
232
233 lfdirstate.write()
233 lfdirstate.write()
234 finally:
234 finally:
235 wlock.release()
235 wlock.release()
236
236
237 return result
237 return result
238
238
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 # appear at their right place in the manifests.
240 # appear at their right place in the manifests.
241 def decodepath(orig, path):
241 def decodepath(orig, path):
242 return lfutil.splitstandin(path) or path
242 return lfutil.splitstandin(path) or path
243
243
244 # -- Wrappers: modify existing commands --------------------------------
244 # -- Wrappers: modify existing commands --------------------------------
245
245
246 def overrideadd(orig, ui, repo, *pats, **opts):
246 def overrideadd(orig, ui, repo, *pats, **opts):
247 if opts.get('normal') and opts.get('large'):
247 if opts.get('normal') and opts.get('large'):
248 raise error.Abort(_('--normal cannot be used with --large'))
248 raise error.Abort(_('--normal cannot be used with --large'))
249 return orig(ui, repo, *pats, **opts)
249 return orig(ui, repo, *pats, **opts)
250
250
251 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
251 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
252 # The --normal flag short circuits this override
252 # The --normal flag short circuits this override
253 if opts.get('normal'):
253 if opts.get('normal'):
254 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
254 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
255
255
256 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
256 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
257 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
257 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
258 ladded)
258 ladded)
259 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
259 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
260
260
261 bad.extend(f for f in lbad)
261 bad.extend(f for f in lbad)
262 return bad
262 return bad
263
263
264 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
264 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
265 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
265 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
266 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
266 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
267 return removelargefiles(ui, repo, False, matcher, after=after,
267 return removelargefiles(ui, repo, False, matcher, after=after,
268 force=force) or result
268 force=force) or result
269
269
270 def overridestatusfn(orig, repo, rev2, **opts):
270 def overridestatusfn(orig, repo, rev2, **opts):
271 try:
271 try:
272 repo._repo.lfstatus = True
272 repo._repo.lfstatus = True
273 return orig(repo, rev2, **opts)
273 return orig(repo, rev2, **opts)
274 finally:
274 finally:
275 repo._repo.lfstatus = False
275 repo._repo.lfstatus = False
276
276
277 def overridestatus(orig, ui, repo, *pats, **opts):
277 def overridestatus(orig, ui, repo, *pats, **opts):
278 try:
278 try:
279 repo.lfstatus = True
279 repo.lfstatus = True
280 return orig(ui, repo, *pats, **opts)
280 return orig(ui, repo, *pats, **opts)
281 finally:
281 finally:
282 repo.lfstatus = False
282 repo.lfstatus = False
283
283
284 def overridedirty(orig, repo, ignoreupdate=False):
284 def overridedirty(orig, repo, ignoreupdate=False):
285 try:
285 try:
286 repo._repo.lfstatus = True
286 repo._repo.lfstatus = True
287 return orig(repo, ignoreupdate)
287 return orig(repo, ignoreupdate)
288 finally:
288 finally:
289 repo._repo.lfstatus = False
289 repo._repo.lfstatus = False
290
290
def overridelog(orig, ui, repo, *pats, **opts):
    """Wrap ``hg log`` so that matching a largefile also matches its standin.

    Installs a matchandpats override (and a _makenofollowlogfilematcher
    override for --patch) for the duration of the wrapped log call, and
    restores both afterwards.
    """
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
            default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            if pat.startswith('set:'):
                return pat

            kindpat = match_._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # Match either the largefile name itself or its standin.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
399
399
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrap ``hg verify`` so it can also verify largefiles.

    Pops the largefile-specific options before delegating to ``orig``;
    only runs lfcommands.verifylfiles when one of them was requested, and
    only if the normal verify succeeded (result is falsy).
    """
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result
409
409
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Wrap ``hg debugstate`` with a --large option.

    With --large, hand ``orig`` a stand-in repo object whose dirstate is
    the largefile dirstate; otherwise delegate unchanged.  Returns None in
    both branches, matching the wrapped command.
    """
    large = opts.pop('large', False)
    if large:
        class fakerepo(object):
            # minimal duck-typed repo: debugstate only needs .dirstate
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
418
418
# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    """Treat files backed by a standin in wctx as known (not colliding)."""
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
433
433
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             acceptremote, followcopies, matcher=None):
    """Post-process merge.calculateupdates actions for largefile-ness flips.

    Signature tracks merge.calculateupdates: the old ``partial`` argument
    is replaced by a ``matcher`` keyword, which is forwarded unchanged.
    Returns the (possibly rewritten) (actions, diverge, renamedelete).
    """
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote,
        followcopies, matcher=matcher)

    if overwrite:
        # overwrite updates never prompt; use the raw actions
        return actions, diverge, renamedelete

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in actions:
        splitstandin = f and lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in lfiles:
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            if sm == 'dc':
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(),)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            if lm == 'dc':
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(),)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete
528
528
def mergerecordupdates(orig, repo, actions, branchmerge):
    """Record 'lfmr' (largefile mark-removed) actions before delegating.

    Drops each such largefile from the repo dirstate and adds it to the
    largefile dirstate so it is not later synced back as a normal file.
    """
    if 'lfmr' in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions['lfmr']:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge)
541
541
542
542
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=None):
    """Merge a largefile standin by comparing hashes instead of contents.

    Non-standins (and absent sides) fall through to the original
    filemerge.  Identical local/other hashes need no prompt; otherwise the
    user picks local or other, and the chosen standin is written out.
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=labels)

    # standin contents are the largefile hashes; normalize before comparing
    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
             (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
565
565
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Wrap copies.pathcopies, mapping standin names back to largefiles.

    Both keys and values of the returned copy map are translated via
    lfutil.splitstandin (falling back to the original name).
    """
    copies = orig(ctx1, ctx2, match=match)
    updated = {}

    for k, v in copies.iteritems():
        updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v

    return updated
574
574
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Wrap copy/rename so largefiles and their standins stay in sync.

    Runs the wrapped command twice: once matching only normal files, then
    once matching only standins, recording copied standins so the backing
    largefiles can be copied/renamed and the largefile dirstate updated.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, pats, opts, rename)
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # absolute working-dir path of the standin for a user-given path
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def overridematch(ctx, pats=(), opts=None, globbed=False,
                default='relpath', badfn=None):
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if match_.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            m.matchfn = lambda f: (lfutil.isstandin(f) and
                                   (f in manifest) and
                                   origmatchfn(lfutil.splitstandin(f)) or
                                   None)
            return m
        oldmatch = installmatchfn(overridematch)
        listpats = []
        for pat in pats:
            if match_.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            origcopyfile = util.copyfile
            copiedfiles = []
            def overridecopyfile(src, dest):
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                            _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = overridecopyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if (lfutil.shortname in src and
                dest.startswith(repo.wjoin(lfutil.shortname))):
                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    util.unlinkpath(repo.wjoin(srclfile), True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile),
                                  repo.wjoin(destlfile))

                lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_('no files to copy'))

    return result
712
712
713 # When the user calls revert, we have to be careful to not revert any
713 # When the user calls revert, we have to be careful to not revert any
714 # changes to other largefiles accidentally. This means we have to keep
714 # changes to other largefiles accidentally. This means we have to keep
715 # track of the largefiles that are being reverted so we only pull down
715 # track of the largefiles that are being reverted so we only pull down
716 # the necessary largefiles.
716 # the necessary largefiles.
717 #
717 #
718 # Standins are only updated (to match the hash of largefiles) before
718 # Standins are only updated (to match the hash of largefiles) before
719 # commits. Update the standins then run the original revert, changing
719 # commits. Update the standins then run the original revert, changing
720 # the matcher to hit standins instead of largefiles. Based on the
720 # the matcher to hit standins instead of largefiles. Based on the
721 # resulting standins update the largefiles.
721 # resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    """Wrap revert so that reverting also restores largefiles.

    Standins are first refreshed to match the working-copy largefiles,
    the original revert is run against the standins (via a temporarily
    installed matcher that maps largefile names to standins), and the
    largefiles are then synced from the resulting standins.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        # Refresh standins of modified largefiles so the wrapped revert
        # sees current working-copy state; drop standins whose largefile
        # was deleted.
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        # Snapshot standin state so we can compute what revert changed.
        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            # Build a matcher that substitutes standins for largefile
            # names, so revert operates on standins instead of largefiles.
            if opts is None:
                opts = {}
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            def tostandin(f):
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    # Known largefile: match its standin instead.
                    return standin
                elif standin in repo[None] or lfdirstate[f] == 'r':
                    # Tracked-or-removed largefile: drop the plain name.
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    # A standin matches only if its largefile matches and
                    # it exists in one of the relevant contexts.
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()
789
789
790 # after pulling changesets, we need to take some extra care to get
790 # after pulling changesets, we need to take some extra care to get
791 # largefiles updated remotely
791 # largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrap pull so that, after changesets arrive, the largefiles for the
    revisions selected by --lfrev (or all of them with --all-largefiles)
    are downloaded into the cache as well."""
    oldlen = len(repo)
    source = source or 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    newlen = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and newlen > oldlen:
        repo.firstpulled = oldlen # for pulled() revset expression
        numcached = 0
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                cached, missing = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # Remove the marker attribute so pulled() is invalid again.
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
814
814
def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is only set (by overridepull) while evaluating
    # --lfrev expressions; elsewhere this predicate is an error.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_("pulled() only available in --lfrev"))
    # Every revision at or after the first pulled one counts as "pulled".
    return revset.baseset([r for r in subset if r >= firstpulled])
840
840
def overrideclone(orig, ui, source, dest=None, **opts):
    """Wrap clone to reject --all-largefiles when the destination is not
    a local repository (largefiles cannot be cached remotely)."""
    d = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise error.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            d)

    return orig(ui, source, dest, **opts)
851
851
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone: enable the largefiles extension in the clone's hgrc
    when the source requires it, and optionally pre-download largefiles.

    Returns the original (sourcerepo, destrepo) result, or None when
    --all-largefiles failed to download some largefiles.
    """
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # If largefiles is required for this repo, permanently enable it locally
        if 'largefiles' in repo.requirements:
            fp = repo.vfs('hgrc', 'a', text=True)
            try:
                fp.write('\n[extensions]\nlargefiles=\n')
            finally:
                fp.close()

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            # A nonzero miss count signals failure to the caller.
            if missing != 0:
                return None

    return result
883
883
def overriderebase(orig, ui, repo, **opts):
    """Wrap rebase: silence largefiles status messages and install the
    automated largefiles commit hook for the duration of the rebase."""
    if not util.safehasattr(repo, '_largefilesenabled'):
        return orig(ui, repo, **opts)

    isresuming = opts.get('continue')
    # Discard largefiles status output while rebasing.
    repo._lfstatuswriters.append(lambda *args, **kwargs: None)
    repo._lfcommithooks.append(lfutil.automatedcommithook(isresuming))
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._lfcommithooks.pop()
        repo._lfstatuswriters.pop()
896
896
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run the archive command with largefiles status enabled on the
    unfiltered repository, restoring the flag afterwards."""
    unfiltered = repo.unfiltered()
    unfiltered.lfstatus = True

    try:
        return orig(ui, unfiltered, dest, **opts)
    finally:
        unfiltered.lfstatus = False
904
904
def hgwebarchive(orig, web, req, tmpl):
    """Serve an hgweb archive request with largefiles status enabled, so
    the archive code (which checks repo.lfstatus) substitutes largefile
    contents for standins."""
    web.repo.lfstatus = True

    try:
        return orig(web, req, tmpl)
    finally:
        web.repo.lfstatus = False
912
912
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix='', mtime=None, subrepos=None):
    """Wrap archival.archive so that archives contain the largefiles'
    contents (resolved from the store or system cache) instead of their
    standins.  Falls through to the original when lfstatus is off."""
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
                    subrepos)

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise error.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Add one member to the archive, honoring the matcher and the
        # decode filters.
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        write('.hg_archival.txt', 0o644, False,
              lambda: archival.buildmetadata(ctx))

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            # Substitute the largefile for its standin: for a committed
            # node resolve the hash stored in the standin to a file in
            # the store/cache; for the working copy use the file itself.
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfutil.splitstandin(f))
            else:
                path = lfutil.splitstandin(f)

            f = lfutil.splitstandin(f)

            def getdatafn():
                # Read the largefile lazily, only when write() needs it.
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            # Propagate lfstatus so nested repos archive largefiles too.
            sub._repo.lfstatus = True
            sub.archive(archiver, prefix, submatch)

    archiver.done()
988
988
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
    """Archive an hg subrepository, substituting largefile contents for
    standins (the subrepo counterpart of overridearchive).

    Falls through to the original when lfstatus is off on the subrepo.
    """
    if not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match)

    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # BUGFIX: test the 'name' argument, not the enclosing loop variable
        # 'f' that the previous code accidentally closed over (it only
        # worked because the sole call site passes f as name).  This now
        # mirrors overridearchive's write(), which tests its own argument.
        if match and not match(name):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            # Substitute the largefile for its standin: for a committed
            # node resolve the hash stored in the standin to a file in
            # the store/cache; for the working copy use the file itself.
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfutil.splitstandin(f))
            else:
                path = lfutil.splitstandin(f)

            f = lfutil.splitstandin(f)

            def getdatafn():
                # Read the largefile lazily, only when write() needs it.
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        # Propagate lfstatus so nested repos archive largefiles too.
        sub._repo.lfstatus = True
        sub.archive(archiver, prefix + repo._path + '/', submatch)
1043
1043
1044 # If a largefile is modified, the change is not reflected in its
1044 # If a largefile is modified, the change is not reflected in its
1045 # standin until a commit. cmdutil.bailifchanged() raises an exception
1045 # standin until a commit. cmdutil.bailifchanged() raises an exception
1046 # if the repo has uncommitted changes. Wrap it to also check if
1046 # if the repo has uncommitted changes. Wrap it to also check if
1047 # largefiles were changed. This is used by bisect, backout and fetch.
1047 # largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort like cmdutil.bailifchanged, additionally treating changed
    largefiles as uncommitted changes."""
    orig(repo, *args, **kwargs)
    repo.lfstatus = True
    status = repo.status()
    repo.lfstatus = False
    dirty = (status.modified or status.added or status.removed
             or status.deleted)
    if dirty:
        raise error.Abort(_('uncommitted changes'))
1055
1055
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    """Wrap cmdutil.forget: forget normal files through the original
    command, then forget the standins of matched largefiles.

    Returns the combined (bad, forgot) lists from both passes.
    """
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # Only largefiles whose standin is in the manifest can be forgotten.
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                repo.wvfs.isdir(lfutil.standin(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                # Added but never committed: just drop the entry.
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        rejected = repo[None].forget(standins)
    finally:
        wlock.release()

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1101
1101
def _getoutgoings(repo, other, missing, addfunc):
    """Find (filename, largefile hash) pairs in the outgoing revisions
    'missing', skip largefiles that already exist on the 'other'
    repository, and invoke 'addfunc' once per remaining unique pair."""
    seen = set()
    hashes = set()
    def collect(fn, lfhash):
        # De-duplicate (filename, hash) pairs as they are reported.
        pair = (fn, lfhash)
        if pair in seen:
            return
        seen.add(pair)
        hashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, collect)
    if not hashes:
        return
    # One batched existence query against the remote store.
    existing = basestore._openstore(repo, other).exists(hashes)
    for fn, lfhash in seen:
        if not existing[lfhash]: # lfhash doesn't exist on "other"
            addfunc(fn, lfhash)
1124
1124
def outgoinghook(ui, repo, other, opts, missing):
    """Hook for 'hg outgoing --large': report the largefiles that would
    be uploaded for the outgoing changesets in 'missing'.

    With --debug, every hash is listed beneath its filename; otherwise
    only filenames and an entity count are shown.
    """
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            # Debug mode: remember each file's hashes for display.
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug('    %s\n' % (lfhash))
        else:
            # Normal mode: only filenames (and total entity count) matter.
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1156
1156
def summaryremotehook(ui, repo, opts, changes):
    """Hook for 'hg summary --remote --large': report how many largefile
    entities/files would be uploaded.

    When 'changes' is None the hook is being polled for which checks it
    needs; it answers with (needs-incoming, needs-outgoing) booleans.
    """
    largeopt = opts.get('large', False)
    if changes is None:
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))
1185
1185
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run 'hg summary' with largefiles status reporting enabled,
    restoring the flag afterwards."""
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1192
1192
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    """Wrap scmutil.addremove: route largefiles through the largefiles
    add/remove code and hand everything else to the original."""
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1227
1227
1228 # Calling purge with --all will cause the largefiles to be deleted.
1228 # Calling purge with --all will cause the largefiles to be deleted.
1229 # Override repo.status to prevent this from happening.
1229 # Override repo.status to prevent this from happening.
1230 def overridepurge(orig, ui, repo, *dirs, **opts):
1230 def overridepurge(orig, ui, repo, *dirs, **opts):
1231 # XXX Monkey patching a repoview will not work. The assigned attribute will
1231 # XXX Monkey patching a repoview will not work. The assigned attribute will
1232 # be set on the unfiltered repo, but we will only lookup attributes in the
1232 # be set on the unfiltered repo, but we will only lookup attributes in the
1233 # unfiltered repo if the lookup in the repoview object itself fails. As the
1233 # unfiltered repo if the lookup in the repoview object itself fails. As the
1234 # monkey patched method exists on the repoview class the lookup will not
1234 # monkey patched method exists on the repoview class the lookup will not
1235 # fail. As a result, the original version will shadow the monkey patched
1235 # fail. As a result, the original version will shadow the monkey patched
1236 # one, defeating the monkey patch.
1236 # one, defeating the monkey patch.
1237 #
1237 #
1238 # As a work around we use an unfiltered repo here. We should do something
1238 # As a work around we use an unfiltered repo here. We should do something
1239 # cleaner instead.
1239 # cleaner instead.
1240 repo = repo.unfiltered()
1240 repo = repo.unfiltered()
1241 oldstatus = repo.status
1241 oldstatus = repo.status
1242 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1242 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1243 clean=False, unknown=False, listsubrepos=False):
1243 clean=False, unknown=False, listsubrepos=False):
1244 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1244 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1245 listsubrepos)
1245 listsubrepos)
1246 lfdirstate = lfutil.openlfdirstate(ui, repo)
1246 lfdirstate = lfutil.openlfdirstate(ui, repo)
1247 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1247 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1248 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1248 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1249 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1249 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1250 unknown, ignored, r.clean)
1250 unknown, ignored, r.clean)
1251 repo.status = overridestatus
1251 repo.status = overridestatus
1252 orig(ui, repo, *dirs, **opts)
1252 orig(ui, repo, *dirs, **opts)
1253 repo.status = oldstatus
1253 repo.status = oldstatus
1254 def overriderollback(orig, ui, repo, **opts):
1254 def overriderollback(orig, ui, repo, **opts):
1255 wlock = repo.wlock()
1255 wlock = repo.wlock()
1256 try:
1256 try:
1257 before = repo.dirstate.parents()
1257 before = repo.dirstate.parents()
1258 orphans = set(f for f in repo.dirstate
1258 orphans = set(f for f in repo.dirstate
1259 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1259 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1260 result = orig(ui, repo, **opts)
1260 result = orig(ui, repo, **opts)
1261 after = repo.dirstate.parents()
1261 after = repo.dirstate.parents()
1262 if before == after:
1262 if before == after:
1263 return result # no need to restore standins
1263 return result # no need to restore standins
1264
1264
1265 pctx = repo['.']
1265 pctx = repo['.']
1266 for f in repo.dirstate:
1266 for f in repo.dirstate:
1267 if lfutil.isstandin(f):
1267 if lfutil.isstandin(f):
1268 orphans.discard(f)
1268 orphans.discard(f)
1269 if repo.dirstate[f] == 'r':
1269 if repo.dirstate[f] == 'r':
1270 repo.wvfs.unlinkpath(f, ignoremissing=True)
1270 repo.wvfs.unlinkpath(f, ignoremissing=True)
1271 elif f in pctx:
1271 elif f in pctx:
1272 fctx = pctx[f]
1272 fctx = pctx[f]
1273 repo.wwrite(f, fctx.data(), fctx.flags())
1273 repo.wwrite(f, fctx.data(), fctx.flags())
1274 else:
1274 else:
1275 # content of standin is not so important in 'a',
1275 # content of standin is not so important in 'a',
1276 # 'm' or 'n' (coming from the 2nd parent) cases
1276 # 'm' or 'n' (coming from the 2nd parent) cases
1277 lfutil.writestandin(repo, f, '', False)
1277 lfutil.writestandin(repo, f, '', False)
1278 for standin in orphans:
1278 for standin in orphans:
1279 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1279 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1280
1280
1281 lfdirstate = lfutil.openlfdirstate(ui, repo)
1281 lfdirstate = lfutil.openlfdirstate(ui, repo)
1282 orphans = set(lfdirstate)
1282 orphans = set(lfdirstate)
1283 lfiles = lfutil.listlfiles(repo)
1283 lfiles = lfutil.listlfiles(repo)
1284 for file in lfiles:
1284 for file in lfiles:
1285 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1285 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1286 orphans.discard(file)
1286 orphans.discard(file)
1287 for lfile in orphans:
1287 for lfile in orphans:
1288 lfdirstate.drop(lfile)
1288 lfdirstate.drop(lfile)
1289 lfdirstate.write()
1289 lfdirstate.write()
1290 finally:
1290 finally:
1291 wlock.release()
1291 wlock.release()
1292 return result
1292 return result
1293
1293
1294 def overridetransplant(orig, ui, repo, *revs, **opts):
1294 def overridetransplant(orig, ui, repo, *revs, **opts):
1295 resuming = opts.get('continue')
1295 resuming = opts.get('continue')
1296 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1296 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1297 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1297 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1298 try:
1298 try:
1299 result = orig(ui, repo, *revs, **opts)
1299 result = orig(ui, repo, *revs, **opts)
1300 finally:
1300 finally:
1301 repo._lfstatuswriters.pop()
1301 repo._lfstatuswriters.pop()
1302 repo._lfcommithooks.pop()
1302 repo._lfcommithooks.pop()
1303 return result
1303 return result
1304
1304
1305 def overridecat(orig, ui, repo, file1, *pats, **opts):
1305 def overridecat(orig, ui, repo, file1, *pats, **opts):
1306 ctx = scmutil.revsingle(repo, opts.get('rev'))
1306 ctx = scmutil.revsingle(repo, opts.get('rev'))
1307 err = 1
1307 err = 1
1308 notbad = set()
1308 notbad = set()
1309 m = scmutil.match(ctx, (file1,) + pats, opts)
1309 m = scmutil.match(ctx, (file1,) + pats, opts)
1310 origmatchfn = m.matchfn
1310 origmatchfn = m.matchfn
1311 def lfmatchfn(f):
1311 def lfmatchfn(f):
1312 if origmatchfn(f):
1312 if origmatchfn(f):
1313 return True
1313 return True
1314 lf = lfutil.splitstandin(f)
1314 lf = lfutil.splitstandin(f)
1315 if lf is None:
1315 if lf is None:
1316 return False
1316 return False
1317 notbad.add(lf)
1317 notbad.add(lf)
1318 return origmatchfn(lf)
1318 return origmatchfn(lf)
1319 m.matchfn = lfmatchfn
1319 m.matchfn = lfmatchfn
1320 origbadfn = m.bad
1320 origbadfn = m.bad
1321 def lfbadfn(f, msg):
1321 def lfbadfn(f, msg):
1322 if not f in notbad:
1322 if not f in notbad:
1323 origbadfn(f, msg)
1323 origbadfn(f, msg)
1324 m.bad = lfbadfn
1324 m.bad = lfbadfn
1325
1325
1326 origvisitdirfn = m.visitdir
1326 origvisitdirfn = m.visitdir
1327 def lfvisitdirfn(dir):
1327 def lfvisitdirfn(dir):
1328 if dir == lfutil.shortname:
1328 if dir == lfutil.shortname:
1329 return True
1329 return True
1330 ret = origvisitdirfn(dir)
1330 ret = origvisitdirfn(dir)
1331 if ret:
1331 if ret:
1332 return ret
1332 return ret
1333 lf = lfutil.splitstandin(dir)
1333 lf = lfutil.splitstandin(dir)
1334 if lf is None:
1334 if lf is None:
1335 return False
1335 return False
1336 return origvisitdirfn(lf)
1336 return origvisitdirfn(lf)
1337 m.visitdir = lfvisitdirfn
1337 m.visitdir = lfvisitdirfn
1338
1338
1339 for f in ctx.walk(m):
1339 for f in ctx.walk(m):
1340 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1340 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1341 pathname=f)
1341 pathname=f)
1342 lf = lfutil.splitstandin(f)
1342 lf = lfutil.splitstandin(f)
1343 if lf is None or origmatchfn(f):
1343 if lf is None or origmatchfn(f):
1344 # duplicating unreachable code from commands.cat
1344 # duplicating unreachable code from commands.cat
1345 data = ctx[f].data()
1345 data = ctx[f].data()
1346 if opts.get('decode'):
1346 if opts.get('decode'):
1347 data = repo.wwritedata(f, data)
1347 data = repo.wwritedata(f, data)
1348 fp.write(data)
1348 fp.write(data)
1349 else:
1349 else:
1350 hash = lfutil.readstandin(repo, lf, ctx.rev())
1350 hash = lfutil.readstandin(repo, lf, ctx.rev())
1351 if not lfutil.inusercache(repo.ui, hash):
1351 if not lfutil.inusercache(repo.ui, hash):
1352 store = basestore._openstore(repo)
1352 store = basestore._openstore(repo)
1353 success, missing = store.get([(lf, hash)])
1353 success, missing = store.get([(lf, hash)])
1354 if len(success) != 1:
1354 if len(success) != 1:
1355 raise error.Abort(
1355 raise error.Abort(
1356 _('largefile %s is not in cache and could not be '
1356 _('largefile %s is not in cache and could not be '
1357 'downloaded') % lf)
1357 'downloaded') % lf)
1358 path = lfutil.usercachepath(repo.ui, hash)
1358 path = lfutil.usercachepath(repo.ui, hash)
1359 fpin = open(path, "rb")
1359 fpin = open(path, "rb")
1360 for chunk in util.filechunkiter(fpin, 128 * 1024):
1360 for chunk in util.filechunkiter(fpin, 128 * 1024):
1361 fp.write(chunk)
1361 fp.write(chunk)
1362 fpin.close()
1362 fpin.close()
1363 fp.close()
1363 fp.close()
1364 err = 0
1364 err = 0
1365 return err
1365 return err
1366
1366
1367 def mergeupdate(orig, repo, node, branchmerge, force,
1367 def mergeupdate(orig, repo, node, branchmerge, force,
1368 *args, **kwargs):
1368 *args, **kwargs):
1369 matcher = kwargs.get('matcher', None)
1369 matcher = kwargs.get('matcher', None)
1370 # note if this is a partial update
1370 # note if this is a partial update
1371 partial = matcher and not matcher.always()
1371 partial = matcher and not matcher.always()
1372 wlock = repo.wlock()
1372 wlock = repo.wlock()
1373 try:
1373 try:
1374 # branch | | |
1374 # branch | | |
1375 # merge | force | partial | action
1375 # merge | force | partial | action
1376 # -------+-------+---------+--------------
1376 # -------+-------+---------+--------------
1377 # x | x | x | linear-merge
1377 # x | x | x | linear-merge
1378 # o | x | x | branch-merge
1378 # o | x | x | branch-merge
1379 # x | o | x | overwrite (as clean update)
1379 # x | o | x | overwrite (as clean update)
1380 # o | o | x | force-branch-merge (*1)
1380 # o | o | x | force-branch-merge (*1)
1381 # x | x | o | (*)
1381 # x | x | o | (*)
1382 # o | x | o | (*)
1382 # o | x | o | (*)
1383 # x | o | o | overwrite (as revert)
1383 # x | o | o | overwrite (as revert)
1384 # o | o | o | (*)
1384 # o | o | o | (*)
1385 #
1385 #
1386 # (*) don't care
1386 # (*) don't care
1387 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1387 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1388
1388
1389 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1389 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1390 unsure, s = lfdirstate.status(match_.always(repo.root,
1390 unsure, s = lfdirstate.status(match_.always(repo.root,
1391 repo.getcwd()),
1391 repo.getcwd()),
1392 [], False, False, False)
1392 [], False, False, False)
1393 pctx = repo['.']
1393 pctx = repo['.']
1394 for lfile in unsure + s.modified:
1394 for lfile in unsure + s.modified:
1395 lfileabs = repo.wvfs.join(lfile)
1395 lfileabs = repo.wvfs.join(lfile)
1396 if not os.path.exists(lfileabs):
1396 if not os.path.exists(lfileabs):
1397 continue
1397 continue
1398 lfhash = lfutil.hashrepofile(repo, lfile)
1398 lfhash = lfutil.hashrepofile(repo, lfile)
1399 standin = lfutil.standin(lfile)
1399 standin = lfutil.standin(lfile)
1400 lfutil.writestandin(repo, standin, lfhash,
1400 lfutil.writestandin(repo, standin, lfhash,
1401 lfutil.getexecutable(lfileabs))
1401 lfutil.getexecutable(lfileabs))
1402 if (standin in pctx and
1402 if (standin in pctx and
1403 lfhash == lfutil.readstandin(repo, lfile, '.')):
1403 lfhash == lfutil.readstandin(repo, lfile, '.')):
1404 lfdirstate.normal(lfile)
1404 lfdirstate.normal(lfile)
1405 for lfile in s.added:
1405 for lfile in s.added:
1406 lfutil.updatestandin(repo, lfutil.standin(lfile))
1406 lfutil.updatestandin(repo, lfutil.standin(lfile))
1407 lfdirstate.write()
1407 lfdirstate.write()
1408
1408
1409 oldstandins = lfutil.getstandinsstate(repo)
1409 oldstandins = lfutil.getstandinsstate(repo)
1410
1410
1411 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1411 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1412
1412
1413 newstandins = lfutil.getstandinsstate(repo)
1413 newstandins = lfutil.getstandinsstate(repo)
1414 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1414 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1415 if branchmerge or force or partial:
1415 if branchmerge or force or partial:
1416 filelist.extend(s.deleted + s.removed)
1416 filelist.extend(s.deleted + s.removed)
1417
1417
1418 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1418 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1419 normallookup=partial)
1419 normallookup=partial)
1420
1420
1421 return result
1421 return result
1422 finally:
1422 finally:
1423 wlock.release()
1423 wlock.release()
1424
1424
1425 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1425 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1426 result = orig(repo, files, *args, **kwargs)
1426 result = orig(repo, files, *args, **kwargs)
1427
1427
1428 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1428 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1429 if filelist:
1429 if filelist:
1430 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1430 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1431 printmessage=False, normallookup=True)
1431 printmessage=False, normallookup=True)
1432
1432
1433 return result
1433 return result
@@ -1,1545 +1,1545 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 )
22 )
23 from . import (
23 from . import (
24 copies,
24 copies,
25 destutil,
25 destutil,
26 error,
26 error,
27 filemerge,
27 filemerge,
28 obsolete,
28 obsolete,
29 subrepo,
29 subrepo,
30 util,
30 util,
31 worker,
31 worker,
32 )
32 )
33
33
34 _pack = struct.pack
34 _pack = struct.pack
35 _unpack = struct.unpack
35 _unpack = struct.unpack
36
36
37 def _droponode(data):
37 def _droponode(data):
38 # used for compatibility for v1
38 # used for compatibility for v1
39 bits = data.split('\0')
39 bits = data.split('\0')
40 bits = bits[:-2] + bits[-1:]
40 bits = bits[:-2] + bits[-1:]
41 return '\0'.join(bits)
41 return '\0'.join(bits)
42
42
43 class mergestate(object):
43 class mergestate(object):
44 '''track 3-way merge state of individual files
44 '''track 3-way merge state of individual files
45
45
46 The merge state is stored on disk when needed. Two files are used: one with
46 The merge state is stored on disk when needed. Two files are used: one with
47 an old format (version 1), and one with a new format (version 2). Version 2
47 an old format (version 1), and one with a new format (version 2). Version 2
48 stores a superset of the data in version 1, including new kinds of records
48 stores a superset of the data in version 1, including new kinds of records
49 in the future. For more about the new format, see the documentation for
49 in the future. For more about the new format, see the documentation for
50 `_readrecordsv2`.
50 `_readrecordsv2`.
51
51
52 Each record can contain arbitrary content, and has an associated type. This
52 Each record can contain arbitrary content, and has an associated type. This
53 `type` should be a letter. If `type` is uppercase, the record is mandatory:
53 `type` should be a letter. If `type` is uppercase, the record is mandatory:
54 versions of Mercurial that don't support it should abort. If `type` is
54 versions of Mercurial that don't support it should abort. If `type` is
55 lowercase, the record can be safely ignored.
55 lowercase, the record can be safely ignored.
56
56
57 Currently known records:
57 Currently known records:
58
58
59 L: the node of the "local" part of the merge (hexified version)
59 L: the node of the "local" part of the merge (hexified version)
60 O: the node of the "other" part of the merge (hexified version)
60 O: the node of the "other" part of the merge (hexified version)
61 F: a file to be merged entry
61 F: a file to be merged entry
62 C: a change/delete or delete/change conflict
62 C: a change/delete or delete/change conflict
63 D: a file that the external merge driver will merge internally
63 D: a file that the external merge driver will merge internally
64 (experimental)
64 (experimental)
65 m: the external merge driver defined for this merge plus its run state
65 m: the external merge driver defined for this merge plus its run state
66 (experimental)
66 (experimental)
67 X: unsupported mandatory record type (used in tests)
67 X: unsupported mandatory record type (used in tests)
68 x: unsupported advisory record type (used in tests)
68 x: unsupported advisory record type (used in tests)
69
69
70 Merge driver run states (experimental):
70 Merge driver run states (experimental):
71 u: driver-resolved files unmarked -- needs to be run next time we're about
71 u: driver-resolved files unmarked -- needs to be run next time we're about
72 to resolve or commit
72 to resolve or commit
73 m: driver-resolved files marked -- only needs to be run before commit
73 m: driver-resolved files marked -- only needs to be run before commit
74 s: success/skipped -- does not need to be run any more
74 s: success/skipped -- does not need to be run any more
75
75
76 '''
76 '''
77 statepathv1 = 'merge/state'
77 statepathv1 = 'merge/state'
78 statepathv2 = 'merge/state2'
78 statepathv2 = 'merge/state2'
79
79
80 @staticmethod
80 @staticmethod
81 def clean(repo, node=None, other=None):
81 def clean(repo, node=None, other=None):
82 """Initialize a brand new merge state, removing any existing state on
82 """Initialize a brand new merge state, removing any existing state on
83 disk."""
83 disk."""
84 ms = mergestate(repo)
84 ms = mergestate(repo)
85 ms.reset(node, other)
85 ms.reset(node, other)
86 return ms
86 return ms
87
87
88 @staticmethod
88 @staticmethod
89 def read(repo):
89 def read(repo):
90 """Initialize the merge state, reading it from disk."""
90 """Initialize the merge state, reading it from disk."""
91 ms = mergestate(repo)
91 ms = mergestate(repo)
92 ms._read()
92 ms._read()
93 return ms
93 return ms
94
94
95 def __init__(self, repo):
95 def __init__(self, repo):
96 """Initialize the merge state.
96 """Initialize the merge state.
97
97
98 Do not use this directly! Instead call read() or clean()."""
98 Do not use this directly! Instead call read() or clean()."""
99 self._repo = repo
99 self._repo = repo
100 self._dirty = False
100 self._dirty = False
101
101
102 def reset(self, node=None, other=None):
102 def reset(self, node=None, other=None):
103 self._state = {}
103 self._state = {}
104 self._local = None
104 self._local = None
105 self._other = None
105 self._other = None
106 for var in ('localctx', 'otherctx'):
106 for var in ('localctx', 'otherctx'):
107 if var in vars(self):
107 if var in vars(self):
108 delattr(self, var)
108 delattr(self, var)
109 if node:
109 if node:
110 self._local = node
110 self._local = node
111 self._other = other
111 self._other = other
112 self._readmergedriver = None
112 self._readmergedriver = None
113 if self.mergedriver:
113 if self.mergedriver:
114 self._mdstate = 's'
114 self._mdstate = 's'
115 else:
115 else:
116 self._mdstate = 'u'
116 self._mdstate = 'u'
117 shutil.rmtree(self._repo.join('merge'), True)
117 shutil.rmtree(self._repo.join('merge'), True)
118 self._results = {}
118 self._results = {}
119 self._dirty = False
119 self._dirty = False
120
120
121 def _read(self):
121 def _read(self):
122 """Analyse each record content to restore a serialized state from disk
122 """Analyse each record content to restore a serialized state from disk
123
123
124 This function process "record" entry produced by the de-serialization
124 This function process "record" entry produced by the de-serialization
125 of on disk file.
125 of on disk file.
126 """
126 """
127 self._state = {}
127 self._state = {}
128 self._local = None
128 self._local = None
129 self._other = None
129 self._other = None
130 for var in ('localctx', 'otherctx'):
130 for var in ('localctx', 'otherctx'):
131 if var in vars(self):
131 if var in vars(self):
132 delattr(self, var)
132 delattr(self, var)
133 self._readmergedriver = None
133 self._readmergedriver = None
134 self._mdstate = 's'
134 self._mdstate = 's'
135 unsupported = set()
135 unsupported = set()
136 records = self._readrecords()
136 records = self._readrecords()
137 for rtype, record in records:
137 for rtype, record in records:
138 if rtype == 'L':
138 if rtype == 'L':
139 self._local = bin(record)
139 self._local = bin(record)
140 elif rtype == 'O':
140 elif rtype == 'O':
141 self._other = bin(record)
141 self._other = bin(record)
142 elif rtype == 'm':
142 elif rtype == 'm':
143 bits = record.split('\0', 1)
143 bits = record.split('\0', 1)
144 mdstate = bits[1]
144 mdstate = bits[1]
145 if len(mdstate) != 1 or mdstate not in 'ums':
145 if len(mdstate) != 1 or mdstate not in 'ums':
146 # the merge driver should be idempotent, so just rerun it
146 # the merge driver should be idempotent, so just rerun it
147 mdstate = 'u'
147 mdstate = 'u'
148
148
149 self._readmergedriver = bits[0]
149 self._readmergedriver = bits[0]
150 self._mdstate = mdstate
150 self._mdstate = mdstate
151 elif rtype in 'FDC':
151 elif rtype in 'FDC':
152 bits = record.split('\0')
152 bits = record.split('\0')
153 self._state[bits[0]] = bits[1:]
153 self._state[bits[0]] = bits[1:]
154 elif not rtype.islower():
154 elif not rtype.islower():
155 unsupported.add(rtype)
155 unsupported.add(rtype)
156 self._results = {}
156 self._results = {}
157 self._dirty = False
157 self._dirty = False
158
158
159 if unsupported:
159 if unsupported:
160 raise error.UnsupportedMergeRecords(unsupported)
160 raise error.UnsupportedMergeRecords(unsupported)
161
161
162 def _readrecords(self):
162 def _readrecords(self):
163 """Read merge state from disk and return a list of record (TYPE, data)
163 """Read merge state from disk and return a list of record (TYPE, data)
164
164
165 We read data from both v1 and v2 files and decide which one to use.
165 We read data from both v1 and v2 files and decide which one to use.
166
166
167 V1 has been used by version prior to 2.9.1 and contains less data than
167 V1 has been used by version prior to 2.9.1 and contains less data than
168 v2. We read both versions and check if no data in v2 contradicts
168 v2. We read both versions and check if no data in v2 contradicts
169 v1. If there is not contradiction we can safely assume that both v1
169 v1. If there is not contradiction we can safely assume that both v1
170 and v2 were written at the same time and use the extract data in v2. If
170 and v2 were written at the same time and use the extract data in v2. If
171 there is contradiction we ignore v2 content as we assume an old version
171 there is contradiction we ignore v2 content as we assume an old version
172 of Mercurial has overwritten the mergestate file and left an old v2
172 of Mercurial has overwritten the mergestate file and left an old v2
173 file around.
173 file around.
174
174
175 returns list of record [(TYPE, data), ...]"""
175 returns list of record [(TYPE, data), ...]"""
176 v1records = self._readrecordsv1()
176 v1records = self._readrecordsv1()
177 v2records = self._readrecordsv2()
177 v2records = self._readrecordsv2()
178 if self._v1v2match(v1records, v2records):
178 if self._v1v2match(v1records, v2records):
179 return v2records
179 return v2records
180 else:
180 else:
181 # v1 file is newer than v2 file, use it
181 # v1 file is newer than v2 file, use it
182 # we have to infer the "other" changeset of the merge
182 # we have to infer the "other" changeset of the merge
183 # we cannot do better than that with v1 of the format
183 # we cannot do better than that with v1 of the format
184 mctx = self._repo[None].parents()[-1]
184 mctx = self._repo[None].parents()[-1]
185 v1records.append(('O', mctx.hex()))
185 v1records.append(('O', mctx.hex()))
186 # add place holder "other" file node information
186 # add place holder "other" file node information
187 # nobody is using it yet so we do no need to fetch the data
187 # nobody is using it yet so we do no need to fetch the data
188 # if mctx was wrong `mctx[bits[-2]]` may fails.
188 # if mctx was wrong `mctx[bits[-2]]` may fails.
189 for idx, r in enumerate(v1records):
189 for idx, r in enumerate(v1records):
190 if r[0] == 'F':
190 if r[0] == 'F':
191 bits = r[1].split('\0')
191 bits = r[1].split('\0')
192 bits.insert(-2, '')
192 bits.insert(-2, '')
193 v1records[idx] = (r[0], '\0'.join(bits))
193 v1records[idx] = (r[0], '\0'.join(bits))
194 return v1records
194 return v1records
195
195
196 def _v1v2match(self, v1records, v2records):
196 def _v1v2match(self, v1records, v2records):
197 oldv2 = set() # old format version of v2 record
197 oldv2 = set() # old format version of v2 record
198 for rec in v2records:
198 for rec in v2records:
199 if rec[0] == 'L':
199 if rec[0] == 'L':
200 oldv2.add(rec)
200 oldv2.add(rec)
201 elif rec[0] == 'F':
201 elif rec[0] == 'F':
202 # drop the onode data (not contained in v1)
202 # drop the onode data (not contained in v1)
203 oldv2.add(('F', _droponode(rec[1])))
203 oldv2.add(('F', _droponode(rec[1])))
204 for rec in v1records:
204 for rec in v1records:
205 if rec not in oldv2:
205 if rec not in oldv2:
206 return False
206 return False
207 else:
207 else:
208 return True
208 return True
209
209
210 def _readrecordsv1(self):
210 def _readrecordsv1(self):
211 """read on disk merge state for version 1 file
211 """read on disk merge state for version 1 file
212
212
213 returns list of record [(TYPE, data), ...]
213 returns list of record [(TYPE, data), ...]
214
214
215 Note: the "F" data from this file are one entry short
215 Note: the "F" data from this file are one entry short
216 (no "other file node" entry)
216 (no "other file node" entry)
217 """
217 """
218 records = []
218 records = []
219 try:
219 try:
220 f = self._repo.vfs(self.statepathv1)
220 f = self._repo.vfs(self.statepathv1)
221 for i, l in enumerate(f):
221 for i, l in enumerate(f):
222 if i == 0:
222 if i == 0:
223 records.append(('L', l[:-1]))
223 records.append(('L', l[:-1]))
224 else:
224 else:
225 records.append(('F', l[:-1]))
225 records.append(('F', l[:-1]))
226 f.close()
226 f.close()
227 except IOError as err:
227 except IOError as err:
228 if err.errno != errno.ENOENT:
228 if err.errno != errno.ENOENT:
229 raise
229 raise
230 return records
230 return records
231
231
232 def _readrecordsv2(self):
232 def _readrecordsv2(self):
233 """read on disk merge state for version 2 file
233 """read on disk merge state for version 2 file
234
234
235 This format is a list of arbitrary records of the form:
235 This format is a list of arbitrary records of the form:
236
236
237 [type][length][content]
237 [type][length][content]
238
238
239 `type` is a single character, `length` is a 4 byte integer, and
239 `type` is a single character, `length` is a 4 byte integer, and
240 `content` is an arbitrary byte sequence of length `length`.
240 `content` is an arbitrary byte sequence of length `length`.
241
241
242 Mercurial versions prior to 3.7 have a bug where if there are
242 Mercurial versions prior to 3.7 have a bug where if there are
243 unsupported mandatory merge records, attempting to clear out the merge
243 unsupported mandatory merge records, attempting to clear out the merge
244 state with hg update --clean or similar aborts. The 't' record type
244 state with hg update --clean or similar aborts. The 't' record type
245 works around that by writing out what those versions treat as an
245 works around that by writing out what those versions treat as an
246 advisory record, but later versions interpret as special: the first
246 advisory record, but later versions interpret as special: the first
247 character is the 'real' record type and everything onwards is the data.
247 character is the 'real' record type and everything onwards is the data.
248
248
249 Returns list of records [(TYPE, data), ...]."""
249 Returns list of records [(TYPE, data), ...]."""
250 records = []
250 records = []
251 try:
251 try:
252 f = self._repo.vfs(self.statepathv2)
252 f = self._repo.vfs(self.statepathv2)
253 data = f.read()
253 data = f.read()
254 off = 0
254 off = 0
255 end = len(data)
255 end = len(data)
256 while off < end:
256 while off < end:
257 rtype = data[off]
257 rtype = data[off]
258 off += 1
258 off += 1
259 length = _unpack('>I', data[off:(off + 4)])[0]
259 length = _unpack('>I', data[off:(off + 4)])[0]
260 off += 4
260 off += 4
261 record = data[off:(off + length)]
261 record = data[off:(off + length)]
262 off += length
262 off += length
263 if rtype == 't':
263 if rtype == 't':
264 rtype, record = record[0], record[1:]
264 rtype, record = record[0], record[1:]
265 records.append((rtype, record))
265 records.append((rtype, record))
266 f.close()
266 f.close()
267 except IOError as err:
267 except IOError as err:
268 if err.errno != errno.ENOENT:
268 if err.errno != errno.ENOENT:
269 raise
269 raise
270 return records
270 return records
271
271
272 @util.propertycache
272 @util.propertycache
273 def mergedriver(self):
273 def mergedriver(self):
274 # protect against the following:
274 # protect against the following:
275 # - A configures a malicious merge driver in their hgrc, then
275 # - A configures a malicious merge driver in their hgrc, then
276 # pauses the merge
276 # pauses the merge
277 # - A edits their hgrc to remove references to the merge driver
277 # - A edits their hgrc to remove references to the merge driver
278 # - A gives a copy of their entire repo, including .hg, to B
278 # - A gives a copy of their entire repo, including .hg, to B
279 # - B inspects .hgrc and finds it to be clean
279 # - B inspects .hgrc and finds it to be clean
280 # - B then continues the merge and the malicious merge driver
280 # - B then continues the merge and the malicious merge driver
281 # gets invoked
281 # gets invoked
282 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
282 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
283 if (self._readmergedriver is not None
283 if (self._readmergedriver is not None
284 and self._readmergedriver != configmergedriver):
284 and self._readmergedriver != configmergedriver):
285 raise error.ConfigError(
285 raise error.ConfigError(
286 _("merge driver changed since merge started"),
286 _("merge driver changed since merge started"),
287 hint=_("revert merge driver change or abort merge"))
287 hint=_("revert merge driver change or abort merge"))
288
288
289 return configmergedriver
289 return configmergedriver
290
290
291 @util.propertycache
291 @util.propertycache
292 def localctx(self):
292 def localctx(self):
293 if self._local is None:
293 if self._local is None:
294 raise RuntimeError("localctx accessed but self._local isn't set")
294 raise RuntimeError("localctx accessed but self._local isn't set")
295 return self._repo[self._local]
295 return self._repo[self._local]
296
296
297 @util.propertycache
297 @util.propertycache
298 def otherctx(self):
298 def otherctx(self):
299 if self._other is None:
299 if self._other is None:
300 raise RuntimeError("localctx accessed but self._local isn't set")
300 raise RuntimeError("localctx accessed but self._local isn't set")
301 return self._repo[self._other]
301 return self._repo[self._other]
302
302
303 def active(self):
303 def active(self):
304 """Whether mergestate is active.
304 """Whether mergestate is active.
305
305
306 Returns True if there appears to be mergestate. This is a rough proxy
306 Returns True if there appears to be mergestate. This is a rough proxy
307 for "is a merge in progress."
307 for "is a merge in progress."
308 """
308 """
309 # Check local variables before looking at filesystem for performance
309 # Check local variables before looking at filesystem for performance
310 # reasons.
310 # reasons.
311 return bool(self._local) or bool(self._state) or \
311 return bool(self._local) or bool(self._state) or \
312 self._repo.vfs.exists(self.statepathv1) or \
312 self._repo.vfs.exists(self.statepathv1) or \
313 self._repo.vfs.exists(self.statepathv2)
313 self._repo.vfs.exists(self.statepathv2)
314
314
315 def commit(self):
315 def commit(self):
316 """Write current state on disk (if necessary)"""
316 """Write current state on disk (if necessary)"""
317 if self._dirty:
317 if self._dirty:
318 records = self._makerecords()
318 records = self._makerecords()
319 self._writerecords(records)
319 self._writerecords(records)
320 self._dirty = False
320 self._dirty = False
321
321
322 def _makerecords(self):
322 def _makerecords(self):
323 records = []
323 records = []
324 records.append(('L', hex(self._local)))
324 records.append(('L', hex(self._local)))
325 records.append(('O', hex(self._other)))
325 records.append(('O', hex(self._other)))
326 if self.mergedriver:
326 if self.mergedriver:
327 records.append(('m', '\0'.join([
327 records.append(('m', '\0'.join([
328 self.mergedriver, self._mdstate])))
328 self.mergedriver, self._mdstate])))
329 for d, v in self._state.iteritems():
329 for d, v in self._state.iteritems():
330 if v[0] == 'd':
330 if v[0] == 'd':
331 records.append(('D', '\0'.join([d] + v)))
331 records.append(('D', '\0'.join([d] + v)))
332 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
332 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
333 # older versions of Mercurial
333 # older versions of Mercurial
334 elif v[1] == nullhex or v[6] == nullhex:
334 elif v[1] == nullhex or v[6] == nullhex:
335 records.append(('C', '\0'.join([d] + v)))
335 records.append(('C', '\0'.join([d] + v)))
336 else:
336 else:
337 records.append(('F', '\0'.join([d] + v)))
337 records.append(('F', '\0'.join([d] + v)))
338 return records
338 return records
339
339
340 def _writerecords(self, records):
340 def _writerecords(self, records):
341 """Write current state on disk (both v1 and v2)"""
341 """Write current state on disk (both v1 and v2)"""
342 self._writerecordsv1(records)
342 self._writerecordsv1(records)
343 self._writerecordsv2(records)
343 self._writerecordsv2(records)
344
344
345 def _writerecordsv1(self, records):
345 def _writerecordsv1(self, records):
346 """Write current state on disk in a version 1 file"""
346 """Write current state on disk in a version 1 file"""
347 f = self._repo.vfs(self.statepathv1, 'w')
347 f = self._repo.vfs(self.statepathv1, 'w')
348 irecords = iter(records)
348 irecords = iter(records)
349 lrecords = irecords.next()
349 lrecords = irecords.next()
350 assert lrecords[0] == 'L'
350 assert lrecords[0] == 'L'
351 f.write(hex(self._local) + '\n')
351 f.write(hex(self._local) + '\n')
352 for rtype, data in irecords:
352 for rtype, data in irecords:
353 if rtype == 'F':
353 if rtype == 'F':
354 f.write('%s\n' % _droponode(data))
354 f.write('%s\n' % _droponode(data))
355 f.close()
355 f.close()
356
356
357 def _writerecordsv2(self, records):
357 def _writerecordsv2(self, records):
358 """Write current state on disk in a version 2 file
358 """Write current state on disk in a version 2 file
359
359
360 See the docstring for _readrecordsv2 for why we use 't'."""
360 See the docstring for _readrecordsv2 for why we use 't'."""
361 # these are the records that all version 2 clients can read
361 # these are the records that all version 2 clients can read
362 whitelist = 'LOF'
362 whitelist = 'LOF'
363 f = self._repo.vfs(self.statepathv2, 'w')
363 f = self._repo.vfs(self.statepathv2, 'w')
364 for key, data in records:
364 for key, data in records:
365 assert len(key) == 1
365 assert len(key) == 1
366 if key not in whitelist:
366 if key not in whitelist:
367 key, data = 't', '%s%s' % (key, data)
367 key, data = 't', '%s%s' % (key, data)
368 format = '>sI%is' % len(data)
368 format = '>sI%is' % len(data)
369 f.write(_pack(format, key, len(data), data))
369 f.write(_pack(format, key, len(data), data))
370 f.close()
370 f.close()
371
371
372 def add(self, fcl, fco, fca, fd):
372 def add(self, fcl, fco, fca, fd):
373 """add a new (potentially?) conflicting file the merge state
373 """add a new (potentially?) conflicting file the merge state
374 fcl: file context for local,
374 fcl: file context for local,
375 fco: file context for remote,
375 fco: file context for remote,
376 fca: file context for ancestors,
376 fca: file context for ancestors,
377 fd: file path of the resulting merge.
377 fd: file path of the resulting merge.
378
378
379 note: also write the local version to the `.hg/merge` directory.
379 note: also write the local version to the `.hg/merge` directory.
380 """
380 """
381 if fcl.isabsent():
381 if fcl.isabsent():
382 hash = nullhex
382 hash = nullhex
383 else:
383 else:
384 hash = util.sha1(fcl.path()).hexdigest()
384 hash = util.sha1(fcl.path()).hexdigest()
385 self._repo.vfs.write('merge/' + hash, fcl.data())
385 self._repo.vfs.write('merge/' + hash, fcl.data())
386 self._state[fd] = ['u', hash, fcl.path(),
386 self._state[fd] = ['u', hash, fcl.path(),
387 fca.path(), hex(fca.filenode()),
387 fca.path(), hex(fca.filenode()),
388 fco.path(), hex(fco.filenode()),
388 fco.path(), hex(fco.filenode()),
389 fcl.flags()]
389 fcl.flags()]
390 self._dirty = True
390 self._dirty = True
391
391
392 def __contains__(self, dfile):
392 def __contains__(self, dfile):
393 return dfile in self._state
393 return dfile in self._state
394
394
395 def __getitem__(self, dfile):
395 def __getitem__(self, dfile):
396 return self._state[dfile][0]
396 return self._state[dfile][0]
397
397
398 def __iter__(self):
398 def __iter__(self):
399 return iter(sorted(self._state))
399 return iter(sorted(self._state))
400
400
401 def files(self):
401 def files(self):
402 return self._state.keys()
402 return self._state.keys()
403
403
404 def mark(self, dfile, state):
404 def mark(self, dfile, state):
405 self._state[dfile][0] = state
405 self._state[dfile][0] = state
406 self._dirty = True
406 self._dirty = True
407
407
408 def mdstate(self):
408 def mdstate(self):
409 return self._mdstate
409 return self._mdstate
410
410
411 def unresolved(self):
411 def unresolved(self):
412 """Obtain the paths of unresolved files."""
412 """Obtain the paths of unresolved files."""
413
413
414 for f, entry in self._state.items():
414 for f, entry in self._state.items():
415 if entry[0] == 'u':
415 if entry[0] == 'u':
416 yield f
416 yield f
417
417
418 def driverresolved(self):
418 def driverresolved(self):
419 """Obtain the paths of driver-resolved files."""
419 """Obtain the paths of driver-resolved files."""
420
420
421 for f, entry in self._state.items():
421 for f, entry in self._state.items():
422 if entry[0] == 'd':
422 if entry[0] == 'd':
423 yield f
423 yield f
424
424
    def _resolve(self, preresolve, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`"""
        # already resolved or driver-resolved: nothing to do
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                # executable bit set somewhere but no ancestor to arbitrate;
                # only warn once (during the preresolve pass)
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s\n') % afile)
            elif flags == fla:
                # local flags unchanged from ancestor: take the other side's
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                # replay the saved pre-merge copy of the local file
                f = self._repo.vfs('merge/' + hash)
                self._repo.wwrite(dfile, f.read(), flags)
                f.close()
            else:
                # local side was absent: make sure the file is gone
                self._repo.wvfs.unlinkpath(dfile, ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, self._local,
                                                      lfile, fcd, fco, fca,
                                                      labels=labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, self._local,
                                                       lfile, fcd, fco, fca,
                                                       labels=labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not r:
            # zero return code: merge succeeded, mark resolved
            self.mark(dfile, 'r')

        if complete:
            # record the dirstate action implied by the merge outcome
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r
490
490
491 def _filectxorabsent(self, hexnode, ctx, f):
491 def _filectxorabsent(self, hexnode, ctx, f):
492 if hexnode == nullhex:
492 if hexnode == nullhex:
493 return filemerge.absentfilectx(ctx, f)
493 return filemerge.absentfilectx(ctx, f)
494 else:
494 else:
495 return ctx[f]
495 return ctx[f]
496
496
497 def preresolve(self, dfile, wctx, labels=None):
497 def preresolve(self, dfile, wctx, labels=None):
498 """run premerge process for dfile
498 """run premerge process for dfile
499
499
500 Returns whether the merge is complete, and the exit code."""
500 Returns whether the merge is complete, and the exit code."""
501 return self._resolve(True, dfile, wctx, labels=labels)
501 return self._resolve(True, dfile, wctx, labels=labels)
502
502
503 def resolve(self, dfile, wctx, labels=None):
503 def resolve(self, dfile, wctx, labels=None):
504 """run merge process (assuming premerge was run) for dfile
504 """run merge process (assuming premerge was run) for dfile
505
505
506 Returns the exit code of the merge."""
506 Returns the exit code of the merge."""
507 return self._resolve(False, dfile, wctx, labels=labels)[1]
507 return self._resolve(False, dfile, wctx, labels=labels)[1]
508
508
509 def counts(self):
509 def counts(self):
510 """return counts for updated, merged and removed files in this
510 """return counts for updated, merged and removed files in this
511 session"""
511 session"""
512 updated, merged, removed = 0, 0, 0
512 updated, merged, removed = 0, 0, 0
513 for r, action in self._results.itervalues():
513 for r, action in self._results.itervalues():
514 if r is None:
514 if r is None:
515 updated += 1
515 updated += 1
516 elif r == 0:
516 elif r == 0:
517 if action == 'r':
517 if action == 'r':
518 removed += 1
518 removed += 1
519 else:
519 else:
520 merged += 1
520 merged += 1
521 return updated, merged, removed
521 return updated, merged, removed
522
522
523 def unresolvedcount(self):
523 def unresolvedcount(self):
524 """get unresolved count for this merge (persistent)"""
524 """get unresolved count for this merge (persistent)"""
525 return len([True for f, entry in self._state.iteritems()
525 return len([True for f, entry in self._state.iteritems()
526 if entry[0] == 'u'])
526 if entry[0] == 'u'])
527
527
528 def actions(self):
528 def actions(self):
529 """return lists of actions to perform on the dirstate"""
529 """return lists of actions to perform on the dirstate"""
530 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
530 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
531 for f, (r, action) in self._results.iteritems():
531 for f, (r, action) in self._results.iteritems():
532 if action is not None:
532 if action is not None:
533 actions[action].append((f, None, "merge result"))
533 actions[action].append((f, None, "merge result"))
534 return actions
534 return actions
535
535
536 def recordactions(self):
536 def recordactions(self):
537 """record remove/add/get actions in the dirstate"""
537 """record remove/add/get actions in the dirstate"""
538 branchmerge = self._repo.dirstate.p2() != nullid
538 branchmerge = self._repo.dirstate.p2() != nullid
539 recordupdates(self._repo, self.actions(), branchmerge)
539 recordupdates(self._repo, self.actions(), branchmerge)
540
540
541 def queueremove(self, f):
541 def queueremove(self, f):
542 """queues a file to be removed from the dirstate
542 """queues a file to be removed from the dirstate
543
543
544 Meant for use by custom merge drivers."""
544 Meant for use by custom merge drivers."""
545 self._results[f] = 0, 'r'
545 self._results[f] = 0, 'r'
546
546
547 def queueadd(self, f):
547 def queueadd(self, f):
548 """queues a file to be added to the dirstate
548 """queues a file to be added to the dirstate
549
549
550 Meant for use by custom merge drivers."""
550 Meant for use by custom merge drivers."""
551 self._results[f] = 0, 'a'
551 self._results[f] = 0, 'a'
552
552
553 def queueget(self, f):
553 def queueget(self, f):
554 """queues a file to be marked modified in the dirstate
554 """queues a file to be marked modified in the dirstate
555
555
556 Meant for use by custom merge drivers."""
556 Meant for use by custom merge drivers."""
557 self._results[f] = 0, 'g'
557 self._results[f] = 0, 'g'
558
558
559 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
559 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
560 if f2 is None:
560 if f2 is None:
561 f2 = f
561 f2 = f
562 return (os.path.isfile(repo.wjoin(f))
562 return (os.path.isfile(repo.wjoin(f))
563 and repo.wvfs.audit.check(f)
563 and repo.wvfs.audit.check(f)
564 and repo.dirstate.normalize(f) not in repo.dirstate
564 and repo.dirstate.normalize(f) not in repo.dirstate
565 and mctx[f2].cmp(wctx[f]))
565 and mctx[f2].cmp(wctx[f]))
566
566
def _checkunknownfiles(repo, wctx, mctx, force, actions):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    aborts = []
    if not force:
        for f, (m, args, msg) in actions.iteritems():
            # 'c' (create) / 'dc' (delete vs change): a conflicting
            # untracked file means we must abort
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    aborts.append(f)
            elif m == 'dg':
                # directory-rename get: compare against the rename source
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    aborts.append(f)

    for f in sorted(aborts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if aborts:
        raise error.Abort(_("untracked files in working directory differ "
                            "from files in requested revision"))

    # downgrade/upgrade the remaining provisional actions now that no
    # conflicting untracked files were found (or force was given)
    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            actions[f] = ('g', args, msg)
        elif m == 'cm':
            fl2, anc = args
            different = _checkunknownfile(repo, wctx, mctx, f)
            if different:
                actions[f] = ('m', (f, f, None, False, anc),
                              "remote differs from untracked local")
            else:
                actions[f] = ('g', (fl2,), "remote created")
600
600
601 def _forgetremoved(wctx, mctx, branchmerge):
601 def _forgetremoved(wctx, mctx, branchmerge):
602 """
602 """
603 Forget removed files
603 Forget removed files
604
604
605 If we're jumping between revisions (as opposed to merging), and if
605 If we're jumping between revisions (as opposed to merging), and if
606 neither the working directory nor the target rev has the file,
606 neither the working directory nor the target rev has the file,
607 then we need to remove it from the dirstate, to prevent the
607 then we need to remove it from the dirstate, to prevent the
608 dirstate from listing the file when it is no longer in the
608 dirstate from listing the file when it is no longer in the
609 manifest.
609 manifest.
610
610
611 If we're merging, and the other revision has removed a file
611 If we're merging, and the other revision has removed a file
612 that is not present in the working directory, we need to mark it
612 that is not present in the working directory, we need to mark it
613 as removed.
613 as removed.
614 """
614 """
615
615
616 actions = {}
616 actions = {}
617 m = 'f'
617 m = 'f'
618 if branchmerge:
618 if branchmerge:
619 m = 'r'
619 m = 'r'
620 for f in wctx.deleted():
620 for f in wctx.deleted():
621 if f not in mctx:
621 if f not in mctx:
622 actions[f] = m, None, "forget deleted"
622 actions[f] = m, None, "forget deleted"
623
623
624 if not branchmerge:
624 if not branchmerge:
625 for f in wctx.removed():
625 for f in wctx.removed():
626 if f not in mctx:
626 if f not in mctx:
627 actions[f] = 'f', None, "forget removed"
627 actions[f] = 'f', None, "forget removed"
628
628
629 return actions
629 return actions
630
630
def _checkcollision(repo, wmf, actions):
    """Abort if the merged result would contain case-folding collisions.

    wmf is the working manifest; actions maps action type to lists of
    (file, args, msg) tuples. Raises error.Abort on any collision.
    """
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            # directory-rename move: source f2 disappears, f appears
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
673
673
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # default implementation: nothing to preprocess, report success
    return True
679
679
def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    # default implementation: nothing to conclude, report success
    return True
685
685
686 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
686 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
687 acceptremote, followcopies):
687 acceptremote, followcopies):
688 """
688 """
689 Merge p1 and p2 with ancestor pa and generate merge action list
689 Merge p1 and p2 with ancestor pa and generate merge action list
690
690
691 branchmerge and force are as passed in to update
691 branchmerge and force are as passed in to update
692 partial = function to filter file lists
692 partial = function to filter file lists
693 acceptremote = accept the incoming changes without prompting
693 acceptremote = accept the incoming changes without prompting
694 """
694 """
695
695
696 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
696 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
697
697
698 # manifests fetched in order are going to be faster, so prime the caches
698 # manifests fetched in order are going to be faster, so prime the caches
699 [x.manifest() for x in
699 [x.manifest() for x in
700 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
700 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
701
701
702 if followcopies:
702 if followcopies:
703 ret = copies.mergecopies(repo, wctx, p2, pa)
703 ret = copies.mergecopies(repo, wctx, p2, pa)
704 copy, movewithdir, diverge, renamedelete = ret
704 copy, movewithdir, diverge, renamedelete = ret
705
705
706 repo.ui.note(_("resolving manifests\n"))
706 repo.ui.note(_("resolving manifests\n"))
707 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
707 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
708 % (bool(branchmerge), bool(force), bool(partial)))
708 % (bool(branchmerge), bool(force), bool(partial)))
709 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
709 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
710
710
711 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
711 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
712 copied = set(copy.values())
712 copied = set(copy.values())
713 copied.update(movewithdir.values())
713 copied.update(movewithdir.values())
714
714
715 if '.hgsubstate' in m1:
715 if '.hgsubstate' in m1:
716 # check whether sub state is modified
716 # check whether sub state is modified
717 for s in sorted(wctx.substate):
717 for s in sorted(wctx.substate):
718 if wctx.sub(s).dirty():
718 if wctx.sub(s).dirty():
719 m1['.hgsubstate'] += '+'
719 m1['.hgsubstate'] += '+'
720 break
720 break
721
721
722 # Compare manifests
722 # Compare manifests
723 diff = m1.diff(m2)
723 diff = m1.diff(m2)
724
724
725 actions = {}
725 actions = {}
726 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
726 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
727 if partial and not partial(f):
727 if partial and not partial(f):
728 continue
728 continue
729 if n1 and n2: # file exists on both local and remote side
729 if n1 and n2: # file exists on both local and remote side
730 if f not in ma:
730 if f not in ma:
731 fa = copy.get(f, None)
731 fa = copy.get(f, None)
732 if fa is not None:
732 if fa is not None:
733 actions[f] = ('m', (f, f, fa, False, pa.node()),
733 actions[f] = ('m', (f, f, fa, False, pa.node()),
734 "both renamed from " + fa)
734 "both renamed from " + fa)
735 else:
735 else:
736 actions[f] = ('m', (f, f, None, False, pa.node()),
736 actions[f] = ('m', (f, f, None, False, pa.node()),
737 "both created")
737 "both created")
738 else:
738 else:
739 a = ma[f]
739 a = ma[f]
740 fla = ma.flags(f)
740 fla = ma.flags(f)
741 nol = 'l' not in fl1 + fl2 + fla
741 nol = 'l' not in fl1 + fl2 + fla
742 if n2 == a and fl2 == fla:
742 if n2 == a and fl2 == fla:
743 actions[f] = ('k' , (), "remote unchanged")
743 actions[f] = ('k' , (), "remote unchanged")
744 elif n1 == a and fl1 == fla: # local unchanged - use remote
744 elif n1 == a and fl1 == fla: # local unchanged - use remote
745 if n1 == n2: # optimization: keep local content
745 if n1 == n2: # optimization: keep local content
746 actions[f] = ('e', (fl2,), "update permissions")
746 actions[f] = ('e', (fl2,), "update permissions")
747 else:
747 else:
748 actions[f] = ('g', (fl2,), "remote is newer")
748 actions[f] = ('g', (fl2,), "remote is newer")
749 elif nol and n2 == a: # remote only changed 'x'
749 elif nol and n2 == a: # remote only changed 'x'
750 actions[f] = ('e', (fl2,), "update permissions")
750 actions[f] = ('e', (fl2,), "update permissions")
751 elif nol and n1 == a: # local only changed 'x'
751 elif nol and n1 == a: # local only changed 'x'
752 actions[f] = ('g', (fl1,), "remote is newer")
752 actions[f] = ('g', (fl1,), "remote is newer")
753 else: # both changed something
753 else: # both changed something
754 actions[f] = ('m', (f, f, f, False, pa.node()),
754 actions[f] = ('m', (f, f, f, False, pa.node()),
755 "versions differ")
755 "versions differ")
756 elif n1: # file exists only on local side
756 elif n1: # file exists only on local side
757 if f in copied:
757 if f in copied:
758 pass # we'll deal with it on m2 side
758 pass # we'll deal with it on m2 side
759 elif f in movewithdir: # directory rename, move local
759 elif f in movewithdir: # directory rename, move local
760 f2 = movewithdir[f]
760 f2 = movewithdir[f]
761 if f2 in m2:
761 if f2 in m2:
762 actions[f2] = ('m', (f, f2, None, True, pa.node()),
762 actions[f2] = ('m', (f, f2, None, True, pa.node()),
763 "remote directory rename, both created")
763 "remote directory rename, both created")
764 else:
764 else:
765 actions[f2] = ('dm', (f, fl1),
765 actions[f2] = ('dm', (f, fl1),
766 "remote directory rename - move from " + f)
766 "remote directory rename - move from " + f)
767 elif f in copy:
767 elif f in copy:
768 f2 = copy[f]
768 f2 = copy[f]
769 actions[f] = ('m', (f, f2, f2, False, pa.node()),
769 actions[f] = ('m', (f, f2, f2, False, pa.node()),
770 "local copied/moved from " + f2)
770 "local copied/moved from " + f2)
771 elif f in ma: # clean, a different, no remote
771 elif f in ma: # clean, a different, no remote
772 if n1 != ma[f]:
772 if n1 != ma[f]:
773 if acceptremote:
773 if acceptremote:
774 actions[f] = ('r', None, "remote delete")
774 actions[f] = ('r', None, "remote delete")
775 else:
775 else:
776 actions[f] = ('cd', (f, None, f, False, pa.node()),
776 actions[f] = ('cd', (f, None, f, False, pa.node()),
777 "prompt changed/deleted")
777 "prompt changed/deleted")
778 elif n1[20:] == 'a':
778 elif n1[20:] == 'a':
779 # This extra 'a' is added by working copy manifest to mark
779 # This extra 'a' is added by working copy manifest to mark
780 # the file as locally added. We should forget it instead of
780 # the file as locally added. We should forget it instead of
781 # deleting it.
781 # deleting it.
782 actions[f] = ('f', None, "remote deleted")
782 actions[f] = ('f', None, "remote deleted")
783 else:
783 else:
784 actions[f] = ('r', None, "other deleted")
784 actions[f] = ('r', None, "other deleted")
785 elif n2: # file exists only on remote side
785 elif n2: # file exists only on remote side
786 if f in copied:
786 if f in copied:
787 pass # we'll deal with it on m1 side
787 pass # we'll deal with it on m1 side
788 elif f in movewithdir:
788 elif f in movewithdir:
789 f2 = movewithdir[f]
789 f2 = movewithdir[f]
790 if f2 in m1:
790 if f2 in m1:
791 actions[f2] = ('m', (f2, f, None, False, pa.node()),
791 actions[f2] = ('m', (f2, f, None, False, pa.node()),
792 "local directory rename, both created")
792 "local directory rename, both created")
793 else:
793 else:
794 actions[f2] = ('dg', (f, fl2),
794 actions[f2] = ('dg', (f, fl2),
795 "local directory rename - get from " + f)
795 "local directory rename - get from " + f)
796 elif f in copy:
796 elif f in copy:
797 f2 = copy[f]
797 f2 = copy[f]
798 if f2 in m2:
798 if f2 in m2:
799 actions[f] = ('m', (f2, f, f2, False, pa.node()),
799 actions[f] = ('m', (f2, f, f2, False, pa.node()),
800 "remote copied from " + f2)
800 "remote copied from " + f2)
801 else:
801 else:
802 actions[f] = ('m', (f2, f, f2, True, pa.node()),
802 actions[f] = ('m', (f2, f, f2, True, pa.node()),
803 "remote moved from " + f2)
803 "remote moved from " + f2)
804 elif f not in ma:
804 elif f not in ma:
805 # local unknown, remote created: the logic is described by the
805 # local unknown, remote created: the logic is described by the
806 # following table:
806 # following table:
807 #
807 #
808 # force branchmerge different | action
808 # force branchmerge different | action
809 # n * * | create
809 # n * * | create
810 # y n * | create
810 # y n * | create
811 # y y n | create
811 # y y n | create
812 # y y y | merge
812 # y y y | merge
813 #
813 #
814 # Checking whether the files are different is expensive, so we
814 # Checking whether the files are different is expensive, so we
815 # don't do that when we can avoid it.
815 # don't do that when we can avoid it.
816 if not force:
816 if not force:
817 actions[f] = ('c', (fl2,), "remote created")
817 actions[f] = ('c', (fl2,), "remote created")
818 elif not branchmerge:
818 elif not branchmerge:
819 actions[f] = ('c', (fl2,), "remote created")
819 actions[f] = ('c', (fl2,), "remote created")
820 else:
820 else:
821 actions[f] = ('cm', (fl2, pa.node()),
821 actions[f] = ('cm', (fl2, pa.node()),
822 "remote created, get or merge")
822 "remote created, get or merge")
823 elif n2 != ma[f]:
823 elif n2 != ma[f]:
824 if acceptremote:
824 if acceptremote:
825 actions[f] = ('c', (fl2,), "remote recreating")
825 actions[f] = ('c', (fl2,), "remote recreating")
826 else:
826 else:
827 actions[f] = ('dc', (None, f, f, False, pa.node()),
827 actions[f] = ('dc', (None, f, f, False, pa.node()),
828 "prompt deleted/changed")
828 "prompt deleted/changed")
829
829
830 return actions, diverge, renamedelete
830 return actions, diverge, renamedelete
831
831
832 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
832 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
833 """Resolves false conflicts where the nodeid changed but the content
833 """Resolves false conflicts where the nodeid changed but the content
834 remained the same."""
834 remained the same."""
835
835
836 for f, (m, args, msg) in actions.items():
836 for f, (m, args, msg) in actions.items():
837 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
837 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
838 # local did change but ended up with same content
838 # local did change but ended up with same content
839 actions[f] = 'r', None, "prompt same"
839 actions[f] = 'r', None, "prompt same"
840 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
840 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
841 # remote did change but ended up with same content
841 # remote did change but ended up with same content
842 del actions[f] # don't get = keep local deleted
842 del actions[f] # don't get = keep local deleted
843
843
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None):
    """Calculate the actions needed to merge mctx into wctx using ancestors.

    ``matcher`` (optional) restricts the calculation to matching files; an
    always-matching or missing matcher means the whole working copy is
    considered.  With a single ancestor this is a straight manifest merge;
    with several ancestors (merge.preferancestor=*) a "bid merge" is run:
    each ancestor produces candidate actions and an auction picks the best
    action per file.

    Returns a tuple (actions, diverge, renamedelete).
    """
    # Restate the matcher as the 'partial' callable manifestmerge expects.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = matcher.matchfn

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, partial,
                acceptremote, followcopies)
            _checkunknownfiles(repo, wctx, mctx, force, actions)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            # NOTE(review): unlike ``diverge`` above, this comparison keeps
            # the *longest* renamedelete set, which seems to contradict the
            # "shortest set" comment — preserved as-is; confirm upstream
            # intent before changing.
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    # Demote false conflicts (nodeid changed, content identical).
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # Working directory: forget files removed on the other side.
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete
933
937
def batchremove(repo, actions):
    """apply removes to the working directory

    ``actions`` is an iterable of (filename, args, message) tuples.

    yields (count, filename) tuples for progress updates, roughly every
    100 processed files and once more for the final partial batch.
    """
    verbose = repo.ui.verbose
    # Hoist attribute lookups out of the (potentially large) loop.
    unlink = util.unlinkpath
    wjoin = repo.wjoin
    audit = repo.wvfs.audit
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        audit(f)
        try:
            unlink(wjoin(f), ignoremissing=True)
        except OSError as inst:
            # Best effort: report the failure but keep removing the rest.
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f
960
964
def batchget(repo, mctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    ``actions`` is an iterable of (filename, (flags,), message) tuples.

    yields (count, filename) tuples for progress updates, roughly every
    100 processed files and once more for the final partial batch.
    """
    verbose = repo.ui.verbose
    # Hoist attribute lookups out of the (potentially large) loop.
    fctx = mctx.filectx
    wwrite = repo.wwrite
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> g\n" % (f, msg))
        if verbose:
            repo.ui.note(_("getting %s\n") % f)
        # args[0] carries the flags ('x', 'l', ...) for the written file.
        wwrite(f, fctx(f).data(), args[0])
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f
983
987
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    ``actions`` maps an action type ('r', 'g', 'm', 'cd', 'dc', 'k', 'f',
    'a', 'am', 'dm', 'dg', 'e') to a list of (filename, args, msg) tuples.

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node())
    moves = []
    # Sort every action list in place for deterministic processing order.
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    # Record every conflicting file in the merge state before touching the
    # working directory, so an interrupted merge can be resumed.
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    audit = repo.wvfs.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    # 'k' (keep) actions are no-ops and excluded from the progress total.
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        audit(f)
        repo.wwrite(f, wctx.filectx(f0).data(), flags)
        util.unlinkpath(repo.wjoin(f0))
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        repo.wwrite(f, mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        audit(f)
        util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        # Only merge the files the driver left unresolved.
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite)
            continue
        audit(f)
        complete, r = ms.preresolve(f, wctx, labels=labels)
        if not complete:
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        ms.resolve(f, wctx, labels=labels)

    ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

    ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    # Fold any follow-up actions the merge state produced back into the
    # caller's action lists so they get recorded in the dirstate.
    extraactions = ms.actions()
    for k, acts in extraactions.iteritems():
        actions[k].extend(acts)

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
1184
1188
def recordupdates(repo, actions, branchmerge):
    """record merge actions to the dirstate

    ``actions`` maps an action type to a list of (filename, args, msg)
    tuples; ``branchmerge`` selects merge-style vs. update-style dirstate
    bookkeeping for each action.
    """
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
1268
1272
1269 def update(repo, node, branchmerge, force, ancestor=None,
1273 def update(repo, node, branchmerge, force, ancestor=None,
1270 mergeancestor=False, labels=None, matcher=None):
1274 mergeancestor=False, labels=None, matcher=None):
1271 """
1275 """
1272 Perform a merge between the working directory and the given node
1276 Perform a merge between the working directory and the given node
1273
1277
1274 node = the node to update to, or None if unspecified
1278 node = the node to update to, or None if unspecified
1275 branchmerge = whether to merge between branches
1279 branchmerge = whether to merge between branches
1276 force = whether to force branch merging or file overwriting
1280 force = whether to force branch merging or file overwriting
1277 matcher = a matcher to filter file lists (dirstate not updated)
1281 matcher = a matcher to filter file lists (dirstate not updated)
1278 mergeancestor = whether it is merging with an ancestor. If true,
1282 mergeancestor = whether it is merging with an ancestor. If true,
1279 we should accept the incoming changes for any prompts that occur.
1283 we should accept the incoming changes for any prompts that occur.
1280 If false, merging with an ancestor (fast-forward) is only allowed
1284 If false, merging with an ancestor (fast-forward) is only allowed
1281 between different named branches. This flag is used by rebase extension
1285 between different named branches. This flag is used by rebase extension
1282 as a temporary fix and should be avoided in general.
1286 as a temporary fix and should be avoided in general.
1283
1287
1284 The table below shows all the behaviors of the update command
1288 The table below shows all the behaviors of the update command
1285 given the -c and -C or no options, whether the working directory
1289 given the -c and -C or no options, whether the working directory
1286 is dirty, whether a revision is specified, and the relationship of
1290 is dirty, whether a revision is specified, and the relationship of
1287 the parent rev to the target rev (linear, on the same named
1291 the parent rev to the target rev (linear, on the same named
1288 branch, or on another named branch).
1292 branch, or on another named branch).
1289
1293
1290 This logic is tested by test-update-branches.t.
1294 This logic is tested by test-update-branches.t.
1291
1295
1292 -c -C dirty rev | linear same cross
1296 -c -C dirty rev | linear same cross
1293 n n n n | ok (1) x
1297 n n n n | ok (1) x
1294 n n n y | ok ok ok
1298 n n n y | ok ok ok
1295 n n y n | merge (2) (2)
1299 n n y n | merge (2) (2)
1296 n n y y | merge (3) (3)
1300 n n y y | merge (3) (3)
1297 n y * * | discard discard discard
1301 n y * * | discard discard discard
1298 y n y * | (4) (4) (4)
1302 y n y * | (4) (4) (4)
1299 y n n * | ok ok ok
1303 y n n * | ok ok ok
1300 y y * * | (5) (5) (5)
1304 y y * * | (5) (5) (5)
1301
1305
1302 x = can't happen
1306 x = can't happen
1303 * = don't-care
1307 * = don't-care
1304 1 = abort: not a linear update (merge or update --check to force update)
1308 1 = abort: not a linear update (merge or update --check to force update)
1305 2 = abort: uncommitted changes (commit and merge, or update --clean to
1309 2 = abort: uncommitted changes (commit and merge, or update --clean to
1306 discard changes)
1310 discard changes)
1307 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1311 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1308 4 = abort: uncommitted changes (checked in commands.py)
1312 4 = abort: uncommitted changes (checked in commands.py)
1309 5 = incompatible options (checked in commands.py)
1313 5 = incompatible options (checked in commands.py)
1310
1314
1311 Return the same tuple as applyupdates().
1315 Return the same tuple as applyupdates().
1312 """
1316 """
1313
1317
1314 onode = node
1318 onode = node
1315 wlock = repo.wlock()
1319 wlock = repo.wlock()
1316 # If we're doing a partial update, we need to skip updating
1320 # If we're doing a partial update, we need to skip updating
1317 # the dirstate, so make a note of any partial-ness to the
1321 # the dirstate, so make a note of any partial-ness to the
1318 # update here.
1322 # update here.
1319 if matcher is None or matcher.always():
1323 if matcher is None or matcher.always():
1320 partial = False
1324 partial = False
1321 else:
1325 else:
1322 partial = True
1326 partial = True
1323 try:
1327 try:
1324 wc = repo[None]
1328 wc = repo[None]
1325 pl = wc.parents()
1329 pl = wc.parents()
1326 p1 = pl[0]
1330 p1 = pl[0]
1327 pas = [None]
1331 pas = [None]
1328 if ancestor is not None:
1332 if ancestor is not None:
1329 pas = [repo[ancestor]]
1333 pas = [repo[ancestor]]
1330
1334
1331 if node is None:
1335 if node is None:
1332 if (repo.ui.configbool('devel', 'all-warnings')
1336 if (repo.ui.configbool('devel', 'all-warnings')
1333 or repo.ui.configbool('devel', 'oldapi')):
1337 or repo.ui.configbool('devel', 'oldapi')):
1334 repo.ui.develwarn('update with no target')
1338 repo.ui.develwarn('update with no target')
1335 rev, _mark, _act = destutil.destupdate(repo)
1339 rev, _mark, _act = destutil.destupdate(repo)
1336 node = repo[rev].node()
1340 node = repo[rev].node()
1337
1341
1338 overwrite = force and not branchmerge
1342 overwrite = force and not branchmerge
1339
1343
1340 p2 = repo[node]
1344 p2 = repo[node]
1341 if pas[0] is None:
1345 if pas[0] is None:
1342 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1346 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1343 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1347 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1344 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1348 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1345 else:
1349 else:
1346 pas = [p1.ancestor(p2, warn=branchmerge)]
1350 pas = [p1.ancestor(p2, warn=branchmerge)]
1347
1351
1348 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1352 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1349
1353
1350 ### check phase
1354 ### check phase
1351 if not overwrite:
1355 if not overwrite:
1352 if len(pl) > 1:
1356 if len(pl) > 1:
1353 raise error.Abort(_("outstanding uncommitted merge"))
1357 raise error.Abort(_("outstanding uncommitted merge"))
1354 ms = mergestate.read(repo)
1358 ms = mergestate.read(repo)
1355 if list(ms.unresolved()):
1359 if list(ms.unresolved()):
1356 raise error.Abort(_("outstanding merge conflicts"))
1360 raise error.Abort(_("outstanding merge conflicts"))
1357 if branchmerge:
1361 if branchmerge:
1358 if pas == [p2]:
1362 if pas == [p2]:
1359 raise error.Abort(_("merging with a working directory ancestor"
1363 raise error.Abort(_("merging with a working directory ancestor"
1360 " has no effect"))
1364 " has no effect"))
1361 elif pas == [p1]:
1365 elif pas == [p1]:
1362 if not mergeancestor and p1.branch() == p2.branch():
1366 if not mergeancestor and p1.branch() == p2.branch():
1363 raise error.Abort(_("nothing to merge"),
1367 raise error.Abort(_("nothing to merge"),
1364 hint=_("use 'hg update' "
1368 hint=_("use 'hg update' "
1365 "or check 'hg heads'"))
1369 "or check 'hg heads'"))
1366 if not force and (wc.files() or wc.deleted()):
1370 if not force and (wc.files() or wc.deleted()):
1367 raise error.Abort(_("uncommitted changes"),
1371 raise error.Abort(_("uncommitted changes"),
1368 hint=_("use 'hg status' to list changes"))
1372 hint=_("use 'hg status' to list changes"))
1369 for s in sorted(wc.substate):
1373 for s in sorted(wc.substate):
1370 wc.sub(s).bailifchanged()
1374 wc.sub(s).bailifchanged()
1371
1375
1372 elif not overwrite:
1376 elif not overwrite:
1373 if p1 == p2: # no-op update
1377 if p1 == p2: # no-op update
1374 # call the hooks and exit early
1378 # call the hooks and exit early
1375 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1379 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1376 repo.hook('update', parent1=xp2, parent2='', error=0)
1380 repo.hook('update', parent1=xp2, parent2='', error=0)
1377 return 0, 0, 0, 0
1381 return 0, 0, 0, 0
1378
1382
1379 if pas not in ([p1], [p2]): # nonlinear
1383 if pas not in ([p1], [p2]): # nonlinear
1380 dirty = wc.dirty(missing=True)
1384 dirty = wc.dirty(missing=True)
1381 if dirty or onode is None:
1385 if dirty or onode is None:
1382 # Branching is a bit strange to ensure we do the minimal
1386 # Branching is a bit strange to ensure we do the minimal
1383 # amount of call to obsolete.background.
1387 # amount of call to obsolete.background.
1384 foreground = obsolete.foreground(repo, [p1.node()])
1388 foreground = obsolete.foreground(repo, [p1.node()])
1385 # note: the <node> variable contains a random identifier
1389 # note: the <node> variable contains a random identifier
1386 if repo[node].node() in foreground:
1390 if repo[node].node() in foreground:
1387 pas = [p1] # allow updating to successors
1391 pas = [p1] # allow updating to successors
1388 elif dirty:
1392 elif dirty:
1389 msg = _("uncommitted changes")
1393 msg = _("uncommitted changes")
1390 if onode is None:
1394 if onode is None:
1391 hint = _("commit and merge, or update --clean to"
1395 hint = _("commit and merge, or update --clean to"
1392 " discard changes")
1396 " discard changes")
1393 else:
1397 else:
1394 hint = _("commit or update --clean to discard"
1398 hint = _("commit or update --clean to discard"
1395 " changes")
1399 " changes")
1396 raise error.Abort(msg, hint=hint)
1400 raise error.Abort(msg, hint=hint)
1397 else: # node is none
1401 else: # node is none
1398 msg = _("not a linear update")
1402 msg = _("not a linear update")
1399 hint = _("merge or update --check to force update")
1403 hint = _("merge or update --check to force update")
1400 raise error.Abort(msg, hint=hint)
1404 raise error.Abort(msg, hint=hint)
1401 else:
1405 else:
1402 # Allow jumping branches if clean and specific rev given
1406 # Allow jumping branches if clean and specific rev given
1403 pas = [p1]
1407 pas = [p1]
1404
1408
1405 # deprecated config: merge.followcopies
1409 # deprecated config: merge.followcopies
1406 followcopies = False
1410 followcopies = False
1407 if overwrite:
1411 if overwrite:
1408 pas = [wc]
1412 pas = [wc]
1409 elif pas == [p2]: # backwards
1413 elif pas == [p2]: # backwards
1410 pas = [wc.p1()]
1414 pas = [wc.p1()]
1411 elif not branchmerge and not wc.dirty(missing=True):
1415 elif not branchmerge and not wc.dirty(missing=True):
1412 pass
1416 pass
1413 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1417 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1414 followcopies = True
1418 followcopies = True
1415
1419
1416 ### calculate phase
1420 ### calculate phase
1417 if matcher is None or matcher.always():
1418 partial = False
1419 else:
1420 partial = matcher.matchfn
1421 actionbyfile, diverge, renamedelete = calculateupdates(
1421 actionbyfile, diverge, renamedelete = calculateupdates(
1422 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1422 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1423 followcopies)
1423 followcopies, matcher=matcher)
1424 # Convert to dictionary-of-lists format
1424 # Convert to dictionary-of-lists format
1425 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1425 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1426 for f, (m, args, msg) in actionbyfile.iteritems():
1426 for f, (m, args, msg) in actionbyfile.iteritems():
1427 if m not in actions:
1427 if m not in actions:
1428 actions[m] = []
1428 actions[m] = []
1429 actions[m].append((f, args, msg))
1429 actions[m].append((f, args, msg))
1430
1430
1431 if not util.checkcase(repo.path):
1431 if not util.checkcase(repo.path):
1432 # check collision between files only in p2 for clean update
1432 # check collision between files only in p2 for clean update
1433 if (not branchmerge and
1433 if (not branchmerge and
1434 (force or not wc.dirty(missing=True, branch=False))):
1434 (force or not wc.dirty(missing=True, branch=False))):
1435 _checkcollision(repo, p2.manifest(), None)
1435 _checkcollision(repo, p2.manifest(), None)
1436 else:
1436 else:
1437 _checkcollision(repo, wc.manifest(), actions)
1437 _checkcollision(repo, wc.manifest(), actions)
1438
1438
1439 # Prompt and create actions. Most of this is in the resolve phase
1439 # Prompt and create actions. Most of this is in the resolve phase
1440 # already, but we can't handle .hgsubstate in filemerge or
1440 # already, but we can't handle .hgsubstate in filemerge or
1441 # subrepo.submerge yet so we have to keep prompting for it.
1441 # subrepo.submerge yet so we have to keep prompting for it.
1442 for f, args, msg in sorted(actions['cd']):
1442 for f, args, msg in sorted(actions['cd']):
1443 if f != '.hgsubstate':
1443 if f != '.hgsubstate':
1444 continue
1444 continue
1445 if repo.ui.promptchoice(
1445 if repo.ui.promptchoice(
1446 _("local changed %s which remote deleted\n"
1446 _("local changed %s which remote deleted\n"
1447 "use (c)hanged version or (d)elete?"
1447 "use (c)hanged version or (d)elete?"
1448 "$$ &Changed $$ &Delete") % f, 0):
1448 "$$ &Changed $$ &Delete") % f, 0):
1449 actions['r'].append((f, None, "prompt delete"))
1449 actions['r'].append((f, None, "prompt delete"))
1450 elif f in p1:
1450 elif f in p1:
1451 actions['am'].append((f, None, "prompt keep"))
1451 actions['am'].append((f, None, "prompt keep"))
1452 else:
1452 else:
1453 actions['a'].append((f, None, "prompt keep"))
1453 actions['a'].append((f, None, "prompt keep"))
1454
1454
1455 for f, args, msg in sorted(actions['dc']):
1455 for f, args, msg in sorted(actions['dc']):
1456 if f != '.hgsubstate':
1456 if f != '.hgsubstate':
1457 continue
1457 continue
1458 f1, f2, fa, move, anc = args
1458 f1, f2, fa, move, anc = args
1459 flags = p2[f2].flags()
1459 flags = p2[f2].flags()
1460 if repo.ui.promptchoice(
1460 if repo.ui.promptchoice(
1461 _("remote changed %s which local deleted\n"
1461 _("remote changed %s which local deleted\n"
1462 "use (c)hanged version or leave (d)eleted?"
1462 "use (c)hanged version or leave (d)eleted?"
1463 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1463 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1464 actions['g'].append((f, (flags,), "prompt recreating"))
1464 actions['g'].append((f, (flags,), "prompt recreating"))
1465
1465
1466 # divergent renames
1466 # divergent renames
1467 for f, fl in sorted(diverge.iteritems()):
1467 for f, fl in sorted(diverge.iteritems()):
1468 repo.ui.warn(_("note: possible conflict - %s was renamed "
1468 repo.ui.warn(_("note: possible conflict - %s was renamed "
1469 "multiple times to:\n") % f)
1469 "multiple times to:\n") % f)
1470 for nf in fl:
1470 for nf in fl:
1471 repo.ui.warn(" %s\n" % nf)
1471 repo.ui.warn(" %s\n" % nf)
1472
1472
1473 # rename and delete
1473 # rename and delete
1474 for f, fl in sorted(renamedelete.iteritems()):
1474 for f, fl in sorted(renamedelete.iteritems()):
1475 repo.ui.warn(_("note: possible conflict - %s was deleted "
1475 repo.ui.warn(_("note: possible conflict - %s was deleted "
1476 "and renamed to:\n") % f)
1476 "and renamed to:\n") % f)
1477 for nf in fl:
1477 for nf in fl:
1478 repo.ui.warn(" %s\n" % nf)
1478 repo.ui.warn(" %s\n" % nf)
1479
1479
1480 ### apply phase
1480 ### apply phase
1481 if not branchmerge: # just jump to the new rev
1481 if not branchmerge: # just jump to the new rev
1482 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1482 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1483 if not partial:
1483 if not partial:
1484 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1484 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1485 # note that we're in the middle of an update
1485 # note that we're in the middle of an update
1486 repo.vfs.write('updatestate', p2.hex())
1486 repo.vfs.write('updatestate', p2.hex())
1487
1487
1488 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1488 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1489
1489
1490 if not partial:
1490 if not partial:
1491 repo.dirstate.beginparentchange()
1491 repo.dirstate.beginparentchange()
1492 repo.setparents(fp1, fp2)
1492 repo.setparents(fp1, fp2)
1493 recordupdates(repo, actions, branchmerge)
1493 recordupdates(repo, actions, branchmerge)
1494 # update completed, clear state
1494 # update completed, clear state
1495 util.unlink(repo.join('updatestate'))
1495 util.unlink(repo.join('updatestate'))
1496
1496
1497 if not branchmerge:
1497 if not branchmerge:
1498 repo.dirstate.setbranch(p2.branch())
1498 repo.dirstate.setbranch(p2.branch())
1499 repo.dirstate.endparentchange()
1499 repo.dirstate.endparentchange()
1500 finally:
1500 finally:
1501 wlock.release()
1501 wlock.release()
1502
1502
1503 if not partial:
1503 if not partial:
1504 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1504 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1505 return stats
1505 return stats
1506
1506
1507 def graft(repo, ctx, pctx, labels, keepparent=False):
1507 def graft(repo, ctx, pctx, labels, keepparent=False):
1508 """Do a graft-like merge.
1508 """Do a graft-like merge.
1509
1509
1510 This is a merge where the merge ancestor is chosen such that one
1510 This is a merge where the merge ancestor is chosen such that one
1511 or more changesets are grafted onto the current changeset. In
1511 or more changesets are grafted onto the current changeset. In
1512 addition to the merge, this fixes up the dirstate to include only
1512 addition to the merge, this fixes up the dirstate to include only
1513 a single parent (if keepparent is False) and tries to duplicate any
1513 a single parent (if keepparent is False) and tries to duplicate any
1514 renames/copies appropriately.
1514 renames/copies appropriately.
1515
1515
1516 ctx - changeset to rebase
1516 ctx - changeset to rebase
1517 pctx - merge base, usually ctx.p1()
1517 pctx - merge base, usually ctx.p1()
1518 labels - merge labels eg ['local', 'graft']
1518 labels - merge labels eg ['local', 'graft']
1519 keepparent - keep second parent if any
1519 keepparent - keep second parent if any
1520
1520
1521 """
1521 """
1522 # If we're grafting a descendant onto an ancestor, be sure to pass
1522 # If we're grafting a descendant onto an ancestor, be sure to pass
1523 # mergeancestor=True to update. This does two things: 1) allows the merge if
1523 # mergeancestor=True to update. This does two things: 1) allows the merge if
1524 # the destination is the same as the parent of the ctx (so we can use graft
1524 # the destination is the same as the parent of the ctx (so we can use graft
1525 # to copy commits), and 2) informs update that the incoming changes are
1525 # to copy commits), and 2) informs update that the incoming changes are
1526 # newer than the destination so it doesn't prompt about "remote changed foo
1526 # newer than the destination so it doesn't prompt about "remote changed foo
1527 # which local deleted".
1527 # which local deleted".
1528 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1528 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1529
1529
1530 stats = update(repo, ctx.node(), True, True, pctx.node(),
1530 stats = update(repo, ctx.node(), True, True, pctx.node(),
1531 mergeancestor=mergeancestor, labels=labels)
1531 mergeancestor=mergeancestor, labels=labels)
1532
1532
1533 pother = nullid
1533 pother = nullid
1534 parents = ctx.parents()
1534 parents = ctx.parents()
1535 if keepparent and len(parents) == 2 and pctx in parents:
1535 if keepparent and len(parents) == 2 and pctx in parents:
1536 parents.remove(pctx)
1536 parents.remove(pctx)
1537 pother = parents[0].node()
1537 pother = parents[0].node()
1538
1538
1539 repo.dirstate.beginparentchange()
1539 repo.dirstate.beginparentchange()
1540 repo.setparents(repo['.'].node(), pother)
1540 repo.setparents(repo['.'].node(), pother)
1541 repo.dirstate.write(repo.currenttransaction())
1541 repo.dirstate.write(repo.currenttransaction())
1542 # fix up dirstate for copies and renames
1542 # fix up dirstate for copies and renames
1543 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1543 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1544 repo.dirstate.endparentchange()
1544 repo.dirstate.endparentchange()
1545 return stats
1545 return stats
General Comments 0
You need to be logged in to leave comments. Login now