mergeresult: introduce filemap() which yields filename based mapping...
Pulkit Goyal
r45906:3c783ff0 default
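The only functional change in this diff is in convert's `_calculatemergedfiles()` (hg.py, line 232 in the hunk below): instead of reaching into the merge result's internal dict with `pycompat.iteritems(mresult.actions)`, the caller now iterates the new `mergeresult.filemap()` generator. The following is a minimal sketch of the shape of that API, assuming `mergeresult` keeps an internal filename -> (action, data, message) dict; the attribute and helper names are illustrative, not necessarily what upstream uses:

    # Illustrative sketch only -- not the exact mercurial.merge implementation.
    class mergeresult(object):
        def __init__(self):
            # assumed internal store: filename -> (action, data, message)
            self._filemapping = {}

        def addfile(self, filename, action, data, message):
            """record that `action` should be applied to `filename`"""
            self._filemapping[filename] = (action, data, message)

        def filemap(self):
            """yield (filename, (action, data, message)) for each file

            Callers iterate this instead of the raw actions dict:

                for file, (action, info, msg) in mresult.filemap():
                    ...
            """
            for filename, actiondata in self._filemapping.items():
                yield filename, actiondata

Hiding the dict behind a generator presumably lets mergeresult change its internal representation later (for example, indexing by action as well as by filename) without touching consumers such as the one below.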
--- a/hgext/convert/hg.py
+++ b/hgext/convert/hg.py
@@ -1,730 +1,730 @@
# hg.py - hg backend for convert extension
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Notes for hg->hg conversion:
#
# * Old versions of Mercurial didn't trim the whitespace from the ends
#   of commit messages, but new versions do. Changesets created by
#   those older versions, then converted, may thus have different
#   hashes for changesets that are otherwise identical.
#
# * Using "--config convert.hg.saverev=true" will make the source
#   identifier to be stored in the converted revision. This will cause
#   the converted revision to have a different identity than the
#   source.
from __future__ import absolute_import

import os
import re
import time

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial import (
    bookmarks,
    context,
    error,
    exchange,
    hg,
    lock as lockmod,
    merge as mergemod,
    node as nodemod,
    phases,
    pycompat,
    scmutil,
    util,
)
from mercurial.utils import dateutil

stringio = util.stringio

from . import common

mapfile = common.mapfile
NoRepo = common.NoRepo

sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')


class mercurial_sink(common.converter_sink):
    def __init__(self, ui, repotype, path):
        common.converter_sink.__init__(self, ui, repotype, path)
        self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames')
        self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches')
        self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch')
        self.lastbranch = None
        if os.path.isdir(path) and len(os.listdir(path)) > 0:
            try:
                self.repo = hg.repository(self.ui, path)
                if not self.repo.local():
                    raise NoRepo(
                        _(b'%s is not a local Mercurial repository') % path
                    )
            except error.RepoError as err:
                ui.traceback()
                raise NoRepo(err.args[0])
        else:
            try:
                ui.status(_(b'initializing destination %s repository\n') % path)
                self.repo = hg.repository(self.ui, path, create=True)
                if not self.repo.local():
                    raise NoRepo(
                        _(b'%s is not a local Mercurial repository') % path
                    )
                self.created.append(path)
            except error.RepoError:
                ui.traceback()
                raise NoRepo(
                    _(b"could not create hg repository %s as sink") % path
                )
        self.lock = None
        self.wlock = None
        self.filemapmode = False
        self.subrevmaps = {}

    def before(self):
        self.ui.debug(b'run hg sink pre-conversion action\n')
        self.wlock = self.repo.wlock()
        self.lock = self.repo.lock()

    def after(self):
        self.ui.debug(b'run hg sink post-conversion action\n')
        if self.lock:
            self.lock.release()
        if self.wlock:
            self.wlock.release()

    def revmapfile(self):
        return self.repo.vfs.join(b"shamap")

    def authorfile(self):
        return self.repo.vfs.join(b"authormap")

    def setbranch(self, branch, pbranches):
        if not self.clonebranches:
            return

        setbranch = branch != self.lastbranch
        self.lastbranch = branch
        if not branch:
            branch = b'default'
        pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches]

        branchpath = os.path.join(self.path, branch)
        if setbranch:
            self.after()
            try:
                self.repo = hg.repository(self.ui, branchpath)
            except Exception:
                self.repo = hg.repository(self.ui, branchpath, create=True)
            self.before()

        # pbranches may bring revisions from other branches (merge parents)
        # Make sure we have them, or pull them.
        missings = {}
        for b in pbranches:
            try:
                self.repo.lookup(b[0])
            except Exception:
                missings.setdefault(b[1], []).append(b[0])

        if missings:
            self.after()
            for pbranch, heads in sorted(pycompat.iteritems(missings)):
                pbranchpath = os.path.join(self.path, pbranch)
                prepo = hg.peer(self.ui, {}, pbranchpath)
                self.ui.note(
                    _(b'pulling from %s into %s\n') % (pbranch, branch)
                )
                exchange.pull(
                    self.repo, prepo, [prepo.lookup(h) for h in heads]
                )
            self.before()

    def _rewritetags(self, source, revmap, data):
        fp = stringio()
        for line in data.splitlines():
            s = line.split(b' ', 1)
            if len(s) != 2:
                self.ui.warn(_(b'invalid tag entry: "%s"\n') % line)
                fp.write(b'%s\n' % line)  # Bogus, but keep for hash stability
                continue
            revid = revmap.get(source.lookuprev(s[0]))
            if not revid:
                if s[0] == nodemod.nullhex:
                    revid = s[0]
                else:
                    # missing, but keep for hash stability
                    self.ui.warn(_(b'missing tag entry: "%s"\n') % line)
                    fp.write(b'%s\n' % line)
                    continue
            fp.write(b'%s %s\n' % (revid, s[1]))
        return fp.getvalue()

    def _rewritesubstate(self, source, data):
        fp = stringio()
        for line in data.splitlines():
            s = line.split(b' ', 1)
            if len(s) != 2:
                continue

            revid = s[0]
            subpath = s[1]
            if revid != nodemod.nullhex:
                revmap = self.subrevmaps.get(subpath)
                if revmap is None:
                    revmap = mapfile(
                        self.ui, self.repo.wjoin(subpath, b'.hg/shamap')
                    )
                    self.subrevmaps[subpath] = revmap

                    # It is reasonable that one or more of the subrepos don't
                    # need to be converted, in which case they can be cloned
                    # into place instead of converted. Therefore, only warn
                    # once.
                    msg = _(b'no ".hgsubstate" updates will be made for "%s"\n')
                    if len(revmap) == 0:
                        sub = self.repo.wvfs.reljoin(subpath, b'.hg')

                        if self.repo.wvfs.exists(sub):
                            self.ui.warn(msg % subpath)

                newid = revmap.get(revid)
                if not newid:
                    if len(revmap) > 0:
                        self.ui.warn(
                            _(b"%s is missing from %s/.hg/shamap\n")
                            % (revid, subpath)
                        )
                else:
                    revid = newid

            fp.write(b'%s %s\n' % (revid, subpath))

        return fp.getvalue()

    def _calculatemergedfiles(self, source, p1ctx, p2ctx):
        """Calculates the files from p2 that we need to pull in when merging p1
        and p2, given that the merge is coming from the given source.

        This prevents us from losing files that only exist in the target p2 and
        that don't come from the source repo (like if you're merging multiple
        repositories together).
        """
        anc = [p1ctx.ancestor(p2ctx)]
        # Calculate what files are coming from p2
        # TODO: mresult.commitinfo might be able to get that info
        mresult = mergemod.calculateupdates(
            self.repo,
            p1ctx,
            p2ctx,
            anc,
            branchmerge=True,
            force=True,
            acceptremote=False,
            followcopies=False,
        )

-        for file, (action, info, msg) in pycompat.iteritems(mresult.actions):
+        for file, (action, info, msg) in mresult.filemap():
            if source.targetfilebelongstosource(file):
                # If the file belongs to the source repo, ignore the p2
                # since it will be covered by the existing fileset.
                continue

            # If the file requires actual merging, abort. We don't have enough
            # context to resolve merges correctly.
            if action in [b'm', b'dm', b'cd', b'dc']:
                raise error.Abort(
                    _(
                        b"unable to convert merge commit "
                        b"since target parents do not merge cleanly (file "
                        b"%s, parents %s and %s)"
                    )
                    % (file, p1ctx, p2ctx)
                )
            elif action == b'k':
                # 'keep' means nothing changed from p1
                continue
            else:
                # Any other change means we want to take the p2 version
                yield file

    def putcommit(
        self, files, copies, parents, commit, source, revmap, full, cleanp2
    ):
        files = dict(files)

        def getfilectx(repo, memctx, f):
            if p2ctx and f in p2files and f not in copies:
                self.ui.debug(b'reusing %s from p2\n' % f)
                try:
                    return p2ctx[f]
                except error.ManifestLookupError:
                    # If the file doesn't exist in p2, then we're syncing a
                    # delete, so just return None.
                    return None
            try:
                v = files[f]
            except KeyError:
                return None
            data, mode = source.getfile(f, v)
            if data is None:
                return None
            if f == b'.hgtags':
                data = self._rewritetags(source, revmap, data)
            if f == b'.hgsubstate':
                data = self._rewritesubstate(source, data)
            return context.memfilectx(
                self.repo,
                memctx,
                f,
                data,
                b'l' in mode,
                b'x' in mode,
                copies.get(f),
            )

        pl = []
        for p in parents:
            if p not in pl:
                pl.append(p)
        parents = pl
        nparents = len(parents)
        if self.filemapmode and nparents == 1:
            m1node = self.repo.changelog.read(nodemod.bin(parents[0]))[0]
            parent = parents[0]

        if len(parents) < 2:
            parents.append(nodemod.nullid)
        if len(parents) < 2:
            parents.append(nodemod.nullid)
        p2 = parents.pop(0)

        text = commit.desc

        sha1s = re.findall(sha1re, text)
        for sha1 in sha1s:
            oldrev = source.lookuprev(sha1)
            newrev = revmap.get(oldrev)
            if newrev is not None:
                text = text.replace(sha1, newrev[: len(sha1)])

        extra = commit.extra.copy()

        sourcename = self.repo.ui.config(b'convert', b'hg.sourcename')
        if sourcename:
            extra[b'convert_source'] = sourcename

        for label in (
            b'source',
            b'transplant_source',
            b'rebase_source',
            b'intermediate-source',
        ):
            node = extra.get(label)

            if node is None:
                continue

            # Only transplant stores its reference in binary
            if label == b'transplant_source':
                node = nodemod.hex(node)

            newrev = revmap.get(node)
            if newrev is not None:
                if label == b'transplant_source':
                    newrev = nodemod.bin(newrev)

                extra[label] = newrev

        if self.branchnames and commit.branch:
            extra[b'branch'] = commit.branch
        if commit.rev and commit.saverev:
            extra[b'convert_revision'] = commit.rev

        while parents:
            p1 = p2
            p2 = parents.pop(0)
            p1ctx = self.repo[p1]
            p2ctx = None
            if p2 != nodemod.nullid:
                p2ctx = self.repo[p2]
            fileset = set(files)
            if full:
                fileset.update(self.repo[p1])
                fileset.update(self.repo[p2])

            if p2ctx:
                p2files = set(cleanp2)
                for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
                    p2files.add(file)
                    fileset.add(file)

            ctx = context.memctx(
                self.repo,
                (p1, p2),
                text,
                fileset,
                getfilectx,
                commit.author,
                commit.date,
                extra,
            )

            # We won't know if the conversion changes the node until after the
            # commit, so copy the source's phase for now.
            self.repo.ui.setconfig(
                b'phases',
                b'new-commit',
                phases.phasenames[commit.phase],
                b'convert',
            )

            with self.repo.transaction(b"convert") as tr:
                if self.repo.ui.config(b'convert', b'hg.preserve-hash'):
                    origctx = commit.ctx
                else:
                    origctx = None
                node = nodemod.hex(self.repo.commitctx(ctx, origctx=origctx))

                # If the node value has changed, but the phase is lower than
                # draft, set it back to draft since it hasn't been exposed
                # anywhere.
                if commit.rev != node:
                    ctx = self.repo[node]
                    if ctx.phase() < phases.draft:
                        phases.registernew(
                            self.repo, tr, phases.draft, [ctx.node()]
                        )

            text = b"(octopus merge fixup)\n"
            p2 = node

        if self.filemapmode and nparents == 1:
            man = self.repo.manifestlog.getstorage(b'')
            mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
            closed = b'close' in commit.extra
            if not closed and not man.cmp(m1node, man.revision(mnode)):
                self.ui.status(_(b"filtering out empty revision\n"))
                self.repo.rollback(force=True)
                return parent
        return p2

    def puttags(self, tags):
        tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
        tagparent = tagparent or nodemod.nullid

        oldlines = set()
        for branch, heads in pycompat.iteritems(self.repo.branchmap()):
            for h in heads:
                if b'.hgtags' in self.repo[h]:
                    oldlines.update(
                        set(self.repo[h][b'.hgtags'].data().splitlines(True))
                    )
        oldlines = sorted(list(oldlines))

        newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags])
        if newlines == oldlines:
            return None, None

        # if the old and new tags match, then there is nothing to update
        oldtags = set()
        newtags = set()
        for line in oldlines:
            s = line.strip().split(b' ', 1)
            if len(s) != 2:
                continue
            oldtags.add(s[1])
        for line in newlines:
            s = line.strip().split(b' ', 1)
            if len(s) != 2:
                continue
            if s[1] not in oldtags:
                newtags.add(s[1].strip())

        if not newtags:
            return None, None

        data = b"".join(newlines)

        def getfilectx(repo, memctx, f):
            return context.memfilectx(repo, memctx, f, data, False, False, None)

        self.ui.status(_(b"updating tags\n"))
        date = b"%d 0" % int(time.mktime(time.gmtime()))
        extra = {b'branch': self.tagsbranch}
        ctx = context.memctx(
            self.repo,
            (tagparent, None),
            b"update tags",
            [b".hgtags"],
            getfilectx,
            b"convert-repo",
            date,
            extra,
        )
        node = self.repo.commitctx(ctx)
        return nodemod.hex(node), nodemod.hex(tagparent)

    def setfilemapmode(self, active):
        self.filemapmode = active

    def putbookmarks(self, updatedbookmark):
        if not len(updatedbookmark):
            return
        wlock = lock = tr = None
        try:
            wlock = self.repo.wlock()
            lock = self.repo.lock()
            tr = self.repo.transaction(b'bookmark')
            self.ui.status(_(b"updating bookmarks\n"))
            destmarks = self.repo._bookmarks
            changes = [
                (bookmark, nodemod.bin(updatedbookmark[bookmark]))
                for bookmark in updatedbookmark
            ]
            destmarks.applychanges(self.repo, tr, changes)
            tr.close()
        finally:
            lockmod.release(lock, wlock, tr)

    def hascommitfrommap(self, rev):
        # the exact semantics of clonebranches is unclear so we can't say no
        return rev in self.repo or self.clonebranches

    def hascommitforsplicemap(self, rev):
        if rev not in self.repo and self.clonebranches:
            raise error.Abort(
                _(
                    b'revision %s not found in destination '
                    b'repository (lookups with clonebranches=true '
                    b'are not implemented)'
                )
                % rev
            )
        return rev in self.repo


class mercurial_source(common.converter_source):
    def __init__(self, ui, repotype, path, revs=None):
        common.converter_source.__init__(self, ui, repotype, path, revs)
        self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors')
        self.ignored = set()
        self.saverev = ui.configbool(b'convert', b'hg.saverev')
        try:
            self.repo = hg.repository(self.ui, path)
            # try to provoke an exception if this isn't really a hg
            # repo, but some other bogus compatible-looking url
            if not self.repo.local():
                raise error.RepoError
        except error.RepoError:
            ui.traceback()
            raise NoRepo(_(b"%s is not a local Mercurial repository") % path)
        self.lastrev = None
        self.lastctx = None
        self._changescache = None, None
        self.convertfp = None
        # Restrict converted revisions to startrev descendants
        startnode = ui.config(b'convert', b'hg.startrev')
        hgrevs = ui.config(b'convert', b'hg.revs')
        if hgrevs is None:
            if startnode is not None:
                try:
                    startnode = self.repo.lookup(startnode)
                except error.RepoError:
                    raise error.Abort(
                        _(b'%s is not a valid start revision') % startnode
                    )
                startrev = self.repo.changelog.rev(startnode)
                children = {startnode: 1}
                for r in self.repo.changelog.descendants([startrev]):
                    children[self.repo.changelog.node(r)] = 1
                self.keep = children.__contains__
            else:
                self.keep = util.always
            if revs:
                self._heads = [self.repo.lookup(r) for r in revs]
            else:
                self._heads = self.repo.heads()
        else:
            if revs or startnode is not None:
                raise error.Abort(
                    _(
                        b'hg.revs cannot be combined with '
                        b'hg.startrev or --rev'
                    )
                )
            nodes = set()
            parents = set()
            for r in scmutil.revrange(self.repo, [hgrevs]):
                ctx = self.repo[r]
                nodes.add(ctx.node())
                parents.update(p.node() for p in ctx.parents())
            self.keep = nodes.__contains__
            self._heads = nodes - parents

    def _changectx(self, rev):
        if self.lastrev != rev:
            self.lastctx = self.repo[rev]
            self.lastrev = rev
        return self.lastctx

    def _parents(self, ctx):
        return [p for p in ctx.parents() if p and self.keep(p.node())]

    def getheads(self):
        return [nodemod.hex(h) for h in self._heads if self.keep(h)]

    def getfile(self, name, rev):
        try:
            fctx = self._changectx(rev)[name]
            return fctx.data(), fctx.flags()
        except error.LookupError:
            return None, None

    def _changedfiles(self, ctx1, ctx2):
        ma, r = [], []
        maappend = ma.append
        rappend = r.append
        d = ctx1.manifest().diff(ctx2.manifest())
        for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d):
            if node2 is None:
                rappend(f)
            else:
                maappend(f)
        return ma, r

    def getchanges(self, rev, full):
        ctx = self._changectx(rev)
        parents = self._parents(ctx)
        if full or not parents:
            files = copyfiles = ctx.manifest()
        if parents:
            if self._changescache[0] == rev:
                ma, r = self._changescache[1]
            else:
                ma, r = self._changedfiles(parents[0], ctx)
            if not full:
                files = ma + r
            copyfiles = ma
        # _getcopies() is also run for roots and before filtering so missing
        # revlogs are detected early
        copies = self._getcopies(ctx, parents, copyfiles)
        cleanp2 = set()
        if len(parents) == 2:
            d = parents[1].manifest().diff(ctx.manifest(), clean=True)
            for f, value in pycompat.iteritems(d):
                if value is None:
                    cleanp2.add(f)
        changes = [(f, rev) for f in files if f not in self.ignored]
        changes.sort()
        return changes, copies, cleanp2

    def _getcopies(self, ctx, parents, files):
        copies = {}
        for name in files:
            if name in self.ignored:
                continue
            try:
                copysource = ctx.filectx(name).copysource()
                if copysource in self.ignored:
                    continue
                # Ignore copy sources not in parent revisions
                if not any(copysource in p for p in parents):
                    continue
                copies[name] = copysource
            except TypeError:
                pass
            except error.LookupError as e:
                if not self.ignoreerrors:
                    raise
                self.ignored.add(name)
                self.ui.warn(_(b'ignoring: %s\n') % e)
        return copies

    def getcommit(self, rev):
        ctx = self._changectx(rev)
        _parents = self._parents(ctx)
        parents = [p.hex() for p in _parents]
        optparents = [p.hex() for p in ctx.parents() if p and p not in _parents]
        crev = rev

        return common.commit(
            author=ctx.user(),
            date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'),
            desc=ctx.description(),
            rev=crev,
            parents=parents,
            optparents=optparents,
            branch=ctx.branch(),
            extra=ctx.extra(),
            sortkey=ctx.rev(),
            saverev=self.saverev,
            phase=ctx.phase(),
            ctx=ctx,
        )

    def numcommits(self):
        return len(self.repo)

    def gettags(self):
        # This will get written to .hgtags, filter non global tags out.
        tags = [
            t
            for t in self.repo.tagslist()
            if self.repo.tagtype(t[0]) == b'global'
        ]
        return {
            name: nodemod.hex(node) for name, node in tags if self.keep(node)
        }

    def getchangedfiles(self, rev, i):
        ctx = self._changectx(rev)
        parents = self._parents(ctx)
        if not parents and i is None:
            i = 0
            ma, r = ctx.manifest().keys(), []
        else:
            i = i or 0
            ma, r = self._changedfiles(parents[i], ctx)
        ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)]

        if i == 0:
            self._changescache = (rev, (ma, r))

        return ma + r

    def converted(self, rev, destrev):
        if self.convertfp is None:
            self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab')
        self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev)))
        self.convertfp.flush()

    def before(self):
        self.ui.debug(b'run hg source pre-conversion action\n')

    def after(self):
        self.ui.debug(b'run hg source post-conversion action\n')

    def hasnativeorder(self):
        return True

    def hasnativeclose(self):
        return True

    def lookuprev(self, rev):
        try:
            return nodemod.hex(self.repo.lookup(rev))
        except (error.RepoError, error.LookupError):
            return None

    def getbookmarks(self):
        return bookmarks.listbookmarks(self.repo)

    def checkrevformat(self, revstr, mapname=b'splicemap'):
        """ Mercurial, revision string is a 40 byte hex """
        self.checkhexformat(revstr, mapname)
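For reference, the action codes that `_calculatemergedfiles()` tests above are Mercurial's single-byte merge-action mnemonics: b'm' (merge), b'dm' (directory-rename move), b'cd' (change/delete) and b'dc' (delete/change) all require real merge resolution; b'k' means keep the p1 version unchanged; anything else means the p2 version should be taken. A self-contained sketch of that classification follows, with hypothetical names (`p2filesfrom`, `belongstosource`) standing in for the convert-specific plumbing:

    # Hypothetical standalone consumer of mergeresult.filemap(), mirroring
    # the classification done by _calculatemergedfiles() above.
    MERGE_REQUIRED = {b'm', b'dm', b'cd', b'dc'}

    def p2filesfrom(mresult, belongstosource):
        """yield files whose p2 version should be taken outright"""
        for file, (action, info, msg) in mresult.filemap():
            if belongstosource(file):
                continue  # covered by the source repo's own fileset
            if action in MERGE_REQUIRED:
                # a real merge would be needed; convert aborts in this case
                raise ValueError(b'%s does not merge cleanly' % file)
            if action == b'k':
                continue  # identical to p1, nothing to pull from p2
            yield file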
@@ -1,1288 +1,1288 b''
1 # __init__.py - remotefilelog extension
1 # __init__.py - remotefilelog extension
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
8
8
9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
10 GUARANTEES. This means that repositories created with this extension may
10 GUARANTEES. This means that repositories created with this extension may
11 only be usable with the exact version of this extension/Mercurial that was
11 only be usable with the exact version of this extension/Mercurial that was
12 used. The extension attempts to enforce this in order to prevent repository
12 used. The extension attempts to enforce this in order to prevent repository
13 corruption.
13 corruption.
14
14
15 remotefilelog works by fetching file contents lazily and storing them
15 remotefilelog works by fetching file contents lazily and storing them
16 in a cache on the client rather than in revlogs. This allows enormous
16 in a cache on the client rather than in revlogs. This allows enormous
17 histories to be transferred only partially, making them easier to
17 histories to be transferred only partially, making them easier to
18 operate on.
18 operate on.
19
19
20 Configs:
20 Configs:
21
21
22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
23
23
24 ``packs.maxpacksize`` specifies the maximum pack file size
24 ``packs.maxpacksize`` specifies the maximum pack file size
25
25
26 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
26 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
27 shared cache (trees only for now)
27 shared cache (trees only for now)
28
28
29 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
29 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
30
30
31 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
31 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
32 update, and on other commands that use them. Different from pullprefetch.
32 update, and on other commands that use them. Different from pullprefetch.
33
33
34 ``remotefilelog.gcrepack`` does garbage collection during repack when True
34 ``remotefilelog.gcrepack`` does garbage collection during repack when True
35
35
36 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
36 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
37 it is garbage collected
37 it is garbage collected
38
38
39 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
39 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
40
40
41 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
41 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
42 days after which it is no longer prefetched.
42 days after which it is no longer prefetched.
43
43
44 ``remotefilelog.prefetchdelay`` specifies delay between background
44 ``remotefilelog.prefetchdelay`` specifies delay between background
45 prefetches in seconds after operations that change the working copy parent
45 prefetches in seconds after operations that change the working copy parent
46
46
47 ``remotefilelog.data.gencountlimit`` constraints the minimum number of data
47 ``remotefilelog.data.gencountlimit`` constraints the minimum number of data
48 pack files required to be considered part of a generation. In particular,
48 pack files required to be considered part of a generation. In particular,
49 minimum number of packs files > gencountlimit.
49 minimum number of packs files > gencountlimit.
50
50
51 ``remotefilelog.data.generations`` list for specifying the lower bound of
51 ``remotefilelog.data.generations`` list for specifying the lower bound of
52 each generation of the data pack files. For example, list ['100MB','1MB']
52 each generation of the data pack files. For example, list ['100MB','1MB']
53 or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [
53 or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [
54 1MB, 100MB) and [100MB, infinity).
54 1MB, 100MB) and [100MB, infinity).
55
55
56 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
56 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
57 include in an incremental data repack.
57 include in an incremental data repack.
58
58
59 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
59 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
60 it to be considered for an incremental data repack.
60 it to be considered for an incremental data repack.
61
61
62 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
62 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
63 to include in an incremental data repack.
63 to include in an incremental data repack.
64
64
65 ``remotefilelog.history.gencountlimit`` constraints the minimum number of
65 ``remotefilelog.history.gencountlimit`` constraints the minimum number of
66 history pack files required to be considered part of a generation. In
66 history pack files required to be considered part of a generation. In
67 particular, minimum number of packs files > gencountlimit.
67 particular, minimum number of packs files > gencountlimit.
68
68
69 ``remotefilelog.history.generations`` list for specifying the lower bound of
69 ``remotefilelog.history.generations`` list for specifying the lower bound of
70 each generation of the history pack files. For example, list [
70 each generation of the history pack files. For example, list [
71 '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [
71 '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [
72 0, 1MB), [1MB, 100MB) and [100MB, infinity).
72 0, 1MB), [1MB, 100MB) and [100MB, infinity).
73
73
74 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
74 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
75 include in an incremental history repack.
75 include in an incremental history repack.
76
76
77 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
77 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
78 for it to be considered for an incremental history repack.
78 for it to be considered for an incremental history repack.
79
79
80 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
80 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
81 files to include in an incremental history repack.
81 files to include in an incremental history repack.
82
82
83 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
83 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
84 background
84 background
85
85
86 ``remotefilelog.cachepath`` path to cache
86 ``remotefilelog.cachepath`` path to cache
87
87
88 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
88 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
89 group
89 group
90
90
91 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
91 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
92
92
93 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
93 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
94
94
95 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
95 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
96
96
97 ``remotefilelog.includepattern`` pattern of files to include in pulls
97 ``remotefilelog.includepattern`` pattern of files to include in pulls
98
98
99 ``remotefilelog.fetchwarning``: message to print when too many
99 ``remotefilelog.fetchwarning``: message to print when too many
100 single-file fetches occur
100 single-file fetches occur
101
101
102 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
102 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
103
103
104 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
104 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
105 files, otherwise use optimistic fetching
105 files, otherwise use optimistic fetching
106
106
107 ``remotefilelog.pullprefetch`` revset for selecting files that should be
107 ``remotefilelog.pullprefetch`` revset for selecting files that should be
108 eagerly downloaded rather than lazily
108 eagerly downloaded rather than lazily
109
109
110 ``remotefilelog.reponame`` name of the repo. If set, used to partition
110 ``remotefilelog.reponame`` name of the repo. If set, used to partition
111 data from other repos in a shared store.
111 data from other repos in a shared store.
112
112
113 ``remotefilelog.server`` if true, enable server-side functionality
113 ``remotefilelog.server`` if true, enable server-side functionality
114
114
115 ``remotefilelog.servercachepath`` path for caching blobs on the server
115 ``remotefilelog.servercachepath`` path for caching blobs on the server
116
116
117 ``remotefilelog.serverexpiration`` number of days to keep cached server
117 ``remotefilelog.serverexpiration`` number of days to keep cached server
118 blobs
118 blobs
119
119
120 ``remotefilelog.validatecache`` if set, check cache entries for corruption
120 ``remotefilelog.validatecache`` if set, check cache entries for corruption
121 before returning blobs
121 before returning blobs
122
122
123 ``remotefilelog.validatecachelog`` if set, check cache entries for
123 ``remotefilelog.validatecachelog`` if set, check cache entries for
124 corruption before returning metadata
124 corruption before returning metadata
125
125
126 """
126 """
127 from __future__ import absolute_import
127 from __future__ import absolute_import
128
128
129 import os
129 import os
130 import time
130 import time
131 import traceback
131 import traceback
132
132
133 from mercurial.node import hex
133 from mercurial.node import hex
134 from mercurial.i18n import _
134 from mercurial.i18n import _
135 from mercurial.pycompat import open
135 from mercurial.pycompat import open
136 from mercurial import (
136 from mercurial import (
137 changegroup,
137 changegroup,
138 changelog,
138 changelog,
139 cmdutil,
139 cmdutil,
140 commands,
140 commands,
141 configitems,
141 configitems,
142 context,
142 context,
143 copies,
143 copies,
144 debugcommands as hgdebugcommands,
144 debugcommands as hgdebugcommands,
145 dispatch,
145 dispatch,
146 error,
146 error,
147 exchange,
147 exchange,
148 extensions,
148 extensions,
149 hg,
149 hg,
150 localrepo,
150 localrepo,
151 match as matchmod,
151 match as matchmod,
152 merge,
152 merge,
153 mergestate as mergestatemod,
153 mergestate as mergestatemod,
154 node as nodemod,
154 node as nodemod,
155 patch,
155 patch,
156 pycompat,
156 pycompat,
157 registrar,
157 registrar,
158 repair,
158 repair,
159 repoview,
159 repoview,
160 revset,
160 revset,
161 scmutil,
161 scmutil,
162 smartset,
162 smartset,
163 streamclone,
163 streamclone,
164 util,
164 util,
165 )
165 )
166 from . import (
166 from . import (
167 constants,
167 constants,
168 debugcommands,
168 debugcommands,
169 fileserverclient,
169 fileserverclient,
170 remotefilectx,
170 remotefilectx,
171 remotefilelog,
171 remotefilelog,
172 remotefilelogserver,
172 remotefilelogserver,
    repack as repackmod,
    shallowbundle,
    shallowrepo,
    shallowstore,
    shallowutil,
    shallowverifier,
)

# ensures debug commands are registered
hgdebugcommands.command

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem(b'remotefilelog', b'debug', default=False)

configitem(b'remotefilelog', b'reponame', default=b'')
configitem(b'remotefilelog', b'cachepath', default=None)
configitem(b'remotefilelog', b'cachegroup', default=None)
configitem(b'remotefilelog', b'cacheprocess', default=None)
configitem(b'remotefilelog', b'cacheprocess.includepath', default=None)
configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB")

configitem(
    b'remotefilelog',
    b'fallbackpath',
    default=configitems.dynamicdefault,
    alias=[(b'remotefilelog', b'fallbackrepo')],
)

configitem(b'remotefilelog', b'validatecachelog', default=None)
configitem(b'remotefilelog', b'validatecache', default=b'on')
configitem(b'remotefilelog', b'server', default=None)
configitem(b'remotefilelog', b'servercachepath', default=None)
configitem(b"remotefilelog", b"serverexpiration", default=30)
configitem(b'remotefilelog', b'backgroundrepack', default=False)
configitem(b'remotefilelog', b'bgprefetchrevs', default=None)
configitem(b'remotefilelog', b'pullprefetch', default=None)
configitem(b'remotefilelog', b'backgroundprefetch', default=False)
configitem(b'remotefilelog', b'prefetchdelay', default=120)
configitem(b'remotefilelog', b'prefetchdays', default=14)

configitem(b'remotefilelog', b'getfilesstep', default=10000)
configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault)
configitem(b'remotefilelog', b'fetchwarning', default=b'')

configitem(b'remotefilelog', b'includepattern', default=None)
configitem(b'remotefilelog', b'excludepattern', default=None)

configitem(b'remotefilelog', b'gcrepack', default=False)
configitem(b'remotefilelog', b'repackonhggc', default=False)
configitem(b'repack', b'chainorphansbysize', default=True, experimental=True)

configitem(b'packs', b'maxpacksize', default=0)
configitem(b'packs', b'maxchainlen', default=1000)

configitem(b'devel', b'remotefilelog.bg-wait', default=False)

# default TTL limit is 30 days
_defaultlimit = 60 * 60 * 24 * 30
configitem(b'remotefilelog', b'nodettl', default=_defaultlimit)

configitem(b'remotefilelog', b'data.gencountlimit', default=2)
configitem(
    b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB']
)
configitem(b'remotefilelog', b'data.maxrepackpacks', default=50)
configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB')
configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB')

configitem(b'remotefilelog', b'history.gencountlimit', default=2)
configitem(b'remotefilelog', b'history.generations', default=[b'100MB'])
configitem(b'remotefilelog', b'history.maxrepackpacks', default=50)
configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB')
configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB')
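
# Illustrative hgrc sketch for a few of the knobs above (the values and paths
# are hypothetical examples, not recommendations):
#
#   [remotefilelog]
#   cachepath = /var/cache/hgcache
#   cachelimit = 10 GB
#   prefetchdays = 14
#   backgroundrepack = True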

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core'
# for extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# specify the version(s) of Mercurial they are tested with, or leave the
# attribute unspecified.
testedwith = b'ships-with-hg-core'

repoclass = localrepo.localrepository
repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)

isenabled = shallowutil.isenabled


def uisetup(ui):
    """Wraps user-facing Mercurial commands to swap them out with shallow
    versions.
    """
    hg.wirepeersetupfuncs.append(fileserverclient.peersetup)

    entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow)
    entry[1].append(
        (
            b'',
            b'shallow',
            None,
            _(b"create a shallow clone which uses remote file history"),
        )
    )

    extensions.wrapcommand(
        commands.table, b'debugindex', debugcommands.debugindex
    )
    extensions.wrapcommand(
        commands.table, b'debugindexdot', debugcommands.debugindexdot
    )
    extensions.wrapcommand(commands.table, b'log', log)
    extensions.wrapcommand(commands.table, b'pull', pull)

    # Prevent 'hg manifest --all'
    def _manifest(orig, ui, repo, *args, **opts):
        if isenabled(repo) and opts.get('all'):
            raise error.Abort(_(b"--all is not supported in a shallow repo"))

        return orig(ui, repo, *args, **opts)

    extensions.wrapcommand(commands.table, b"manifest", _manifest)

    # Wrap remotefilelog with lfs code
    def _lfsloaded(loaded=False):
        lfsmod = None
        try:
            lfsmod = extensions.find(b'lfs')
        except KeyError:
            pass
        if lfsmod:
            lfsmod.wrapfilelog(remotefilelog.remotefilelog)
            fileserverclient._lfsmod = lfsmod

    extensions.afterloaded(b'lfs', _lfsloaded)

    # debugdata needs remotefilelog.len to work
    extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow)

    changegroup.cgpacker = shallowbundle.shallowcg1packer

    extensions.wrapfunction(
        changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles
    )
    extensions.wrapfunction(
        changegroup, b'makechangegroup', shallowbundle.makechangegroup
    )
    extensions.wrapfunction(localrepo, b'makestore', storewrapper)
    extensions.wrapfunction(exchange, b'pull', exchangepull)
    extensions.wrapfunction(merge, b'applyupdates', applyupdates)
    extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles)
    extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup)
    extensions.wrapfunction(scmutil, b'_findrenames', findrenames)
    extensions.wrapfunction(
        copies, b'_computeforwardmissing', computeforwardmissing
    )
    extensions.wrapfunction(dispatch, b'runcommand', runcommand)
    extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets)
    extensions.wrapfunction(context.changectx, b'filectx', filectx)
    extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx)
    extensions.wrapfunction(patch, b'trydiff', trydiff)
    extensions.wrapfunction(hg, b'verify', _verify)
    scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook)

    # disappointing hacks below
    extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn)
    extensions.wrapfunction(revset, b'filelog', filelogrevset)
    revset.symbols[b'filelog'] = revset.filelog
    extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs)


def cloneshallow(orig, ui, repo, *args, **opts):
    if opts.get('shallow'):
        repos = []

        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (
                        self.__class__.__bases__[0],
                        self.unfiltered().__class__,
                    )
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                scmutil.writereporequirements(self)

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)

        extensions.wrapfunction(exchange, b'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
                        opts['includepattern'] = b'\0'.join(repo.includepattern)
                    if repo.excludepattern:
                        opts['excludepattern'] = b'\0'.join(repo.excludepattern)
                    return remote._callstream(b'stream_out_shallow', **opts)
                else:
                    return orig()

            extensions.wrapfunction(remote, b'stream_out', stream_out_shallow)

        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)

        extensions.wrapfunction(
            streamclone, b'maybeperformlegacystreamclone', stream_wrap
        )

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
                pullop.remotebundle2caps[b'stream'] = [
                    c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2'
                ]
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements

        extensions.wrapfunction(
            streamclone, b'canperformstreamclone', canperformstreamclone
        )

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get('shallow'):
            for r in repos:
                if util.safehasattr(r, b'fileservice'):
                    r.fileservice.close()
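
# With the --shallow flag registered in uisetup above, a shallow clone is
# requested as, e.g. (hypothetical URL and paths):
#   hg clone --shallow ssh://server//repo client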


def debugdatashallow(orig, *args, **kwds):
    oldlen = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda x: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = oldlen


def reposetup(ui, repo):
    if not repo.local():
        return

    # put here intentionally because this doesn't work in uisetup
    ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch)
    ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch)

    isserverenabled = ui.configbool(b'remotefilelog', b'server')
    isshallowclient = isenabled(repo)

    if isserverenabled and isshallowclient:
        raise RuntimeError(b"Cannot be both a server and shallow client.")

    if isshallowclient:
        setupclient(ui, repo)

    if isserverenabled:
        remotefilelogserver.setupserver(ui, repo)


def setupclient(ui, repo):
    if not isinstance(repo, localrepo.localrepository):
        return

    # Even clients get the server setup since they need to have the
    # wireprotocol endpoints registered.
    remotefilelogserver.onetimesetup(ui)
    onetimeclientsetup(ui)

    shallowrepo.wraprepo(repo)
    repo.store = shallowstore.wrapstore(repo.store)


def storewrapper(orig, requirements, path, vfstype):
    s = orig(requirements, path, vfstype)
    if constants.SHALLOWREPO_REQUIREMENT in requirements:
        s = shallowstore.wrapstore(s)

    return s


# prefetch files before update
def applyupdates(
    orig, repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts
):
    if isenabled(repo):
        manifest = mctx.manifest()
        files = []
        for f, args, msg in mresult.getactions([mergestatemod.ACTION_GET]):
            files.append((f, hex(manifest[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts)
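
# The prefetch payload built above is a list of (path, hex filenode) pairs,
# e.g. (hypothetical values): [(b'mercurial/hg.py', b'<40-hex-node>')]; every
# fileservice.prefetch() caller in this file uses the same shape.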


# Prefetch merge checkunknownfiles
def checkunknownfiles(orig, repo, wctx, mctx, force, mresult, *args, **kwargs):
    if isenabled(repo):
        files = []
        sparsematch = repo.maybesparsematch(mctx.rev())
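        # filemap() yields one (filename, (action, args, message)) tuple per
        # file, matching the unpacking in the loop below.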
        for f, (m, actionargs, msg) in mresult.filemap():
            if sparsematch and not sparsematch(f):
                continue
            if m in (
                mergestatemod.ACTION_CREATED,
                mergestatemod.ACTION_DELETED_CHANGED,
                mergestatemod.ACTION_CREATED_MERGE,
            ):
                files.append((f, hex(mctx.filenode(f))))
            elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
                f2 = actionargs[0]
                files.append((f2, hex(mctx.filenode(f2))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, wctx, mctx, force, mresult, *args, **kwargs)


# Prefetch files before status attempts to look at their size and contents
def checklookup(orig, self, files):
    repo = self._repo
    if isenabled(repo):
        prefetchfiles = []
        for parent in self._parents:
            for f in files:
                if f in parent:
                    prefetchfiles.append((f, hex(parent.filenode(f))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(prefetchfiles)
    return orig(self, files)


# Prefetch the logic that compares added and removed files for renames
def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
    if isenabled(repo):
        files = []
        pmf = repo[b'.'].manifest()
        for f in removed:
            if f in pmf:
                files.append((f, hex(pmf[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, matcher, added, removed, *args, **kwargs)


# prefetch files before pathcopies check
def computeforwardmissing(orig, a, b, match=None):
    missing = orig(a, b, match=match)
    repo = a._repo
    if isenabled(repo):
        mb = b.manifest()

        files = []
        sparsematch = repo.maybesparsematch(b.rev())
        if sparsematch:
            sparsemissing = set()
            for f in missing:
                if sparsematch(f):
                    files.append((f, hex(mb[f])))
                    sparsemissing.add(f)
            missing = sparsemissing

        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return missing


# close cache miss server connection after the command has finished
def runcommand(orig, lui, repo, *args, **kwargs):
    fileservice = None
    # repo can be None when running in chg:
    # - at startup, reposetup was called because serve is not norepo
    # - a norepo command like "help" is called
    if repo and isenabled(repo):
        fileservice = repo.fileservice
    try:
        return orig(lui, repo, *args, **kwargs)
    finally:
        if fileservice:
            fileservice.close()


# prevent strip from stripping remotefilelogs
def _collectbrokencsets(orig, repo, files, striprev):
    if isenabled(repo):
        files = [f for f in files if not repo.shallowmatch(f)]
    return orig(repo, files, striprev)


# changectx wrappers
def filectx(orig, self, path, fileid=None, filelog=None):
    if fileid is None:
        fileid = self.filenode(path)
    if isenabled(self._repo) and self._repo.shallowmatch(path):
        return remotefilectx.remotefilectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
    return orig(self, path, fileid=fileid, filelog=filelog)


def workingfilectx(orig, self, path, filelog=None):
    if isenabled(self._repo) and self._repo.shallowmatch(path):
        return remotefilectx.remoteworkingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
    return orig(self, path, filelog=filelog)


# prefetch required revisions before a diff
def trydiff(
    orig,
    repo,
    revs,
    ctx1,
    ctx2,
    modified,
    added,
    removed,
    copy,
    getfilectx,
    *args,
    **kwargs
):
    if isenabled(repo):
        prefetch = []
        mf1 = ctx1.manifest()
        for fname in modified + added + removed:
            if fname in mf1:
                fnode = getfilectx(fname, ctx1).filenode()
                # fnode can be None if it's an edited working ctx file
                if fnode:
                    prefetch.append((fname, hex(fnode)))
            if fname not in removed:
                fnode = getfilectx(fname, ctx2).filenode()
                if fnode:
                    prefetch.append((fname, hex(fnode)))

        repo.fileservice.prefetch(prefetch)

    return orig(
        repo,
        revs,
        ctx1,
        ctx2,
        modified,
        added,
        removed,
        copy,
        getfilectx,
        *args,
        **kwargs
    )


# Prevent verify from processing files
# a stub for mercurial.hg.verify()
def _verify(orig, repo, level=None):
    lock = repo.lock()
    try:
        return shallowverifier.shallowverifier(repo).verify()
    finally:
        lock.release()


clientonetime = False


def onetimeclientsetup(ui):
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []

    def addrawrevision(
        orig,
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        if isinstance(link, int):
            pendingfilecommits.append(
                (
                    self,
                    rawtext,
                    transaction,
                    link,
                    p1,
                    p2,
                    node,
                    flags,
                    cachedelta,
                    _metatuple,
                )
            )
            return node
        else:
            return orig(
                self,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                node,
                flags,
                cachedelta,
                _metatuple=_metatuple,
            )

    extensions.wrapfunction(
        remotefilelog.remotefilelog, b'addrawrevision', addrawrevision
    )

    def changelogadd(orig, self, *args, **kwargs):
        oldlen = len(self)
        node = orig(self, *args, **kwargs)
        newlen = len(self)
        if oldlen != newlen:
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        b'pending multiple integer revisions are not supported'
                    )
        else:
            # "link" is actually wrong here (it is set to len(changelog))
            # if changelog remains unchanged, skip writing file revisions
            # but still do a sanity check about pending multiple revisions
            if len({x[3] for x in pendingfilecommits}) > 1:
                raise error.ProgrammingError(
                    b'pending multiple integer revisions are not supported'
                )
        del pendingfilecommits[:]
        return node

    extensions.wrapfunction(changelog.changelog, b'add', changelogadd)
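
    # Sketch of the deferred-write sequence above (hypothetical rev number):
    # a filelog addrawrevision() with link=5 is queued in pendingfilecommits;
    # once changelog.add() creates the node for rev 5, the queued revision is
    # written out with that node as its linknode.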


def getrenamedfn(orig, repo, endrev=None):
    if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
        return orig(repo, endrev)

    rcache = {}

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if rev in rcache.setdefault(fn, {}):
            return rcache[fn][rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()
                    rcache[fn][ancestor.rev()] = renamed and renamed[0]

            renamed = fctx.renamed()
            return renamed and renamed[0]
        except error.LookupError:
            return None

    return getrenamed


def walkfilerevs(orig, repo, match, follow, revs, fncache):
    if not isenabled(repo):
        return orig(repo, match, follow, revs, fncache)

    # remotefilelogs can't be walked in rev order, so throw.
    # The caller will see the exception and walk the commit tree instead.
    if not follow:
        raise cmdutil.FileWalkError(b"Cannot walk via filelog")

    wanted = set()
    minrev, maxrev = min(revs), max(revs)

    pctx = repo[b'.']
    for filename in match.files():
        if filename not in pctx:
            raise error.Abort(
                _(b'cannot follow file not in parent revision: "%s"') % filename
            )
        fctx = pctx[filename]

        linkrev = fctx.linkrev()
        if linkrev >= minrev and linkrev <= maxrev:
            fncache.setdefault(linkrev, []).append(filename)
            wanted.add(linkrev)

        for ancestor in fctx.ancestors():
            linkrev = ancestor.linkrev()
            if linkrev >= minrev and linkrev <= maxrev:
                fncache.setdefault(linkrev, []).append(ancestor.path())
                wanted.add(linkrev)

    return wanted


def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if not isenabled(repo):
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _(b"filelog requires a pattern"))
    m = matchmod.match(
        repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None]
    )
    s = set()

    if not matchmod.patkind(pat):
        # slow
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])
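
# Example (hypothetical path): hg log -r 'filelog("mercurial/hg.py")'. Plain
# paths take the "slow" branch above, scanning ctx.files() for each rev, while
# glob or regex patterns take the "partial" linkrev-based branch.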


@command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    '''garbage collect the client and server filelog caches
    '''
    cachepaths = set()

    # get the system client cache
    systemcache = shallowutil.getcachepath(ui, allowempty=True)
    if systemcache:
        cachepaths.add(systemcache)

    # get repo client and server cache
    repopaths = []
    pwd = ui.environ.get(b'PWD')
    if pwd:
        repopaths.append(pwd)

    repopaths.extend(args)
    repos = []
    for repopath in repopaths:
        try:
            repo = hg.peer(ui, {}, repopath)
            repos.append(repo)

            repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
            if repocache:
                cachepaths.add(repocache)
        except error.RepoError:
            pass

    # gc client cache
    for cachepath in cachepaths:
        gcclient(ui, cachepath)

    # gc server cache
    for repo in repos:
        remotefilelogserver.gcserver(ui, repo._repo)
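
# Typical invocations (paths hypothetical): a plain "hg gc" collects the
# caches for the repository under $PWD, while "hg gc /srv/repo1 /srv/repo2"
# also walks the listed repositories.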


def gcclient(ui, cachepath):
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, b'repos')
    if not os.path.exists(repospath):
        ui.warn(_(b"no known cache at %s\n") % cachepath)
        return

    reposfile = open(repospath, b'rb')
    repos = {r[:-1] for r in reposfile.readlines()}
    reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    sharedcache = None
    filesrepacked = False

    count = 0
    progress = ui.makeprogress(
        _(b"analyzing repositories"), unit=b"repos", total=len(repos)
    )
    for path in repos:
        progress.update(count)
        count += 1
        try:
            path = ui.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

        if not util.safehasattr(repo, b'name'):
            ui.warn(
                _(b"repo %s is a misconfigured remotefilelog repo\n") % path
            )
            continue

        # If garbage collection on repack and repack on hg gc are enabled
        # then loose files are repacked and garbage collected.
        # Otherwise regular garbage collection is performed.
        repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
        gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
        if repackonhggc and gcrepack:
            try:
                repackmod.incrementalrepack(repo)
                filesrepacked = True
                continue
            except (IOError, repackmod.RepackAlreadyRunning):
                # If repack cannot be performed due to not enough disk space
                # continue doing garbage collection of loose files w/o repack
                pass

        reponame = repo.name
        if not sharedcache:
            sharedcache = repo.sharedstore

        # Compute a keepset which is not garbage collected
        def keyfn(fname, fnode):
            return fileserverclient.getcachekey(reponame, fname, hex(fnode))

        keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)

    progress.complete()

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, b'wb')
        reposfile.writelines([(b"%s\n" % r) for r in validrepos])
        reposfile.close()
    finally:
        os.umask(oldumask)

    # prune cache
    if sharedcache is not None:
        sharedcache.gc(keepkeys)
    elif not filesrepacked:
        ui.warn(_(b"warning: no valid repos in repofile\n"))


def log(orig, ui, repo, *pats, **opts):
    if not isenabled(repo):
        return orig(ui, repo, *pats, **opts)

    follow = opts.get('follow')
    revs = opts.get('rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if not follow or revs:
            # This forces the slowpath
            opts['removed'] = True

        # If this is a non-follow log without any revs specified, recommend
        # that the user add -f to speed it up.
        if not follow and not revs:
            match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts))
            isfile = not match.anypats()
            if isfile:
                for file in match.files():
                    if not os.path.isfile(repo.wjoin(file)):
                        isfile = False
                        break

            if isfile:
                ui.warn(
                    _(
                        b"warning: file log can be slow on large repos - "
                        + b"use -f to speed it up\n"
                    )
                )

    return orig(ui, repo, *pats, **opts)


def revdatelimit(ui, revset):
    """Update revset so that only changesets no older than 'prefetchdays' days
    are included. The default value is 14 days. If 'prefetchdays' is set to
    zero or a negative value, the date restriction is not applied.
    """
    days = ui.configint(b'remotefilelog', b'prefetchdays')
    if days > 0:
        revset = b'(%s) & date(-%s)' % (revset, days)
    return revset
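
# Worked example (hypothetical revset): with prefetchdays at its default of
# 14, revdatelimit(ui, b'bookmark()') returns b'(bookmark()) & date(-14)'.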


def readytofetch(repo):
    """Check that enough time has passed since the last background prefetch.
    This only relates to prefetches after operations that change the working
    copy parent. The default delay between background prefetches is 2 minutes.
    """
    timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay')
    fname = repo.vfs.join(b'lastprefetch')

    ready = False
    with open(fname, b'a'):
        # the with construct above is used to avoid race conditions
        modtime = os.path.getmtime(fname)
        if (time.time() - modtime) > timeout:
            os.utime(fname, None)
            ready = True

    return ready
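
# Timing sketch: with prefetchdelay at its default of 120, a call that finds
# 'lastprefetch' older than 120 seconds touches the file and returns True;
# calls within the next 120 seconds return False.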


def wcpprefetch(ui, repo, **kwargs):
    """Prefetches, in the background, the revisions matched by the
    bgprefetchrevs revset. Also kicks off a background repack if the
    backgroundrepack flag is set in the config.
    """
    shallow = isenabled(repo)
    bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs')
    isready = readytofetch(repo)

    if not (shallow and bgprefetchrevs and isready):
        return

    bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
    # update a revset with a date limit
    bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)

    def anon(unused_success):
        if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)

    repo._afterlock(anon)


def pull(orig, ui, repo, *pats, **opts):
    result = orig(ui, repo, *pats, **opts)

    if isenabled(repo):
        # prefetch if it's configured
        prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch')
        bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
        bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch')

        if prefetchrevset:
            ui.status(_(b"prefetching file contents\n"))
            revs = scmutil.revrange(repo, [prefetchrevset])
            base = repo[b'.'].rev()
            if bgprefetch:
                repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
            else:
                repo.prefetch(revs, base=base)
                if bgrepack:
                    repackmod.backgroundrepack(repo, incremental=True)
        elif bgrepack:
            repackmod.backgroundrepack(repo, incremental=True)

    return result
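
# A minimal pull-time prefetch configuration for the branch above might look
# like (revset purely illustrative):
#
#   [remotefilelog]
#   pullprefetch = bookmark()
#   backgroundprefetch = True
#   backgroundrepack = True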


def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(
        orig, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add(constants.BUNDLE2_CAPABLITY)
        return orig(
            source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs
        )

    if util.safehasattr(remote, b'_callstream'):
        remote._localrepo = repo
    elif util.safehasattr(remote, b'getbundle'):
        extensions.wrapfunction(remote, b'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)


def _fileprefetchhook(repo, revmatches):
    if isenabled(repo):
        allfiles = []
        for rev, match in revmatches:
            if rev == nodemod.wdirrev or rev is None:
                continue
            ctx = repo[rev]
            mf = ctx.manifest()
            sparsematch = repo.maybesparsematch(ctx.rev())
            for path in ctx.walk(match):
                if (not sparsematch or sparsematch(path)) and path in mf:
                    allfiles.append((path, hex(mf[path])))
        repo.fileservice.prefetch(allfiles)
1137
1137
1138
1138
1139 @command(
1139 @command(
1140 b'debugremotefilelog',
1140 b'debugremotefilelog',
1141 [(b'd', b'decompress', None, _(b'decompress the filelog first')),],
1141 [(b'd', b'decompress', None, _(b'decompress the filelog first')),],
1142 _(b'hg debugremotefilelog <path>'),
1142 _(b'hg debugremotefilelog <path>'),
1143 norepo=True,
1143 norepo=True,
1144 )
1144 )
1145 def debugremotefilelog(ui, path, **opts):
1145 def debugremotefilelog(ui, path, **opts):
1146 return debugcommands.debugremotefilelog(ui, path, **opts)
1146 return debugcommands.debugremotefilelog(ui, path, **opts)
1147
1147
1148
1148
1149 @command(
1149 @command(
1150 b'verifyremotefilelog',
1150 b'verifyremotefilelog',
1151 [(b'd', b'decompress', None, _(b'decompress the filelogs first')),],
1151 [(b'd', b'decompress', None, _(b'decompress the filelogs first')),],
1152 _(b'hg verifyremotefilelog <directory>'),
1152 _(b'hg verifyremotefilelog <directory>'),
1153 norepo=True,
1153 norepo=True,
1154 )
1154 )
1155 def verifyremotefilelog(ui, path, **opts):
1155 def verifyremotefilelog(ui, path, **opts):
1156 return debugcommands.verifyremotefilelog(ui, path, **opts)
1156 return debugcommands.verifyremotefilelog(ui, path, **opts)
1157
1157
1158
1158
1159 @command(
1159 @command(
1160 b'debugdatapack',
1160 b'debugdatapack',
1161 [
1161 [
1162 (b'', b'long', None, _(b'print the long hashes')),
1162 (b'', b'long', None, _(b'print the long hashes')),
1163 (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'),
1163 (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'),
1164 ],
1164 ],
1165 _(b'hg debugdatapack <paths>'),
1165 _(b'hg debugdatapack <paths>'),
1166 norepo=True,
1166 norepo=True,
1167 )
1167 )
1168 def debugdatapack(ui, *paths, **opts):
1168 def debugdatapack(ui, *paths, **opts):
1169 return debugcommands.debugdatapack(ui, *paths, **opts)
1169 return debugcommands.debugdatapack(ui, *paths, **opts)
1170
1170
1171
1171
1172 @command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True)
1172 @command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True)
1173 def debughistorypack(ui, path, **opts):
1173 def debughistorypack(ui, path, **opts):
1174 return debugcommands.debughistorypack(ui, path)
1174 return debugcommands.debughistorypack(ui, path)
1175
1175
1176
1176
1177 @command(b'debugkeepset', [], _(b'hg debugkeepset'))
1177 @command(b'debugkeepset', [], _(b'hg debugkeepset'))
1178 def debugkeepset(ui, repo, **opts):
1178 def debugkeepset(ui, repo, **opts):
1179 # The command is used to measure keepset computation time
1179 # The command is used to measure keepset computation time
1180 def keyfn(fname, fnode):
1180 def keyfn(fname, fnode):
1181 return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
1181 return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
1182
1182
1183 repackmod.keepset(repo, keyfn)
1183 repackmod.keepset(repo, keyfn)
1184 return
1184 return
1185
1185
1186
1186
1187 @command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack'))
1187 @command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack'))
1188 def debugwaitonrepack(ui, repo, **opts):
1188 def debugwaitonrepack(ui, repo, **opts):
1189 return debugcommands.debugwaitonrepack(repo)
1189 return debugcommands.debugwaitonrepack(repo)
1190
1190
1191
1191
1192 @command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch'))
1192 @command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch'))
1193 def debugwaitonprefetch(ui, repo, **opts):
1193 def debugwaitonprefetch(ui, repo, **opts):
1194 return debugcommands.debugwaitonprefetch(repo)
1194 return debugcommands.debugwaitonprefetch(repo)
1195
1195
1196
1196
1197 def resolveprefetchopts(ui, opts):
1197 def resolveprefetchopts(ui, opts):
1198 if not opts.get(b'rev'):
1198 if not opts.get(b'rev'):
1199 revset = [b'.', b'draft()']
1199 revset = [b'.', b'draft()']
1200
1200
1201 prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None)
1201 prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None)
1202 if prefetchrevset:
1202 if prefetchrevset:
1203 revset.append(b'(%s)' % prefetchrevset)
1203 revset.append(b'(%s)' % prefetchrevset)
1204 bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None)
1204 bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None)
1205 if bgprefetchrevs:
1205 if bgprefetchrevs:
1206 revset.append(b'(%s)' % bgprefetchrevs)
1206 revset.append(b'(%s)' % bgprefetchrevs)
1207 revset = b'+'.join(revset)
1207 revset = b'+'.join(revset)
1208
1208
1209 # update a revset with a date limit
1209 # update a revset with a date limit
1210 revset = revdatelimit(ui, revset)
1210 revset = revdatelimit(ui, revset)
1211
1211
1212 opts[b'rev'] = [revset]
1212 opts[b'rev'] = [revset]
1213
1213
1214 if not opts.get(b'base'):
1214 if not opts.get(b'base'):
1215 opts[b'base'] = None
1215 opts[b'base'] = None
1216
1216
1217 return opts
1217 return opts
1218
1218
1219
1219
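# For illustration, resolveprefetchopts() joins the revset pieces with
# '+'. Assuming a hypothetical configuration of pullprefetch=master and
# bgprefetchrevs=draft() (the config names are real, the values are
# made up), the result before the optional date limit would be:
#
#   opts[b'rev'] == [b'.+draft()+(master)+(draft())']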
1220 @command(
1220 @command(
1221 b'prefetch',
1221 b'prefetch',
1222 [
1222 [
1223 (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')),
1223 (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')),
1224 (b'', b'repack', False, _(b'run repack after prefetch')),
1224 (b'', b'repack', False, _(b'run repack after prefetch')),
1225 (b'b', b'base', b'', _(b"rev that is assumed to already be local")),
1225 (b'b', b'base', b'', _(b"rev that is assumed to already be local")),
1226 ]
1226 ]
1227 + commands.walkopts,
1227 + commands.walkopts,
1228 _(b'hg prefetch [OPTIONS] [FILE...]'),
1228 _(b'hg prefetch [OPTIONS] [FILE...]'),
1229 helpcategory=command.CATEGORY_MAINTENANCE,
1229 helpcategory=command.CATEGORY_MAINTENANCE,
1230 )
1230 )
1231 def prefetch(ui, repo, *pats, **opts):
1231 def prefetch(ui, repo, *pats, **opts):
1232 """prefetch file revisions from the server
1232 """prefetch file revisions from the server
1233
1233
1234 Prefetches file revisions for the specified revs and stores them in the
1234 Prefetches file revisions for the specified revs and stores them in the
1235 local remotefilelog cache. If no rev is specified, the default rev is
1235 local remotefilelog cache. If no rev is specified, the default rev is
1236 used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
1236 used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
1237 File names or patterns can be used to limit which files are downloaded.
1237 File names or patterns can be used to limit which files are downloaded.
1238
1238
1239 Return 0 on success.
1239 Return 0 on success.
1240 """
1240 """
1241 opts = pycompat.byteskwargs(opts)
1241 opts = pycompat.byteskwargs(opts)
1242 if not isenabled(repo):
1242 if not isenabled(repo):
1243 raise error.Abort(_(b"repo is not shallow"))
1243 raise error.Abort(_(b"repo is not shallow"))
1244
1244
1245 opts = resolveprefetchopts(ui, opts)
1245 opts = resolveprefetchopts(ui, opts)
1246 revs = scmutil.revrange(repo, opts.get(b'rev'))
1246 revs = scmutil.revrange(repo, opts.get(b'rev'))
1247 repo.prefetch(revs, opts.get(b'base'), pats, opts)
1247 repo.prefetch(revs, opts.get(b'base'), pats, opts)
1248
1248
1249 # Run repack in background
1249 # Run repack in background
1250 if opts.get(b'repack'):
1250 if opts.get(b'repack'):
1251 repackmod.backgroundrepack(repo, incremental=True)
1251 repackmod.backgroundrepack(repo, incremental=True)
1252
1252
1253
1253
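# Example invocations (revisions and paths are hypothetical):
#
#   $ hg prefetch -r master
#   $ hg prefetch -r '.::' --repack
#   $ hg prefetch src/module.py
#
# With no -r, the revset assembled by resolveprefetchopts() is used.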
1254 @command(
1254 @command(
1255 b'repack',
1255 b'repack',
1256 [
1256 [
1257 (b'', b'background', None, _(b'run in a background process'), None),
1257 (b'', b'background', None, _(b'run in a background process'), None),
1258 (b'', b'incremental', None, _(b'do an incremental repack'), None),
1258 (b'', b'incremental', None, _(b'do an incremental repack'), None),
1259 (
1259 (
1260 b'',
1260 b'',
1261 b'packsonly',
1261 b'packsonly',
1262 None,
1262 None,
1263 _(b'only repack packs (skip loose objects)'),
1263 _(b'only repack packs (skip loose objects)'),
1264 None,
1264 None,
1265 ),
1265 ),
1266 ],
1266 ],
1267 _(b'hg repack [OPTIONS]'),
1267 _(b'hg repack [OPTIONS]'),
1268 )
1268 )
1269 def repack_(ui, repo, *pats, **opts):
1269 def repack_(ui, repo, *pats, **opts):
1270 if opts.get('background'):
1270 if opts.get('background'):
1271 repackmod.backgroundrepack(
1271 repackmod.backgroundrepack(
1272 repo,
1272 repo,
1273 incremental=opts.get('incremental'),
1273 incremental=opts.get('incremental'),
1274 packsonly=opts.get('packsonly', False),
1274 packsonly=opts.get('packsonly', False),
1275 )
1275 )
1276 return
1276 return
1277
1277
1278 options = {b'packsonly': opts.get('packsonly')}
1278 options = {b'packsonly': opts.get('packsonly')}
1279
1279
1280 try:
1280 try:
1281 if opts.get('incremental'):
1281 if opts.get('incremental'):
1282 repackmod.incrementalrepack(repo, options=options)
1282 repackmod.incrementalrepack(repo, options=options)
1283 else:
1283 else:
1284 repackmod.fullrepack(repo, options=options)
1284 repackmod.fullrepack(repo, options=options)
1285 except repackmod.RepackAlreadyRunning as ex:
1285 except repackmod.RepackAlreadyRunning as ex:
1286 # Don't propagate the exception if the repack is already in
1286 # Don't propagate the exception if the repack is already in
1287 # progress, since we want the command to exit 0.
1287 # progress, since we want the command to exit 0.
1288 repo.ui.warn(b'%s\n' % ex)
1288 repo.ui.warn(b'%s\n' % ex)
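# Example invocations (illustrative only):
#
#   $ hg repack                          # full repack, runs in-process
#   $ hg repack --incremental            # incremental repack
#   $ hg repack --background --packsonly
#
# A concurrently running repack is not treated as an error:
# RepackAlreadyRunning is caught above and reported as a warning so the
# command still exits 0.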
@@ -1,2311 +1,2315 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import stat
12 import stat
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 modifiednodeid,
18 modifiednodeid,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from .thirdparty import attr
22 from .thirdparty import attr
23 from . import (
23 from . import (
24 copies,
24 copies,
25 encoding,
25 encoding,
26 error,
26 error,
27 filemerge,
27 filemerge,
28 match as matchmod,
28 match as matchmod,
29 mergestate as mergestatemod,
29 mergestate as mergestatemod,
30 obsutil,
30 obsutil,
31 pathutil,
31 pathutil,
32 pycompat,
32 pycompat,
33 scmutil,
33 scmutil,
34 subrepoutil,
34 subrepoutil,
35 util,
35 util,
36 worker,
36 worker,
37 )
37 )
38
38
39 _pack = struct.pack
39 _pack = struct.pack
40 _unpack = struct.unpack
40 _unpack = struct.unpack
41
41
42
42
43 def _getcheckunknownconfig(repo, section, name):
43 def _getcheckunknownconfig(repo, section, name):
44 config = repo.ui.config(section, name)
44 config = repo.ui.config(section, name)
45 valid = [b'abort', b'ignore', b'warn']
45 valid = [b'abort', b'ignore', b'warn']
46 if config not in valid:
46 if config not in valid:
47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
48 raise error.ConfigError(
48 raise error.ConfigError(
49 _(b"%s.%s not valid ('%s' is none of %s)")
49 _(b"%s.%s not valid ('%s' is none of %s)")
50 % (section, name, config, validstr)
50 % (section, name, config, validstr)
51 )
51 )
52 return config
52 return config
53
53
54
54
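# The corresponding settings live in the [merge] section of hgrc and
# must be one of 'abort', 'ignore' or 'warn'; an example configuration:
#
#   [merge]
#   checkunknown = warn
#   checkignored = abort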
55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
56 if wctx.isinmemory():
56 if wctx.isinmemory():
57 # Nothing to do in IMM because nothing in the "working copy" can be an
57 # Nothing to do in IMM because nothing in the "working copy" can be an
58 # unknown file.
58 # unknown file.
59 #
59 #
60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
61 # because that function does other useful work.
61 # because that function does other useful work.
62 return False
62 return False
63
63
64 if f2 is None:
64 if f2 is None:
65 f2 = f
65 f2 = f
66 return (
66 return (
67 repo.wvfs.audit.check(f)
67 repo.wvfs.audit.check(f)
68 and repo.wvfs.isfileorlink(f)
68 and repo.wvfs.isfileorlink(f)
69 and repo.dirstate.normalize(f) not in repo.dirstate
69 and repo.dirstate.normalize(f) not in repo.dirstate
70 and mctx[f2].cmp(wctx[f])
70 and mctx[f2].cmp(wctx[f])
71 )
71 )
72
72
73
73
74 class _unknowndirschecker(object):
74 class _unknowndirschecker(object):
75 """
75 """
76 Look for any unknown files or directories that may have a path conflict
76 Look for any unknown files or directories that may have a path conflict
77 with a file. If any path prefix of the file exists as a file or link,
77 with a file. If any path prefix of the file exists as a file or link,
78 then it conflicts. If the file itself is a directory that contains any
78 then it conflicts. If the file itself is a directory that contains any
79 file that is not tracked, then it conflicts.
79 file that is not tracked, then it conflicts.
80
80
81 Returns the shortest path at which a conflict occurs, or None if there is
81 Returns the shortest path at which a conflict occurs, or None if there is
82 no conflict.
82 no conflict.
83 """
83 """
84
84
85 def __init__(self):
85 def __init__(self):
86 # A set of paths known to be good. This prevents repeated checking of
86 # A set of paths known to be good. This prevents repeated checking of
87 # dirs. It will be updated with any new dirs that are checked and found
87 # dirs. It will be updated with any new dirs that are checked and found
88 # to be safe.
88 # to be safe.
89 self._unknowndircache = set()
89 self._unknowndircache = set()
90
90
91 # A set of paths that are known to be absent. This prevents repeated
91 # A set of paths that are known to be absent. This prevents repeated
92 # checking of subdirectories that are known not to exist. It will be
92 # checking of subdirectories that are known not to exist. It will be
93 # updated with any new dirs that are checked and found to be absent.
93 # updated with any new dirs that are checked and found to be absent.
94 self._missingdircache = set()
94 self._missingdircache = set()
95
95
96 def __call__(self, repo, wctx, f):
96 def __call__(self, repo, wctx, f):
97 if wctx.isinmemory():
97 if wctx.isinmemory():
98 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
98 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
99 return False
99 return False
100
100
101 # Check for path prefixes that exist as unknown files.
101 # Check for path prefixes that exist as unknown files.
102 for p in reversed(list(pathutil.finddirs(f))):
102 for p in reversed(list(pathutil.finddirs(f))):
103 if p in self._missingdircache:
103 if p in self._missingdircache:
104 return
104 return
105 if p in self._unknowndircache:
105 if p in self._unknowndircache:
106 continue
106 continue
107 if repo.wvfs.audit.check(p):
107 if repo.wvfs.audit.check(p):
108 if (
108 if (
109 repo.wvfs.isfileorlink(p)
109 repo.wvfs.isfileorlink(p)
110 and repo.dirstate.normalize(p) not in repo.dirstate
110 and repo.dirstate.normalize(p) not in repo.dirstate
111 ):
111 ):
112 return p
112 return p
113 if not repo.wvfs.lexists(p):
113 if not repo.wvfs.lexists(p):
114 self._missingdircache.add(p)
114 self._missingdircache.add(p)
115 return
115 return
116 self._unknowndircache.add(p)
116 self._unknowndircache.add(p)
117
117
118 # Check if the file conflicts with a directory containing unknown files.
118 # Check if the file conflicts with a directory containing unknown files.
119 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
119 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
120 # Does the directory contain any files that are not in the dirstate?
120 # Does the directory contain any files that are not in the dirstate?
121 for p, dirs, files in repo.wvfs.walk(f):
121 for p, dirs, files in repo.wvfs.walk(f):
122 for fn in files:
122 for fn in files:
123 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
123 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
124 relf = repo.dirstate.normalize(relf, isknown=True)
124 relf = repo.dirstate.normalize(relf, isknown=True)
125 if relf not in repo.dirstate:
125 if relf not in repo.dirstate:
126 return f
126 return f
127 return None
127 return None
128
128
129
129
130 def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
130 def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
131 """
131 """
132 Considers any actions that care about the presence of conflicting unknown
132 Considers any actions that care about the presence of conflicting unknown
133 files. For some actions, the result is to abort; for others, it is to
133 files. For some actions, the result is to abort; for others, it is to
134 choose a different action.
134 choose a different action.
135 """
135 """
136 fileconflicts = set()
136 fileconflicts = set()
137 pathconflicts = set()
137 pathconflicts = set()
138 warnconflicts = set()
138 warnconflicts = set()
139 abortconflicts = set()
139 abortconflicts = set()
140 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
140 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
141 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
141 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
142 pathconfig = repo.ui.configbool(
142 pathconfig = repo.ui.configbool(
143 b'experimental', b'merge.checkpathconflicts'
143 b'experimental', b'merge.checkpathconflicts'
144 )
144 )
145 if not force:
145 if not force:
146
146
147 def collectconflicts(conflicts, config):
147 def collectconflicts(conflicts, config):
148 if config == b'abort':
148 if config == b'abort':
149 abortconflicts.update(conflicts)
149 abortconflicts.update(conflicts)
150 elif config == b'warn':
150 elif config == b'warn':
151 warnconflicts.update(conflicts)
151 warnconflicts.update(conflicts)
152
152
153 checkunknowndirs = _unknowndirschecker()
153 checkunknowndirs = _unknowndirschecker()
154 for f in mresult.files(
154 for f in mresult.files(
155 (
155 (
156 mergestatemod.ACTION_CREATED,
156 mergestatemod.ACTION_CREATED,
157 mergestatemod.ACTION_DELETED_CHANGED,
157 mergestatemod.ACTION_DELETED_CHANGED,
158 )
158 )
159 ):
159 ):
160 if _checkunknownfile(repo, wctx, mctx, f):
160 if _checkunknownfile(repo, wctx, mctx, f):
161 fileconflicts.add(f)
161 fileconflicts.add(f)
162 elif pathconfig and f not in wctx:
162 elif pathconfig and f not in wctx:
163 path = checkunknowndirs(repo, wctx, f)
163 path = checkunknowndirs(repo, wctx, f)
164 if path is not None:
164 if path is not None:
165 pathconflicts.add(path)
165 pathconflicts.add(path)
166 for f, args, msg in mresult.getactions(
166 for f, args, msg in mresult.getactions(
167 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
167 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
168 ):
168 ):
169 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
169 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
170 fileconflicts.add(f)
170 fileconflicts.add(f)
171
171
172 allconflicts = fileconflicts | pathconflicts
172 allconflicts = fileconflicts | pathconflicts
173 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
173 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
174 unknownconflicts = allconflicts - ignoredconflicts
174 unknownconflicts = allconflicts - ignoredconflicts
175 collectconflicts(ignoredconflicts, ignoredconfig)
175 collectconflicts(ignoredconflicts, ignoredconfig)
176 collectconflicts(unknownconflicts, unknownconfig)
176 collectconflicts(unknownconflicts, unknownconfig)
177 else:
177 else:
178 for f, args, msg in list(
178 for f, args, msg in list(
179 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
179 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
180 ):
180 ):
181 fl2, anc = args
181 fl2, anc = args
182 different = _checkunknownfile(repo, wctx, mctx, f)
182 different = _checkunknownfile(repo, wctx, mctx, f)
183 if repo.dirstate._ignore(f):
183 if repo.dirstate._ignore(f):
184 config = ignoredconfig
184 config = ignoredconfig
185 else:
185 else:
186 config = unknownconfig
186 config = unknownconfig
187
187
188 # The behavior when force is True is described by this table:
188 # The behavior when force is True is described by this table:
189 # config different mergeforce | action backup
189 # config different mergeforce | action backup
190 # * n * | get n
190 # * n * | get n
191 # * y y | merge -
191 # * y y | merge -
192 # abort y n | merge - (1)
192 # abort y n | merge - (1)
193 # warn y n | warn + get y
193 # warn y n | warn + get y
194 # ignore y n | get y
194 # ignore y n | get y
195 #
195 #
196 # (1) this is probably the wrong behavior here -- we should
196 # (1) this is probably the wrong behavior here -- we should
197 # probably abort, but some actions like rebases currently
197 # probably abort, but some actions like rebases currently
198 # don't like an abort happening in the middle of
198 # don't like an abort happening in the middle of
199 # merge.update.
199 # merge.update.
200 if not different:
200 if not different:
201 mresult.addfile(
201 mresult.addfile(
202 f,
202 f,
203 mergestatemod.ACTION_GET,
203 mergestatemod.ACTION_GET,
204 (fl2, False),
204 (fl2, False),
205 b'remote created',
205 b'remote created',
206 )
206 )
207 elif mergeforce or config == b'abort':
207 elif mergeforce or config == b'abort':
208 mresult.addfile(
208 mresult.addfile(
209 f,
209 f,
210 mergestatemod.ACTION_MERGE,
210 mergestatemod.ACTION_MERGE,
211 (f, f, None, False, anc),
211 (f, f, None, False, anc),
212 b'remote differs from untracked local',
212 b'remote differs from untracked local',
213 )
213 )
214 elif config == b'abort':
214 elif config == b'abort':
215 abortconflicts.add(f)
215 abortconflicts.add(f)
216 else:
216 else:
217 if config == b'warn':
217 if config == b'warn':
218 warnconflicts.add(f)
218 warnconflicts.add(f)
219 mresult.addfile(
219 mresult.addfile(
220 f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
220 f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
221 )
221 )
222
222
223 for f in sorted(abortconflicts):
223 for f in sorted(abortconflicts):
224 warn = repo.ui.warn
224 warn = repo.ui.warn
225 if f in pathconflicts:
225 if f in pathconflicts:
226 if repo.wvfs.isfileorlink(f):
226 if repo.wvfs.isfileorlink(f):
227 warn(_(b"%s: untracked file conflicts with directory\n") % f)
227 warn(_(b"%s: untracked file conflicts with directory\n") % f)
228 else:
228 else:
229 warn(_(b"%s: untracked directory conflicts with file\n") % f)
229 warn(_(b"%s: untracked directory conflicts with file\n") % f)
230 else:
230 else:
231 warn(_(b"%s: untracked file differs\n") % f)
231 warn(_(b"%s: untracked file differs\n") % f)
232 if abortconflicts:
232 if abortconflicts:
233 raise error.Abort(
233 raise error.Abort(
234 _(
234 _(
235 b"untracked files in working directory "
235 b"untracked files in working directory "
236 b"differ from files in requested revision"
236 b"differ from files in requested revision"
237 )
237 )
238 )
238 )
239
239
240 for f in sorted(warnconflicts):
240 for f in sorted(warnconflicts):
241 if repo.wvfs.isfileorlink(f):
241 if repo.wvfs.isfileorlink(f):
242 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
242 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
243 else:
243 else:
244 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
244 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
245
245
246 for f, args, msg in list(
246 for f, args, msg in list(
247 mresult.getactions([mergestatemod.ACTION_CREATED])
247 mresult.getactions([mergestatemod.ACTION_CREATED])
248 ):
248 ):
249 backup = (
249 backup = (
250 f in fileconflicts
250 f in fileconflicts
251 or f in pathconflicts
251 or f in pathconflicts
252 or any(p in pathconflicts for p in pathutil.finddirs(f))
252 or any(p in pathconflicts for p in pathutil.finddirs(f))
253 )
253 )
254 (flags,) = args
254 (flags,) = args
255 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
255 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
256
256
257
257
258 def _forgetremoved(wctx, mctx, branchmerge):
258 def _forgetremoved(wctx, mctx, branchmerge):
259 """
259 """
260 Forget removed files
260 Forget removed files
261
261
262 If we're jumping between revisions (as opposed to merging), and if
262 If we're jumping between revisions (as opposed to merging), and if
263 neither the working directory nor the target rev has the file,
263 neither the working directory nor the target rev has the file,
264 then we need to remove it from the dirstate, to prevent the
264 then we need to remove it from the dirstate, to prevent the
265 dirstate from listing the file when it is no longer in the
265 dirstate from listing the file when it is no longer in the
266 manifest.
266 manifest.
267
267
268 If we're merging, and the other revision has removed a file
268 If we're merging, and the other revision has removed a file
269 that is not present in the working directory, we need to mark it
269 that is not present in the working directory, we need to mark it
270 as removed.
270 as removed.
271 """
271 """
272
272
273 actions = {}
273 actions = {}
274 m = mergestatemod.ACTION_FORGET
274 m = mergestatemod.ACTION_FORGET
275 if branchmerge:
275 if branchmerge:
276 m = mergestatemod.ACTION_REMOVE
276 m = mergestatemod.ACTION_REMOVE
277 for f in wctx.deleted():
277 for f in wctx.deleted():
278 if f not in mctx:
278 if f not in mctx:
279 actions[f] = m, None, b"forget deleted"
279 actions[f] = m, None, b"forget deleted"
280
280
281 if not branchmerge:
281 if not branchmerge:
282 for f in wctx.removed():
282 for f in wctx.removed():
283 if f not in mctx:
283 if f not in mctx:
284 actions[f] = (
284 actions[f] = (
285 mergestatemod.ACTION_FORGET,
285 mergestatemod.ACTION_FORGET,
286 None,
286 None,
287 b"forget removed",
287 b"forget removed",
288 )
288 )
289
289
290 return actions
290 return actions
291
291
292
292
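# The returned mapping has the same (action, args, message) shape used
# throughout this module, e.g. (hypothetical file name):
#
#   {b'gone.txt': (mergestatemod.ACTION_FORGET, None, b'forget deleted')}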
293 def _checkcollision(repo, wmf, mresult):
293 def _checkcollision(repo, wmf, mresult):
294 """
294 """
295 Check for case-folding collisions.
295 Check for case-folding collisions.
296 """
296 """
297 # If the repo is narrowed, filter out files outside the narrowspec.
297 # If the repo is narrowed, filter out files outside the narrowspec.
298 narrowmatch = repo.narrowmatch()
298 narrowmatch = repo.narrowmatch()
299 if not narrowmatch.always():
299 if not narrowmatch.always():
300 pmmf = set(wmf.walk(narrowmatch))
300 pmmf = set(wmf.walk(narrowmatch))
301 if mresult:
301 if mresult:
302 for f in list(mresult.files()):
302 for f in list(mresult.files()):
303 if not narrowmatch(f):
303 if not narrowmatch(f):
304 mresult.removefile(f)
304 mresult.removefile(f)
305 else:
305 else:
306 # build provisional merged manifest up
306 # build provisional merged manifest up
307 pmmf = set(wmf)
307 pmmf = set(wmf)
308
308
309 if mresult:
309 if mresult:
310 # KEEP and EXEC are no-op
310 # KEEP and EXEC are no-op
311 for f in mresult.files(
311 for f in mresult.files(
312 (
312 (
313 mergestatemod.ACTION_ADD,
313 mergestatemod.ACTION_ADD,
314 mergestatemod.ACTION_ADD_MODIFIED,
314 mergestatemod.ACTION_ADD_MODIFIED,
315 mergestatemod.ACTION_FORGET,
315 mergestatemod.ACTION_FORGET,
316 mergestatemod.ACTION_GET,
316 mergestatemod.ACTION_GET,
317 mergestatemod.ACTION_CHANGED_DELETED,
317 mergestatemod.ACTION_CHANGED_DELETED,
318 mergestatemod.ACTION_DELETED_CHANGED,
318 mergestatemod.ACTION_DELETED_CHANGED,
319 )
319 )
320 ):
320 ):
321 pmmf.add(f)
321 pmmf.add(f)
322 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
322 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
323 pmmf.discard(f)
323 pmmf.discard(f)
324 for f, args, msg in mresult.getactions(
324 for f, args, msg in mresult.getactions(
325 [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
325 [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
326 ):
326 ):
327 f2, flags = args
327 f2, flags = args
328 pmmf.discard(f2)
328 pmmf.discard(f2)
329 pmmf.add(f)
329 pmmf.add(f)
330 for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)):
330 for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)):
331 pmmf.add(f)
331 pmmf.add(f)
332 for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
332 for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
333 f1, f2, fa, move, anc = args
333 f1, f2, fa, move, anc = args
334 if move:
334 if move:
335 pmmf.discard(f1)
335 pmmf.discard(f1)
336 pmmf.add(f)
336 pmmf.add(f)
337
337
338 # check case-folding collision in provisional merged manifest
338 # check case-folding collision in provisional merged manifest
339 foldmap = {}
339 foldmap = {}
340 for f in pmmf:
340 for f in pmmf:
341 fold = util.normcase(f)
341 fold = util.normcase(f)
342 if fold in foldmap:
342 if fold in foldmap:
343 raise error.Abort(
343 raise error.Abort(
344 _(b"case-folding collision between %s and %s")
344 _(b"case-folding collision between %s and %s")
345 % (f, foldmap[fold])
345 % (f, foldmap[fold])
346 )
346 )
347 foldmap[fold] = f
347 foldmap[fold] = f
348
348
349 # check case-folding of directories
349 # check case-folding of directories
350 foldprefix = unfoldprefix = lastfull = b''
350 foldprefix = unfoldprefix = lastfull = b''
351 for fold, f in sorted(foldmap.items()):
351 for fold, f in sorted(foldmap.items()):
352 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
352 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
353 # the folded prefix matches but actual casing is different
353 # the folded prefix matches but actual casing is different
354 raise error.Abort(
354 raise error.Abort(
355 _(b"case-folding collision between %s and directory of %s")
355 _(b"case-folding collision between %s and directory of %s")
356 % (lastfull, f)
356 % (lastfull, f)
357 )
357 )
358 foldprefix = fold + b'/'
358 foldprefix = fold + b'/'
359 unfoldprefix = f + b'/'
359 unfoldprefix = f + b'/'
360 lastfull = f
360 lastfull = f
361
361
362
362
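# For example, on a case-insensitive platform (where util.normcase()
# lowercases paths) a provisional merged manifest containing both
# b'README' and b'readme' folds both names to b'readme', so the first
# loop above raises the b"case-folding collision between %s and %s"
# abort for that pair.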
363 def driverpreprocess(repo, ms, wctx, labels=None):
363 def driverpreprocess(repo, ms, wctx, labels=None):
364 """run the preprocess step of the merge driver, if any
364 """run the preprocess step of the merge driver, if any
365
365
366 This is currently not implemented -- it's an extension point."""
366 This is currently not implemented -- it's an extension point."""
367 return True
367 return True
368
368
369
369
370 def driverconclude(repo, ms, wctx, labels=None):
370 def driverconclude(repo, ms, wctx, labels=None):
371 """run the conclude step of the merge driver, if any
371 """run the conclude step of the merge driver, if any
372
372
373 This is currently not implemented -- it's an extension point."""
373 This is currently not implemented -- it's an extension point."""
374 return True
374 return True
375
375
376
376
377 def _filesindirs(repo, manifest, dirs):
377 def _filesindirs(repo, manifest, dirs):
378 """
378 """
379 Generator that yields pairs of all the files in the manifest that are found
379 Generator that yields pairs of all the files in the manifest that are found
380 inside the directories listed in dirs, and which directory they are found
380 inside the directories listed in dirs, and which directory they are found
381 in.
381 in.
382 """
382 """
383 for f in manifest:
383 for f in manifest:
384 for p in pathutil.finddirs(f):
384 for p in pathutil.finddirs(f):
385 if p in dirs:
385 if p in dirs:
386 yield f, p
386 yield f, p
387 break
387 break
388
388
389
389
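# For example (illustrative arguments; the repo parameter is unused by
# the generator body):
#
#   >>> list(_filesindirs(None, [b'd/a', b'd/e/b', b'x'], {b'd'}))
#   [(b'd/a', b'd'), (b'd/e/b', b'd')]
#
# b'd/e/b' matches because pathutil.finddirs() yields every ancestor
# directory of a file, not just its immediate parent.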
390 def checkpathconflicts(repo, wctx, mctx, mresult):
390 def checkpathconflicts(repo, wctx, mctx, mresult):
391 """
391 """
392 Check if any actions introduce path conflicts in the repository, updating
392 Check if any actions introduce path conflicts in the repository, updating
393 actions to record or handle the path conflict accordingly.
393 actions to record or handle the path conflict accordingly.
394 """
394 """
395 mf = wctx.manifest()
395 mf = wctx.manifest()
396
396
397 # The set of local files that conflict with a remote directory.
397 # The set of local files that conflict with a remote directory.
398 localconflicts = set()
398 localconflicts = set()
399
399
400 # The set of directories that conflict with a remote file, and so may cause
400 # The set of directories that conflict with a remote file, and so may cause
401 # conflicts if they still contain any files after the merge.
401 # conflicts if they still contain any files after the merge.
402 remoteconflicts = set()
402 remoteconflicts = set()
403
403
404 # The set of directories that appear as both a file and a directory in the
404 # The set of directories that appear as both a file and a directory in the
405 # remote manifest. These indicate an invalid remote manifest, which
405 # remote manifest. These indicate an invalid remote manifest, which
406 # can't be updated to cleanly.
406 # can't be updated to cleanly.
407 invalidconflicts = set()
407 invalidconflicts = set()
408
408
409 # The set of directories that contain files that are being created.
409 # The set of directories that contain files that are being created.
410 createdfiledirs = set()
410 createdfiledirs = set()
411
411
412 # The set of files deleted by all the actions.
412 # The set of files deleted by all the actions.
413 deletedfiles = set()
413 deletedfiles = set()
414
414
415 for f in mresult.files(
415 for f in mresult.files(
416 (
416 (
417 mergestatemod.ACTION_CREATED,
417 mergestatemod.ACTION_CREATED,
418 mergestatemod.ACTION_DELETED_CHANGED,
418 mergestatemod.ACTION_DELETED_CHANGED,
419 mergestatemod.ACTION_MERGE,
419 mergestatemod.ACTION_MERGE,
420 mergestatemod.ACTION_CREATED_MERGE,
420 mergestatemod.ACTION_CREATED_MERGE,
421 )
421 )
422 ):
422 ):
423 # This action may create a new local file.
423 # This action may create a new local file.
424 createdfiledirs.update(pathutil.finddirs(f))
424 createdfiledirs.update(pathutil.finddirs(f))
425 if mf.hasdir(f):
425 if mf.hasdir(f):
426 # The file aliases a local directory. This might be ok if all
426 # The file aliases a local directory. This might be ok if all
427 # the files in the local directory are being deleted. This
427 # the files in the local directory are being deleted. This
428 # will be checked once we know what all the deleted files are.
428 # will be checked once we know what all the deleted files are.
429 remoteconflicts.add(f)
429 remoteconflicts.add(f)
430 # Track the names of all deleted files.
430 # Track the names of all deleted files.
431 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
431 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
432 deletedfiles.add(f)
432 deletedfiles.add(f)
433 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
433 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
434 f1, f2, fa, move, anc = args
434 f1, f2, fa, move, anc = args
435 if move:
435 if move:
436 deletedfiles.add(f1)
436 deletedfiles.add(f1)
437 for (f, args, msg) in mresult.getactions(
437 for (f, args, msg) in mresult.getactions(
438 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
438 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
439 ):
439 ):
440 f2, flags = args
440 f2, flags = args
441 deletedfiles.add(f2)
441 deletedfiles.add(f2)
442
442
443 # Check all directories that contain created files for path conflicts.
443 # Check all directories that contain created files for path conflicts.
444 for p in createdfiledirs:
444 for p in createdfiledirs:
445 if p in mf:
445 if p in mf:
446 if p in mctx:
446 if p in mctx:
447 # A file is in a directory which aliases both a local
447 # A file is in a directory which aliases both a local
448 # and a remote file. This is an internal inconsistency
448 # and a remote file. This is an internal inconsistency
449 # within the remote manifest.
449 # within the remote manifest.
450 invalidconflicts.add(p)
450 invalidconflicts.add(p)
451 else:
451 else:
452 # A file is in a directory which aliases a local file.
452 # A file is in a directory which aliases a local file.
453 # We will need to rename the local file.
453 # We will need to rename the local file.
454 localconflicts.add(p)
454 localconflicts.add(p)
455 pd = mresult.getfile(p)
455 pd = mresult.getfile(p)
456 if pd and pd[0] in (
456 if pd and pd[0] in (
457 mergestatemod.ACTION_CREATED,
457 mergestatemod.ACTION_CREATED,
458 mergestatemod.ACTION_DELETED_CHANGED,
458 mergestatemod.ACTION_DELETED_CHANGED,
459 mergestatemod.ACTION_MERGE,
459 mergestatemod.ACTION_MERGE,
460 mergestatemod.ACTION_CREATED_MERGE,
460 mergestatemod.ACTION_CREATED_MERGE,
461 ):
461 ):
462 # The file is in a directory which aliases a remote file.
462 # The file is in a directory which aliases a remote file.
463 # This is an internal inconsistency within the remote
463 # This is an internal inconsistency within the remote
464 # manifest.
464 # manifest.
465 invalidconflicts.add(p)
465 invalidconflicts.add(p)
466
466
467 # Rename all local conflicting files that have not been deleted.
467 # Rename all local conflicting files that have not been deleted.
468 for p in localconflicts:
468 for p in localconflicts:
469 if p not in deletedfiles:
469 if p not in deletedfiles:
470 ctxname = bytes(wctx).rstrip(b'+')
470 ctxname = bytes(wctx).rstrip(b'+')
471 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
471 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
472 porig = wctx[p].copysource() or p
472 porig = wctx[p].copysource() or p
473 mresult.addfile(
473 mresult.addfile(
474 pnew,
474 pnew,
475 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
475 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
476 (p, porig),
476 (p, porig),
477 b'local path conflict',
477 b'local path conflict',
478 )
478 )
479 mresult.addfile(
479 mresult.addfile(
480 p,
480 p,
481 mergestatemod.ACTION_PATH_CONFLICT,
481 mergestatemod.ACTION_PATH_CONFLICT,
482 (pnew, b'l'),
482 (pnew, b'l'),
483 b'path conflict',
483 b'path conflict',
484 )
484 )
485
485
486 if remoteconflicts:
486 if remoteconflicts:
487 # Check if all files in the conflicting directories have been removed.
487 # Check if all files in the conflicting directories have been removed.
488 ctxname = bytes(mctx).rstrip(b'+')
488 ctxname = bytes(mctx).rstrip(b'+')
489 for f, p in _filesindirs(repo, mf, remoteconflicts):
489 for f, p in _filesindirs(repo, mf, remoteconflicts):
490 if f not in deletedfiles:
490 if f not in deletedfiles:
491 m, args, msg = mresult.getfile(p)
491 m, args, msg = mresult.getfile(p)
492 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
492 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
493 if m in (
493 if m in (
494 mergestatemod.ACTION_DELETED_CHANGED,
494 mergestatemod.ACTION_DELETED_CHANGED,
495 mergestatemod.ACTION_MERGE,
495 mergestatemod.ACTION_MERGE,
496 ):
496 ):
497 # Action was merge, just update target.
497 # Action was merge, just update target.
498 mresult.addfile(pnew, m, args, msg)
498 mresult.addfile(pnew, m, args, msg)
499 else:
499 else:
500 # Action was create, change to renamed get action.
500 # Action was create, change to renamed get action.
501 fl = args[0]
501 fl = args[0]
502 mresult.addfile(
502 mresult.addfile(
503 pnew,
503 pnew,
504 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
504 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
505 (p, fl),
505 (p, fl),
506 b'remote path conflict',
506 b'remote path conflict',
507 )
507 )
508 mresult.addfile(
508 mresult.addfile(
509 p,
509 p,
510 mergestatemod.ACTION_PATH_CONFLICT,
510 mergestatemod.ACTION_PATH_CONFLICT,
511 (pnew, mergestatemod.ACTION_REMOVE),
511 (pnew, mergestatemod.ACTION_REMOVE),
512 b'path conflict',
512 b'path conflict',
513 )
513 )
514 remoteconflicts.remove(p)
514 remoteconflicts.remove(p)
515 break
515 break
516
516
517 if invalidconflicts:
517 if invalidconflicts:
518 for p in invalidconflicts:
518 for p in invalidconflicts:
519 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
519 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
520 raise error.Abort(_(b"destination manifest contains path conflicts"))
520 raise error.Abort(_(b"destination manifest contains path conflicts"))
521
521
522
522
523 def _filternarrowactions(narrowmatch, branchmerge, mresult):
523 def _filternarrowactions(narrowmatch, branchmerge, mresult):
524 """
524 """
525 Filters out actions that can be ignored because the repo is narrowed.
525 Filters out actions that can be ignored because the repo is narrowed.
526
526
527 Raise an exception if the merge cannot be completed because the repo is
527 Raise an exception if the merge cannot be completed because the repo is
528 narrowed.
528 narrowed.
529 """
529 """
530 # TODO: handle with nonconflicttypes
530 # TODO: handle with nonconflicttypes
531 nooptypes = {mergestatemod.ACTION_KEEP}
531 nooptypes = {mergestatemod.ACTION_KEEP}
532 nonconflicttypes = {
532 nonconflicttypes = {
533 mergestatemod.ACTION_ADD,
533 mergestatemod.ACTION_ADD,
534 mergestatemod.ACTION_ADD_MODIFIED,
534 mergestatemod.ACTION_ADD_MODIFIED,
535 mergestatemod.ACTION_CREATED,
535 mergestatemod.ACTION_CREATED,
536 mergestatemod.ACTION_CREATED_MERGE,
536 mergestatemod.ACTION_CREATED_MERGE,
537 mergestatemod.ACTION_FORGET,
537 mergestatemod.ACTION_FORGET,
538 mergestatemod.ACTION_GET,
538 mergestatemod.ACTION_GET,
539 mergestatemod.ACTION_REMOVE,
539 mergestatemod.ACTION_REMOVE,
540 mergestatemod.ACTION_EXEC,
540 mergestatemod.ACTION_EXEC,
541 }
541 }
542 # We mutate the items in the dict during iteration, so iterate
542 # We mutate the items in the dict during iteration, so iterate
543 # over a copy.
543 # over a copy.
544 for f, action in list(mresult.actions.items()):
544 for f, action in list(mresult.filemap()):
545 if narrowmatch(f):
545 if narrowmatch(f):
546 pass
546 pass
547 elif not branchmerge:
547 elif not branchmerge:
548 mresult.removefile(f) # just updating, ignore changes outside clone
548 mresult.removefile(f) # just updating, ignore changes outside clone
549 elif action[0] in nooptypes:
549 elif action[0] in nooptypes:
550 mresult.removefile(f) # merge does not affect file
550 mresult.removefile(f) # merge does not affect file
551 elif action[0] in nonconflicttypes:
551 elif action[0] in nonconflicttypes:
552 raise error.Abort(
552 raise error.Abort(
553 _(
553 _(
554 b'merge affects file \'%s\' outside narrow, '
554 b'merge affects file \'%s\' outside narrow, '
555 b'which is not yet supported'
555 b'which is not yet supported'
556 )
556 )
557 % f,
557 % f,
558 hint=_(b'merging in the other direction may work'),
558 hint=_(b'merging in the other direction may work'),
559 )
559 )
560 else:
560 else:
561 raise error.Abort(
561 raise error.Abort(
562 _(b'conflict in file \'%s\' is outside narrow clone') % f
562 _(b'conflict in file \'%s\' is outside narrow clone') % f
563 )
563 )
564
564
565
565
566 class mergeresult(object):
566 class mergeresult(object):
567 '''An object representing the result of merging manifests.
567 '''An object representing the result of merging manifests.
568
568
569 It has information about what actions need to be performed on the
569 It has information about what actions need to be performed on the
570 dirstate, mappings of divergent renames, and other such cases. '''
570 dirstate, mappings of divergent renames, and other such cases. '''
571
571
572 def __init__(self):
572 def __init__(self):
573 """
573 """
574 filemapping: dict of filename as keys and action related info as values
574 filemapping: dict of filename as keys and action related info as values
575 diverge: mapping of source name -> list of dest name for
575 diverge: mapping of source name -> list of dest name for
576 divergent renames
576 divergent renames
577 renamedelete: mapping of source name -> list of destinations for files
577 renamedelete: mapping of source name -> list of destinations for files
578 deleted on one side and renamed on other.
578 deleted on one side and renamed on other.
579 commitinfo: dict containing data which should be used on commit
579 commitinfo: dict containing data which should be used on commit
580 contains a filename -> info mapping
580 contains a filename -> info mapping
581 actionmapping: dict of action names as keys and values are dict of
581 actionmapping: dict of action names as keys and values are dict of
582 filename as key and related data as values
582 filename as key and related data as values
583 """
583 """
584 self._filemapping = {}
584 self._filemapping = {}
585 self._diverge = {}
585 self._diverge = {}
586 self._renamedelete = {}
586 self._renamedelete = {}
587 self._commitinfo = {}
587 self._commitinfo = {}
588 self._actionmapping = collections.defaultdict(dict)
588 self._actionmapping = collections.defaultdict(dict)
589
589
590 def updatevalues(self, diverge, renamedelete, commitinfo):
590 def updatevalues(self, diverge, renamedelete, commitinfo):
591 self._diverge = diverge
591 self._diverge = diverge
592 self._renamedelete = renamedelete
592 self._renamedelete = renamedelete
593 self._commitinfo = commitinfo
593 self._commitinfo = commitinfo
594
594
595 def addfile(self, filename, action, data, message):
595 def addfile(self, filename, action, data, message):
596 """ adds a new file to the mergeresult object
596 """ adds a new file to the mergeresult object
597
597
598 filename: file which we are adding
598 filename: file which we are adding
599 action: one of mergestatemod.ACTION_*
599 action: one of mergestatemod.ACTION_*
600 data: a tuple of information like fctx and ctx related to this merge
600 data: a tuple of information like fctx and ctx related to this merge
601 message: a message about the merge
601 message: a message about the merge
602 """
602 """
603 # if the file already existed, we need to delete its old
603 # if the file already existed, we need to delete its old
604 # entry from _actionmapping too
604 # entry from _actionmapping too
605 if filename in self._filemapping:
605 if filename in self._filemapping:
606 a, d, m = self._filemapping[filename]
606 a, d, m = self._filemapping[filename]
607 del self._actionmapping[a][filename]
607 del self._actionmapping[a][filename]
608
608
609 self._filemapping[filename] = (action, data, message)
609 self._filemapping[filename] = (action, data, message)
610 self._actionmapping[action][filename] = (data, message)
610 self._actionmapping[action][filename] = (data, message)
611
611
612 def getfile(self, filename, default_return=None):
612 def getfile(self, filename, default_return=None):
613 """ returns (action, args, msg) about this file
613 """ returns (action, args, msg) about this file
614
614
615 returns default_return if the file is not present """
615 returns default_return if the file is not present """
616 if filename in self._filemapping:
616 if filename in self._filemapping:
617 return self._filemapping[filename]
617 return self._filemapping[filename]
618 return default_return
618 return default_return
619
619
620 def files(self, actions=None):
620 def files(self, actions=None):
621 """ returns files on which the provided actions need to be performed
621 """ returns files on which the provided actions need to be performed
622
622
623 If actions is None, all files are returned
623 If actions is None, all files are returned
624 """
624 """
625 # TODO: think whether we should return renamedelete and
625 # TODO: think whether we should return renamedelete and
626 # diverge filenames also
626 # diverge filenames also
627 if actions is None:
627 if actions is None:
628 for f in self._filemapping:
628 for f in self._filemapping:
629 yield f
629 yield f
630
630
631 else:
631 else:
632 for a in actions:
632 for a in actions:
633 for f in self._actionmapping[a]:
633 for f in self._actionmapping[a]:
634 yield f
634 yield f
635
635
636 def removefile(self, filename):
636 def removefile(self, filename):
637 """ removes a file from the mergeresult object as the file might
637 """ removes a file from the mergeresult object as the file might
638 not be merged anymore """
638 not be merged anymore """
639 action, data, message = self._filemapping[filename]
639 action, data, message = self._filemapping[filename]
640 del self._filemapping[filename]
640 del self._filemapping[filename]
641 del self._actionmapping[action][filename]
641 del self._actionmapping[action][filename]
642
642
643 def getactions(self, actions, sort=False):
643 def getactions(self, actions, sort=False):
644 """ get list of files which are marked with these actions
644 """ get list of files which are marked with these actions
645 if sort is true, files for each action are sorted and then added
645 if sort is true, files for each action are sorted and then added
646
646
647 Returns a list of tuple of form (filename, data, message)
647 Returns a list of tuple of form (filename, data, message)
648 """
648 """
649 for a in actions:
649 for a in actions:
650 if sort:
650 if sort:
651 for f in sorted(self._actionmapping[a]):
651 for f in sorted(self._actionmapping[a]):
652 args, msg = self._actionmapping[a][f]
652 args, msg = self._actionmapping[a][f]
653 yield f, args, msg
653 yield f, args, msg
654 else:
654 else:
655 for f, (args, msg) in pycompat.iteritems(
655 for f, (args, msg) in pycompat.iteritems(
656 self._actionmapping[a]
656 self._actionmapping[a]
657 ):
657 ):
658 yield f, args, msg
658 yield f, args, msg
659
659
660 def len(self, actions=None):
660 def len(self, actions=None):
661 """ returns the number of files which need actions
661 """ returns the number of files which need actions
662
662
663 if actions is passed, only the total number of files in those
663 if actions is passed, only the total number of files in those
664 actions is returned """
664 actions is returned """
665
665
666 if actions is None:
666 if actions is None:
667 return len(self._filemapping)
667 return len(self._filemapping)
668
668
669 return sum(len(self._actionmapping[a]) for a in actions)
669 return sum(len(self._actionmapping[a]) for a in actions)
670
670
671 @property
671 def filemap(self, sort=False):
672 def actions(self):
672 if sort:
673 return self._filemapping
673 for key, val in sorted(pycompat.iteritems(self._filemapping)):
674 yield key, val
675 else:
676 for key, val in pycompat.iteritems(self._filemapping):
677 yield key, val
674
678
675 @property
679 @property
676 def diverge(self):
680 def diverge(self):
677 return self._diverge
681 return self._diverge
678
682
679 @property
683 @property
680 def renamedelete(self):
684 def renamedelete(self):
681 return self._renamedelete
685 return self._renamedelete
682
686
683 @property
687 @property
684 def commitinfo(self):
688 def commitinfo(self):
685 return self._commitinfo
689 return self._commitinfo
686
690
687 @property
691 @property
688 def actionsdict(self):
692 def actionsdict(self):
689 """ returns a dictionary of actions to be performed with action as key
693 """ returns a dictionary of actions to be performed with action as key
690 and a list of files and related arguments as values """
694 and a list of files and related arguments as values """
691 res = emptyactions()
695 res = emptyactions()
692 for a, d in pycompat.iteritems(self._actionmapping):
696 for a, d in pycompat.iteritems(self._actionmapping):
693 for f, (args, msg) in pycompat.iteritems(d):
697 for f, (args, msg) in pycompat.iteritems(d):
694 res[a].append((f, args, msg))
698 res[a].append((f, args, msg))
695 return res
699 return res
696
700
697 def setactions(self, actions):
701 def setactions(self, actions):
698 self._filemapping = actions
702 self._filemapping = actions
699 self._actionmapping = collections.defaultdict(dict)
703 self._actionmapping = collections.defaultdict(dict)
700 for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
704 for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
701 self._actionmapping[act][f] = data, msg
705 self._actionmapping[act][f] = data, msg
702
706
703 def updateactions(self, updates):
707 def updateactions(self, updates):
704 for f, (a, data, msg) in pycompat.iteritems(updates):
708 for f, (a, data, msg) in pycompat.iteritems(updates):
705 self.addfile(f, a, data, msg)
709 self.addfile(f, a, data, msg)
706
710
707 def hasconflicts(self):
711 def hasconflicts(self):
708 """ tells whether this merge resulted in any actions which can
712 """ tells whether this merge resulted in any actions which can
709 cause conflicts or not """
713 cause conflicts or not """
710 for a in self._actionmapping.keys():
714 for a in self._actionmapping.keys():
711 if (
715 if (
712 a
716 a
713 not in (
717 not in (
714 mergestatemod.ACTION_GET,
718 mergestatemod.ACTION_GET,
715 mergestatemod.ACTION_KEEP,
719 mergestatemod.ACTION_KEEP,
716 mergestatemod.ACTION_EXEC,
720 mergestatemod.ACTION_EXEC,
717 mergestatemod.ACTION_REMOVE,
721 mergestatemod.ACTION_REMOVE,
718 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
722 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
719 )
723 )
720 and self._actionmapping[a]
724 and self._actionmapping[a]
721 ):
725 ):
722 return True
726 return True
723
727
724 return False
728 return False
725
729
726
730
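# A minimal usage sketch of the mergeresult API above (file name, flags
# and messages are made up):
#
#   mresult = mergeresult()
#   mresult.addfile(
#       b'a.txt', mergestatemod.ACTION_GET, (b'', False), b'remote created'
#   )
#   mresult.getfile(b'a.txt')
#   # -> (ACTION_GET, (b'', False), b'remote created')
#   list(mresult.files((mergestatemod.ACTION_GET,)))
#   # -> [b'a.txt']
#   for fname, (action, data, msg) in mresult.filemap(sort=True):
#       pass  # filename-based iteration introduced by this change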
727 def manifestmerge(
731 def manifestmerge(
728 repo,
732 repo,
729 wctx,
733 wctx,
730 p2,
734 p2,
731 pa,
735 pa,
732 branchmerge,
736 branchmerge,
733 force,
737 force,
734 matcher,
738 matcher,
735 acceptremote,
739 acceptremote,
736 followcopies,
740 followcopies,
737 forcefulldiff=False,
741 forcefulldiff=False,
738 ):
742 ):
739 """
743 """
740 Merge wctx and p2 with ancestor pa and generate merge action list
744 Merge wctx and p2 with ancestor pa and generate merge action list
741
745
742 branchmerge and force are as passed in to update
746 branchmerge and force are as passed in to update
743 matcher = matcher to filter file lists
747 matcher = matcher to filter file lists
744 acceptremote = accept the incoming changes without prompting
748 acceptremote = accept the incoming changes without prompting
745
749
746 Returns an object of mergeresult class
750 Returns an object of mergeresult class
747 """
751 """
    mresult = mergeresult()
    if matcher is not None and matcher.always():
        matcher = None

    # manifests fetched in order are going to be faster, so prime the caches
    [
        x.manifest()
        for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
    ]

    branch_copies1 = copies.branch_copies()
    branch_copies2 = copies.branch_copies()
    diverge = {}
    # information from the merge which is needed at commit time,
    # for example, choosing which parent's filelog to commit
    # TODO: use specific constants in future for this mapping
    commitinfo = {}
    if followcopies:
        branch_copies1, branch_copies2, diverge = copies.mergecopies(
            repo, wctx, p2, pa
        )

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_(b"resolving manifests\n"))
    repo.ui.debug(
        b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
    )
    repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied1 = set(branch_copies1.copy.values())
    copied1.update(branch_copies1.movewithdir.values())
    copied2 = set(branch_copies2.copy.values())
    copied2.update(branch_copies2.movewithdir.values())

    if b'.hgsubstate' in m1 and wctx.rev() is None:
        # Check whether sub state is modified, and overwrite the manifest
        # to flag the change. If wctx is a committed revision, we shouldn't
        # care for the dirty state of the working directory.
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1[b'.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during
    #   bid merge.
    if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in branch_copies1.movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

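    # Illustrative note (an assumption based on how the manifest diff is
    # unpacked below): diff maps each differing path to a pair of
    # (node, flags) tuples for the two sides, with None nodes where a side
    # lacks the file, e.g.:
    #   {b'a.txt': ((n1, b''), (n2, b'x'))}   # changed, +exec on remote
    #   {b'b.txt': ((n1, b''), (None, b''))}  # present locally only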
    for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
        if n1 and n2:  # file exists on both local and remote side
            if f not in ma:
                # TODO: what if they're renamed from different sources?
                fa = branch_copies1.copy.get(
                    f, None
                ) or branch_copies2.copy.get(f, None)
                args, msg = None, None
                if fa is not None:
                    args = (f, f, fa, False, pa.node())
                    msg = b'both renamed from %s' % fa
                else:
                    args = (f, f, None, False, pa.node())
                    msg = b'both created'
                mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = b'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    mresult.addfile(
                        f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
                    )
                elif n1 == a and fl1 == fla:  # local unchanged - use remote
                    if n1 == n2:  # optimization: keep local content
                        mresult.addfile(
                            f,
                            mergestatemod.ACTION_EXEC,
                            (fl2,),
                            b'update permissions',
                        )
                    else:
                        mresult.addfile(
                            f,
                            mergestatemod.ACTION_GET,
                            (fl2, False),
                            b'remote is newer',
                        )
                        if branchmerge:
                            commitinfo[f] = b'other'
                elif nol and n2 == a:  # remote only changed 'x'
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_EXEC,
                        (fl2,),
                        b'update permissions',
                    )
                elif nol and n1 == a:  # local only changed 'x'
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_GET,
                        (fl1, False),
                        b'remote is newer',
                    )
                    if branchmerge:
                        commitinfo[f] = b'other'
                else:  # both changed something
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_MERGE,
                        (f, f, f, False, pa.node()),
                        b'versions differ',
                    )
        elif n1:  # file exists only on local side
            if f in copied2:
                pass  # we'll deal with it on m2 side
            elif (
                f in branch_copies1.movewithdir
            ):  # directory rename, move local
                f2 = branch_copies1.movewithdir[f]
                if f2 in m2:
                    mresult.addfile(
                        f2,
                        mergestatemod.ACTION_MERGE,
                        (f, f2, None, True, pa.node()),
                        b'remote directory rename, both created',
                    )
                else:
                    mresult.addfile(
                        f2,
                        mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
                        (f, fl1),
                        b'remote directory rename - move from %s' % f,
                    )
            elif f in branch_copies1.copy:
                f2 = branch_copies1.copy[f]
                mresult.addfile(
                    f,
                    mergestatemod.ACTION_MERGE,
                    (f, f2, f2, False, pa.node()),
                    b'local copied/moved from %s' % f2,
                )
            elif f in ma:  # exists locally and in ancestor, not on remote
                if n1 != ma[f]:
                    if acceptremote:
                        mresult.addfile(
                            f,
                            mergestatemod.ACTION_REMOVE,
                            None,
                            b'remote delete',
                        )
                    else:
                        mresult.addfile(
                            f,
                            mergestatemod.ACTION_CHANGED_DELETED,
                            (f, None, f, False, pa.node()),
                            b'prompt changed/deleted',
                        )
                elif n1 == addednodeid:
                    # This file was locally added. We should forget it
                    # instead of deleting it.
                    mresult.addfile(
                        f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
                    )
                else:
                    mresult.addfile(
                        f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
                    )
        elif n2:  # file exists only on remote side
            if f in copied1:
                pass  # we'll deal with it on m1 side
            elif f in branch_copies2.movewithdir:
                f2 = branch_copies2.movewithdir[f]
                if f2 in m1:
                    mresult.addfile(
                        f2,
                        mergestatemod.ACTION_MERGE,
                        (f2, f, None, False, pa.node()),
                        b'local directory rename, both created',
                    )
                else:
                    mresult.addfile(
                        f2,
                        mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
                        (f, fl2),
                        b'local directory rename - get from %s' % f,
                    )
            elif f in branch_copies2.copy:
                f2 = branch_copies2.copy[f]
                msg, args = None, None
                if f2 in m2:
                    args = (f2, f, f2, False, pa.node())
                    msg = b'remote copied from %s' % f2
                else:
                    args = (f2, f, f2, True, pa.node())
                    msg = b'remote moved from %s' % f2
                mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |  create
                #   y         n           *      |  create
                #   y         y           n      |  create
                #   y         y           y      |  merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_CREATED,
                        (fl2,),
                        b'remote created',
                    )
                elif not branchmerge:
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_CREATED,
                        (fl2,),
                        b'remote created',
                    )
                else:
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_CREATED_MERGE,
                        (fl2, pa.node()),
                        b'remote created, get or merge',
                    )
            elif n2 != ma[f]:
                df = None
                for d in branch_copies1.dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = branch_copies1.dirmove[d] + f[len(d) :]
                        break
                if df is not None and df in m1:
                    mresult.addfile(
                        df,
                        mergestatemod.ACTION_MERGE,
                        (df, f, f, False, pa.node()),
                        b'local directory rename - respect move '
                        b'from %s' % f,
                    )
                elif acceptremote:
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_CREATED,
                        (fl2,),
                        b'remote recreating',
                    )
                else:
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_DELETED_CHANGED,
                        (None, f, f, False, pa.node()),
                        b'prompt deleted/changed',
                    )

    if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
        # If we are merging, look for path conflicts.
        checkpathconflicts(repo, wctx, p2, mresult)

    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        # Updates mresult in place
        _filternarrowactions(narrowmatch, branchmerge, mresult)

    renamedelete = branch_copies1.renamedelete
    renamedelete.update(branch_copies2.renamedelete)

    mresult.updatevalues(diverge, renamedelete, commitinfo)
    return mresult


def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""
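    # Illustrative example (hypothetical filename): if both sides touched
    # b'a.txt' but the remote side ended up byte-identical to the ancestor,
    # the deleted/changed "conflict" below is dropped and the local deletion
    # simply wins; no prompt is needed.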
    # We force a copy of the file lists because we're going to mutate
    # mresult as we resolve trivial conflicts.
    for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))):
        if f in ancestor and not wctx[f].cmp(ancestor[f]):
            # local did change but ended up with same content
            mresult.addfile(
                f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
            )

    for f in list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,))):
        if f in ancestor and not mctx[f].cmp(ancestor[f]):
            # remote did change but ended up with same content
            mresult.removefile(f)  # don't get = keep local deleted


def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """
    Calculate the actions needed to merge mctx into wctx using ancestors

    Uses manifestmerge() to merge the manifests and get the list of actions
    required to merge them. If there are multiple ancestors, uses bid merge
    if enabled.

    Also filters out actions which are not required if the repository is
    sparse.

    Returns a mergeresult object, the same as manifestmerge().
    """
    # Avoid cycle.
    from . import sparse

    mresult = None
    if len(ancestors) == 1:  # default
        mresult = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # mapping of filename to bids (action method to list of actions)
        # {FILENAME1 : BID1, FILENAME2 : BID2}
        # BID is another dictionary which contains
        # a mapping of the following form:
        # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
        fbids = {}
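        # Illustrative sketch (hypothetical filename and payloads, not from
        # the original code): after two ancestors have bid, fbids could look
        # like
        #   {b'a.txt': {
        #       ACTION_GET: [(ACTION_GET, (b'', False), b'remote is newer')],
        #       ACTION_KEEP: [(ACTION_KEEP, (), b'remote unchanged')],
        #   }}
        # i.e. each bid is the full (action, args, message) tuple yielded by
        # filemap() below.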
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            mresult1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)

            # Track the shortest set of warnings on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(mresult1.diverge) < len(diverge):
                diverge = mresult1.diverge
            if renamedelete is None or len(renamedelete) < len(
                mresult1.renamedelete
            ):
                renamedelete = mresult1.renamedelete

            for f, a in mresult1.filemap(sort=True):
                m, args, msg = a
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Call for bids
        # Pick the best bid for each file
        repo.ui.note(_(b'\nauction for merging merge bids\n'))
        mresult = mergeresult()
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
                    mresult.addfile(f, *l[0])
                    continue
            # If keep is an option, just do it.
            if mergestatemod.ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if mergestatemod.ACTION_GET in bids:
                ga0 = bids[mergestatemod.ACTION_GET][0]
                if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    mresult.addfile(f, *ga0)
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
            )
            mresult.addfile(f, *l[0])
            continue
        repo.ui.note(_(b'end of auction\n\n'))
        # TODO: think about commitinfo when bid merge is used
        mresult.updatevalues(diverge, renamedelete, {})

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        mresult.updateactions(fractions)

    sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
    _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)

    return mresult


def _getcwd():
    try:
        return encoding.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None
        raise


def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_(b"removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(
                _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
            )
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(
            _(
                b"current directory was removed\n"
                b"(consider changing to repo root: %s)\n"
            )
            % repo.root
        )


def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_(b"getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
                    for p in pathutil.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            conflicting = p
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            wfctx.clearunknown()
            atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
            size = wfctx.write(
                fctx(f).data(),
                flags,
                backgroundclose=True,
                atomictemp=atomictemp,
            )
            if wantfiledata:
                s = wfctx.lstat()
                mode = s.st_mode
                mtime = s[stat.ST_MTIME]
                filedata[f] = (mode, size, mtime)  # for dirstate.normal
            if i == 100:
                yield False, (i, f)
                i = 0
            i += 1
    if i > 0:
        yield False, (i, f)
    yield True, filedata


def _prefetchfiles(repo, ctx, mresult):
    """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the given
    merge result. ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    files = mresult.files(
        [
            mergestatemod.ACTION_GET,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
            mergestatemod.ACTION_MERGE,
        ]
    )

    prefetch = scmutil.prefetchfiles
    matchfiles = scmutil.matchfiles
    prefetch(
        repo, [(ctx.rev(), matchfiles(repo, files),)],
    )


@attr.s(frozen=True)
class updateresult(object):
    updatedcount = attr.ib()
    mergedcount = attr.ib()
    removedcount = attr.ib()
    unresolvedcount = attr.ib()

    def isempty(self):
        return not (
            self.updatedcount
            or self.mergedcount
            or self.removedcount
            or self.unresolvedcount
        )


def emptyactions():
    """create an actions dict, to be populated and passed to applyupdates()"""
    return {
        m: []
        for m in (
            mergestatemod.ACTION_ADD,
            mergestatemod.ACTION_ADD_MODIFIED,
            mergestatemod.ACTION_FORGET,
            mergestatemod.ACTION_GET,
            mergestatemod.ACTION_CHANGED_DELETED,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_REMOVE,
            mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
            mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
            mergestatemod.ACTION_MERGE,
            mergestatemod.ACTION_EXEC,
            mergestatemod.ACTION_KEEP,
            mergestatemod.ACTION_PATH_CONFLICT,
            mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
        )
    }


def applyupdates(
    repo,
    mresult,
    wctx,
    mctx,
    overwrite,
    wantfiledata,
    labels=None,
    commitinfo=None,
):
    """apply the merge action list to the working directory

    mresult is a mergeresult object representing the result of the merge
    wctx is the working copy context
    mctx is the context to be merged into the working copy
    commitinfo is a mapping of information which needs to be stored somewhere
    (probably mergestate) so that it can be used at commit time.

    Return a tuple of (counts, filedata), where counts is a tuple
    (updated, merged, removed, unresolved) that describes how many
    files were affected by the update, and filedata is as described in
    batchget.
    """
1393
1397
1394 _prefetchfiles(repo, mctx, mresult)
1398 _prefetchfiles(repo, mctx, mresult)
1395
1399
1396 updated, merged, removed = 0, 0, 0
1400 updated, merged, removed = 0, 0, 0
1397 ms = mergestatemod.mergestate.clean(
1401 ms = mergestatemod.mergestate.clean(
1398 repo, wctx.p1().node(), mctx.node(), labels
1402 repo, wctx.p1().node(), mctx.node(), labels
1399 )
1403 )
1400
1404
1401 if commitinfo is None:
1405 if commitinfo is None:
1402 commitinfo = {}
1406 commitinfo = {}
1403
1407
1404 for f, op in pycompat.iteritems(commitinfo):
1408 for f, op in pycompat.iteritems(commitinfo):
1405 # the other side of filenode was choosen while merging, store this in
1409 # the other side of filenode was choosen while merging, store this in
1406 # mergestate so that it can be reused on commit
1410 # mergestate so that it can be reused on commit
1407 if op == b'other':
1411 if op == b'other':
1408 ms.addmergedother(f)
1412 ms.addmergedother(f)
1409
1413
    moves = []

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = list(
        mresult.getactions(
            [
                mergestatemod.ACTION_CHANGED_DELETED,
                mergestatemod.ACTION_DELETED_CHANGED,
                mergestatemod.ACTION_MERGE,
            ],
            sort=True,
        )
    )
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == b'.hgsubstate':  # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug(b"removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = mresult.len() - mresult.len((mergestatemod.ACTION_KEEP,))
    progress = repo.ui.makeprogress(
        _(b'updating'), unit=_(b'files'), total=numupdates
    )

    if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in mresult.getactions(
        [mergestatemod.ACTION_PATH_CONFLICT], sort=True
    ):
        f1, fo = args
        s = repo.ui.status
        s(
            _(
                b"%s: path conflict - a file or link has the same name as a "
                b"directory\n"
            )
            % f
        )
        if fo == b'l':
            s(_(b"the local file has been renamed to %s\n") % f1)
        else:
            s(_(b"the remote file has been renamed to %s\n") % f1)
        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpathconflict(f, f1, fo)
        progress.increment(item=f)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(
        repo.ui,
        cost,
        batchremove,
        (repo, wctx),
        list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
    )
    for i, item in prog:
        progress.increment(step=i, item=item)
    removed = mresult.len((mergestatemod.ACTION_REMOVE,))

    # resolve path conflicts (must come before getting)
    for f, args, msg in mresult.getactions(
        [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
    ):
        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
        (f0, origf0) = args
        if wctx[f0].lexists():
            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        progress.increment(item=f)

    # get in parallel.
    threadsafe = repo.ui.configbool(
        b'experimental', b'worker.wdir-get-thread-safe'
    )
    prog = worker.worker(
        repo.ui,
        cost,
        batchget,
        (repo, mctx, wctx, wantfiledata),
        list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
        threadsafe=threadsafe,
        hasretval=True,
    )
    getfiledata = {}
    for final, res in prog:
        if final:
            getfiledata = res
        else:
            i, item = res
            progress.increment(step=i, item=item)

    if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_FORGET,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
        progress.increment(item=f)

    # re-add (manifest only, just log it)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_ADD,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
        progress.increment(item=f)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
        progress.increment(item=f)

    # keep (noop, just log it)
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_KEEP,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()

    # local directory rename, get
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)

    # exec
    for f, args, msg in mresult.getactions(
        (mergestatemod.ACTION_EXEC,), sort=True
    ):
        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
        progress.increment(item=f)
        (flags,) = args
        wctx[f].audit()
        wctx[f].setflags(b'l' in flags, b'x' in flags)

    # these actions update the file
    updated = mresult.len(
        (
            mergestatemod.ACTION_GET,
            mergestatemod.ACTION_EXEC,
            mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
            mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
        )
    )
    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError(
                b"in-memory merge does not support mergedriver"
            )
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updateresult(
                updated, merged, removed, max(len(unresolvedf), 1)
            )
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
            progress.increment(item=f)
            if f == b'.hgsubstate':  # subrepo states need updating
                subrepoutil.submerge(
                    repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
                )
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
            progress.increment(item=f, total=numupdates)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if (
        usemergedriver
        and not unresolved
        and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
    ):
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = {
            a[0] for a in mresult.getactions((mergestatemod.ACTION_MERGE,))
        }
        for k, acts in pycompat.iteritems(extraactions):
            for a in acts:
                mresult.addfile(a[0], k, *a[1:])
            if k == mergestatemod.ACTION_GET and wantfiledata:
                # no filedata until mergestate is updated to provide it
                for a in acts:
                    getfiledata[a[0]] = None
            # Remove these files from actions[ACTION_MERGE] as well. This is
            # important because in recordupdates, files in actions[ACTION_MERGE]
            # are processed after files in other actions, and the merge driver
            # might add files to those actions via extraactions above. This can
            # lead to a file being recorded twice, with poor results. This is
            # especially problematic for actions[ACTION_REMOVE] (currently only
            # possible with the merge driver in the initial merge process;
            # interrupted merges don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        for a in list(mresult.getactions((mergestatemod.ACTION_MERGE,))):
            if a[0] not in mfiles:
                mresult.removefile(a[0])

1709 progress.complete()
1713 progress.complete()
1710 assert len(getfiledata) == (
1714 assert len(getfiledata) == (
1711 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
1715 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
1712 )
1716 )
1713 return updateresult(updated, merged, removed, unresolved), getfiledata
1717 return updateresult(updated, merged, removed, unresolved), getfiledata
1714
1718
1715
1719
def _advertisefsmonitor(repo, num_gets, p1node):
    # Advertise fsmonitor when its presence could be useful.
    #
    # We only advertise when performing an update from an empty working
    # directory. This typically only occurs during initial clone.
    #
    # We give users a mechanism to disable the warning in case it is
    # annoying.
    #
    # We only advertise on Linux and macOS because that's where fsmonitor is
    # considered stable.
    fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
    fsmonitorthreshold = repo.ui.configint(
        b'fsmonitor', b'warn_update_file_count'
    )
    try:
        # avoid cycle: extensions -> cmdutil -> merge
        from . import extensions

        extensions.find(b'fsmonitor')
        fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
        # We intentionally don't look at whether fsmonitor has disabled
        # itself because a) fsmonitor may have already printed a warning
        # b) we only care about the config state here.
    except KeyError:
        fsmonitorenabled = False

    if (
        fsmonitorwarning
        and not fsmonitorenabled
        and p1node == nullid
        and num_gets >= fsmonitorthreshold
        and pycompat.sysplatform.startswith((b'linux', b'darwin'))
    ):
        repo.ui.warn(
            _(
                b'(warning: large working directory being used without '
                b'fsmonitor enabled; enable fsmonitor to improve performance; '
                b'see "hg help -e fsmonitor")\n'
            )
        )
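
# The options consulted above are the real fsmonitor config knobs; a user
# could tune or silence this warning from an hgrc along these lines (the
# values shown are illustrative, not defaults asserted by this code):
#
#   [fsmonitor]
#   mode = on
#   warn_when_unused = False
#   warn_update_file_count = 50000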


UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'
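
# These constants mirror the values accepted by the experimental.updatecheck
# config option; for example, an hgrc can opt into conflict checking on
# update (a user-configuration sketch, not code from this change):
#
#   [experimental]
#   updatecheck = noconflict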


def update(
    repo,
    node,
    branchmerge,
    force,
    ancestor=None,
    mergeancestor=False,
    labels=None,
    matcher=None,
    mergeforce=False,
    updatedirstate=True,
    updatecheck=None,
    wc=None,
):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |   merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |   merge
     n   n   y   n    y     *     *     |  merge if no conflict
     n   y   n   n    y     *     *     |  discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = UPDATECHECK_LINEAR
        if updatecheck not in (
            UPDATECHECK_NONE,
            UPDATECHECK_LINEAR,
            UPDATECHECK_NO_CONFLICT,
        ):
            raise ValueError(
                r'Invalid updatecheck %r (can accept %r)'
                % (
                    updatecheck,
                    (
                        UPDATECHECK_NONE,
                        UPDATECHECK_LINEAR,
                        UPDATECHECK_NO_CONFLICT,
                    ),
                )
            )
    if wc is not None and wc.isinmemory():
        maybe_wlock = util.nullcontextmanager()
    else:
        maybe_wlock = repo.wlock()
    with maybe_wlock:
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        p2 = repo[node]
        if ancestor is not None:
            pas = [repo[ancestor]]
        else:
            if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        overwrite = force and not branchmerge
        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_(b"outstanding uncommitted merge"))
            ms = mergestatemod.mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(
                    _(b"outstanding merge conflicts"),
                    hint=_(b"use 'hg resolve' to resolve"),
                )
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(
                    _(
                        b"merging with a working directory ancestor"
                        b" has no effect"
                    )
                )
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(
                        _(b"nothing to merge"),
                        hint=_(b"use 'hg update' or check 'hg heads'"),
                    )
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(
                    _(b"uncommitted changes"),
                    hint=_(b"use 'hg status' to list changes"),
                )
            if not wc.isinmemory():
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2:  # no-op update
                # call the hooks and exit early
                repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
                repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
                return updateresult(0, 0, 0, 0)

            if updatecheck == UPDATECHECK_LINEAR and pas not in (
                [p1],
                [p2],
            ):  # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # number of calls to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass  # allow updating to successors
                    else:
                        msg = _(b"uncommitted changes")
                        hint = _(b"commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool(b'merge', b'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        mresult = calculateupdates(
            repo,
            wc,
            p2,
            pas,
            branchmerge,
            force,
            mergeancestor,
            followcopies,
            matcher=matcher,
            mergeforce=mergeforce,
        )

        if updatecheck == UPDATECHECK_NO_CONFLICT:
            if mresult.hasconflicts():
                msg = _(b"conflicting changes")
                hint = _(b"commit or update --clean to discard changes")
                raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        vals = mresult.getfile(b'.hgsubstate')
        if vals:
            f = b'.hgsubstate'
            m, args, msg = vals
            prompts = filemerge.partextras(labels)
            prompts[b'f'] = f
            if m == mergestatemod.ACTION_CHANGED_DELETED:
                if repo.ui.promptchoice(
                    _(
                        b"local%(l)s changed %(f)s which other%(o)s deleted\n"
                        b"use (c)hanged version or (d)elete?"
                        b"$$ &Changed $$ &Delete"
                    )
                    % prompts,
                    0,
                ):
                    mresult.addfile(
                        f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
                    )
                elif f in p1:
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_ADD_MODIFIED,
                        None,
                        b'prompt keep',
                    )
                else:
                    mresult.addfile(
                        f, mergestatemod.ACTION_ADD, None, b'prompt keep',
                    )
            elif m == mergestatemod.ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if (
                    repo.ui.promptchoice(
                        _(
                            b"other%(o)s changed %(f)s which local%(l)s deleted\n"
                            b"use (c)hanged version or leave (d)eleted?"
                            b"$$ &Changed $$ &Deleted"
                        )
                        % prompts,
                        0,
                    )
                    == 0
                ):
                    mresult.addfile(
                        f,
                        mergestatemod.ACTION_GET,
                        (flags, False),
                        b'prompt recreating',
                    )
                else:
                    mresult.removefile(f)

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if not branchmerge and (
                force or not wc.dirty(missing=True, branch=False)
            ):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), mresult)

        # divergent renames
        for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was renamed "
                    b"multiple times to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was deleted "
                    b"and renamed to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        ### apply phase
        if not branchmerge:  # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
        # If we're doing a partial update, we need to skip updating
        # the dirstate.
        always = matcher is None or matcher.always()
        updatedirstate = updatedirstate and always and not wc.isinmemory()
        if updatedirstate:
            repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write(b'updatestate', p2.hex())

        _advertisefsmonitor(
            repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
        )

        wantfiledata = updatedirstate and not branchmerge
        stats, getfiledata = applyupdates(
            repo,
            mresult,
            wc,
            p2,
            overwrite,
            wantfiledata,
            labels=labels,
            commitinfo=mresult.commitinfo,
        )

        if updatedirstate:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                mergestatemod.recordupdates(
                    repo, mresult.actionsdict, branchmerge, getfiledata
                )
            # update completed, clear state
            util.unlink(repo.vfs.join(b'updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if updatedirstate:
        repo.hook(
            b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
        )
    return stats
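

# Illustrative sketch (not part of this change): a minimal caller of update()
# for a plain, linear checkout. `repo` and `rev` are assumed to be supplied by
# the caller; the helper name is hypothetical.
def _example_plain_update(repo, rev):
    # branchmerge=False, force=False gives 'hg update'-style linear checking
    stats = update(repo, repo[rev].node(), branchmerge=False, force=False)
    return stats.unresolvedcount == 0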


def merge(ctx, labels=None, force=False, wc=None):
    """Merge another topological branch into the working copy.

    force = whether the merge was run with 'merge --force' (deprecated)
    """

    return update(
        ctx.repo(),
        ctx.rev(),
        labels=labels,
        branchmerge=True,
        force=force,
        mergeforce=force,
        wc=wc,
    )
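

# Sketch of a typical merge() caller (hypothetical helper): merge changeset
# `other` into the working copy; the labels shown are illustrative.
def _example_merge(repo, other):
    stats = merge(repo[other], labels=[b'working copy', b'merge rev'])
    # a non-zero unresolvedcount means the user must run the resolve flow
    return stats.unresolvedcount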


def clean_update(ctx, wc=None):
    """Do a clean update to the given commit.

    This involves updating to the commit and discarding any changes in the
    working copy.
    """
    return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)


def revert_to(ctx, matcher=None, wc=None):
    """Revert the working copy to the given commit.

    The working copy will keep its current parent(s) but its content will
    be the same as in the given commit.
    """

    return update(
        ctx.repo(),
        ctx.rev(),
        branchmerge=False,
        force=True,
        updatedirstate=False,
        matcher=matcher,
        wc=wc,
    )
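

# Contrast of the two wrappers above (hypothetical sketch): clean_update()
# moves the working directory onto the commit and discards local edits, while
# revert_to() keeps the current parents and only makes the file contents
# match, much like 'hg revert --all -r REV'.
def _example_reset_files_only(repo, rev):
    return revert_to(repo[rev])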


def graft(
    repo,
    ctx,
    base=None,
    labels=None,
    keepparent=False,
    keepconflictparent=False,
    wctx=None,
):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    base - merge base, or ctx.p1() if not specified
    labels - merge labels e.g. ['local', 'graft']
    keepparent - keep second parent if any
    keepconflictparent - if unresolved, keep parent used for the merge

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    # We also pass mergeancestor=True when base is the same revision as p1. 2)
    # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
    wctx = wctx or repo[None]
    pctx = wctx.p1()
    base = base or ctx.p1()
    mergeancestor = (
        repo.changelog.isancestor(pctx.node(), ctx.node())
        or pctx.rev() == base.rev()
    )

    stats = update(
        repo,
        ctx.node(),
        True,
        True,
        base.node(),
        mergeancestor=mergeancestor,
        labels=labels,
        wc=wctx,
    )

    if keepconflictparent and stats.unresolvedcount:
        pother = ctx.node()
    else:
        pother = nullid
        parents = ctx.parents()
        if keepparent and len(parents) == 2 and base in parents:
            parents.remove(base)
            pother = parents[0].node()
    # Never set both parents equal to each other
    if pother == pctx.node():
        pother = nullid

    if wctx.isinmemory():
        wctx.setparents(pctx.node(), pother)
        # fix up dirstate for copies and renames
        copies.graftcopies(wctx, ctx, base)
    else:
        with repo.dirstate.parentchange():
            repo.setparents(pctx.node(), pother)
            repo.dirstate.write(repo.currenttransaction())
            # fix up dirstate for copies and renames
            copies.graftcopies(wctx, ctx, base)
    return stats
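

# Sketch of a graft-style caller (hypothetical): copy changeset `source` onto
# the current working directory parent, roughly what 'hg graft' does per
# revision; the labels are illustrative.
def _example_graft_one(repo, source):
    stats = graft(repo, repo[source], labels=[b'local', b'graft'])
    return stats.unresolvedcount == 0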


def purge(
    repo,
    matcher,
    unknown=True,
    ignored=False,
    removeemptydirs=True,
    removefiles=True,
    abortonerror=False,
    noop=False,
):
    """Purge the working directory of untracked files.

    ``matcher`` is a matcher configured to scan the working directory -
    potentially a subset.

    ``unknown`` controls whether unknown files should be purged.

    ``ignored`` controls whether ignored files should be purged.

    ``removeemptydirs`` controls whether empty directories should be removed.

    ``removefiles`` controls whether files are removed.

    ``abortonerror`` causes an exception to be raised if an error occurs
    deleting a file or directory.

    ``noop`` controls whether files are actually removed: when True, the
    files that would be removed are only reported, not deleted.

    Returns an iterable of relative paths in the working directory that were
    or would be removed.
    """

    def remove(removefn, path):
        try:
            removefn(path)
        except OSError:
            m = _(b'%s cannot be removed') % path
            if abortonerror:
                raise error.Abort(m)
            else:
                repo.ui.warn(_(b'warning: %s\n') % m)

    # There's no API to copy a matcher. So mutate the passed matcher and
    # restore it when we're done.
    oldtraversedir = matcher.traversedir

    res = []

    try:
        if removeemptydirs:
            directories = []
            matcher.traversedir = directories.append

        status = repo.status(match=matcher, ignored=ignored, unknown=unknown)

        if removefiles:
            for f in sorted(status.unknown + status.ignored):
                if not noop:
                    repo.ui.note(_(b'removing file %s\n') % f)
                    remove(repo.wvfs.unlink, f)
                res.append(f)

        if removeemptydirs:
            for f in sorted(directories, reverse=True):
                if matcher(f) and not repo.wvfs.listdir(f):
                    if not noop:
                        repo.ui.note(_(b'removing directory %s\n') % f)
                        remove(repo.wvfs.rmdir, f)
                    res.append(f)

        return res

    finally:
        matcher.traversedir = oldtraversedir
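

# Sketch of a purge caller (hypothetical): report what would be removed
# without touching the disk, via the noop flag documented above.
def _example_purge_preview(repo):
    # an always-matcher scans the entire working directory
    from mercurial import match as matchmod

    return purge(repo, matchmod.always(), noop=True)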
@@ -1,831 +1,831 @@
# sparse.py - functionality for sparse checkouts
#
# Copyright 2014 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os

from .i18n import _
from .node import (
    hex,
    nullid,
)
from . import (
    error,
    match as matchmod,
    merge as mergemod,
    mergestate as mergestatemod,
    pathutil,
    pycompat,
    scmutil,
    util,
)
from .utils import hashutil

# Whether sparse features are enabled. This variable is intended to be
# temporary to facilitate porting sparse to core. It should eventually be
# a per-repo option, possibly a repo requirement.
enabled = False


def parseconfig(ui, raw, action):
    """Parse sparse config file content.

    action is the command which is triggering this read; it can be narrow
    or sparse.

    Returns a tuple of includes, excludes, and profiles.
    """
    includes = set()
    excludes = set()
    profiles = set()
    current = None
    havesection = False

    for line in raw.split(b'\n'):
        line = line.strip()
        if not line or line.startswith(b'#'):
            # empty or comment line, skip
            continue
        elif line.startswith(b'%include '):
            line = line[9:].strip()
            if line:
                profiles.add(line)
        elif line == b'[include]':
            if havesection and current != includes:
                # TODO pass filename into this API so we can report it.
                raise error.Abort(
                    _(
                        b'%(action)s config cannot have includes '
                        b'after excludes'
                    )
                    % {b'action': action}
                )
            havesection = True
            current = includes
            continue
        elif line == b'[exclude]':
            havesection = True
            current = excludes
        elif line:
            if current is None:
                raise error.Abort(
                    _(
                        b'%(action)s config entry outside of '
                        b'section: %(line)s'
                    )
                    % {b'action': action, b'line': line},
                    hint=_(
                        b'add an [include] or [exclude] line '
                        b'to declare the entry type'
                    ),
                )

            if line.strip().startswith(b'/'):
                ui.warn(
                    _(
                        b'warning: %(action)s profile cannot use'
                        b' paths starting with /, ignoring %(line)s\n'
                    )
                    % {b'action': action, b'line': line}
                )
                continue
            current.add(line)

    return includes, excludes, profiles
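
# A sample of the format parseconfig() accepts (grounded in the parser above):
# comment lines, %include directives naming other profile files, and
# [include]/[exclude] sections of patterns. The file names are illustrative.
#
#   # base profile
#   %include contrib/sparse-base
#   [include]
#   docs/
#   [exclude]
#   docs/build/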


# Exists as separate function to facilitate monkeypatching.
def readprofile(repo, profile, changeid):
    """Resolve the raw content of a sparse profile file."""
    # TODO add some kind of cache here because this incurs a manifest
    # resolve and can be slow.
    return repo.filectx(profile, changeid=changeid).data()


def patternsforrev(repo, rev):
    """Obtain sparse checkout patterns for the given rev.

    Returns a tuple of iterables representing includes, excludes, and
    profiles.
    """
    # Feature isn't enabled. No-op.
    if not enabled:
        return set(), set(), set()

    raw = repo.vfs.tryread(b'sparse')
    if not raw:
        return set(), set(), set()

    if rev is None:
        raise error.Abort(
            _(b'cannot parse sparse patterns from working directory')
        )

    includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
    ctx = repo[rev]

    if profiles:
        visited = set()
        while profiles:
            profile = profiles.pop()
            if profile in visited:
                continue

            visited.add(profile)

            try:
                raw = readprofile(repo, profile, rev)
            except error.ManifestLookupError:
                msg = (
                    b"warning: sparse profile '%s' not found "
                    b"in rev %s - ignoring it\n" % (profile, ctx)
                )
                # experimental config: sparse.missingwarning
                if repo.ui.configbool(b'sparse', b'missingwarning'):
                    repo.ui.warn(msg)
                else:
                    repo.ui.debug(msg)
                continue

            pincludes, pexcludes, subprofs = parseconfig(
                repo.ui, raw, b'sparse'
            )
            includes.update(pincludes)
            excludes.update(pexcludes)
            profiles.update(subprofs)

        profiles = visited

    if includes:
        includes.add(b'.hg*')

    return includes, excludes, profiles


def activeconfig(repo):
    """Determine the active sparse config rules.

    Rules are constructed by reading the current sparse config and bringing in
    referenced profiles from parents of the working directory.
    """
    revs = [
        repo.changelog.rev(node)
        for node in repo.dirstate.parents()
        if node != nullid
    ]

    allincludes = set()
    allexcludes = set()
    allprofiles = set()

    for rev in revs:
        includes, excludes, profiles = patternsforrev(repo, rev)
        allincludes |= includes
        allexcludes |= excludes
        allprofiles |= profiles

    return allincludes, allexcludes, allprofiles


def configsignature(repo, includetemp=True):
    """Obtain the signature string for the current sparse configuration.

    This is used to construct a cache key for matchers.
    """
    cache = repo._sparsesignaturecache

    signature = cache.get(b'signature')

    if includetemp:
        tempsignature = cache.get(b'tempsignature')
    else:
        tempsignature = b'0'

    if signature is None or (includetemp and tempsignature is None):
        signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
        cache[b'signature'] = signature

        if includetemp:
            raw = repo.vfs.tryread(b'tempsparse')
            tempsignature = hex(hashutil.sha1(raw).digest())
            cache[b'tempsignature'] = tempsignature

    return b'%s %s' % (signature, tempsignature)


def writeconfig(repo, includes, excludes, profiles):
    """Write the sparse config file given a sparse configuration."""
    with repo.vfs(b'sparse', b'wb') as fh:
        for p in sorted(profiles):
            fh.write(b'%%include %s\n' % p)

        if includes:
            fh.write(b'[include]\n')
            for i in sorted(includes):
                fh.write(i)
                fh.write(b'\n')

        if excludes:
            fh.write(b'[exclude]\n')
            for e in sorted(excludes):
                fh.write(e)
                fh.write(b'\n')

    repo._sparsesignaturecache.clear()
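

# Sketch (hypothetical): persist a minimal sparse config. writeconfig() also
# clears the signature cache, so later matcher() calls see the new rules.
def _example_write_minimal(repo):
    writeconfig(repo, {b'docs/'}, {b'docs/build/'}, set())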


def readtemporaryincludes(repo):
    raw = repo.vfs.tryread(b'tempsparse')
    if not raw:
        return set()

    return set(raw.split(b'\n'))


def writetemporaryincludes(repo, includes):
    repo.vfs.write(b'tempsparse', b'\n'.join(sorted(includes)))
    repo._sparsesignaturecache.clear()


def addtemporaryincludes(repo, additional):
    includes = readtemporaryincludes(repo)
    for i in additional:
        includes.add(i)
    writetemporaryincludes(repo, includes)


def prunetemporaryincludes(repo):
    if not enabled or not repo.vfs.exists(b'tempsparse'):
        return

    s = repo.status()
    if s.modified or s.added or s.removed or s.deleted:
        # Still have pending changes. Don't bother trying to prune.
        return

    sparsematch = matcher(repo, includetemp=False)
    dirstate = repo.dirstate
    mresult = mergemod.mergeresult()
    dropped = []
    tempincludes = readtemporaryincludes(repo)
    for file in tempincludes:
        if file in dirstate and not sparsematch(file):
            message = _(b'dropping temporarily included sparse files')
            mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
            dropped.append(file)

    mergemod.applyupdates(
        repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
    )

    # Fix dirstate
    for file in dropped:
        dirstate.drop(file)

    repo.vfs.unlink(b'tempsparse')
    repo._sparsesignaturecache.clear()
    msg = _(
        b'cleaned up %d temporarily added file(s) from the '
        b'sparse checkout\n'
    )
    repo.ui.status(msg % len(tempincludes))


def forceincludematcher(matcher, includes):
    """Returns a matcher that returns true for any of the forced includes
    before testing against the actual matcher."""
    kindpats = [(b'path', include, b'') for include in includes]
    includematcher = matchmod.includematcher(b'', kindpats)
    return matchmod.unionmatcher([includematcher, matcher])
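

# Sketch (hypothetical): layer a forced include over an existing matcher; this
# is the same mechanism matcher() uses below for temporary includes.
def _example_force_hgfiles(base):
    return forceincludematcher(base, {b'.hg*'})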


def matcher(repo, revs=None, includetemp=True):
    """Obtain a matcher for sparse working directories for the given revs.

    If multiple revisions are specified, the matcher is the union of all
    revs.

    ``includetemp`` indicates whether to use the temporary sparse profile.
    """
    # If sparse isn't enabled, sparse matcher matches everything.
    if not enabled:
        return matchmod.always()

    if not revs or revs == [None]:
        revs = [
            repo.changelog.rev(node)
            for node in repo.dirstate.parents()
            if node != nullid
        ]

    signature = configsignature(repo, includetemp=includetemp)

    key = b'%s %s' % (signature, b' '.join(map(pycompat.bytestr, revs)))

    result = repo._sparsematchercache.get(key)
    if result:
        return result

    matchers = []
    for rev in revs:
        try:
            includes, excludes, profiles = patternsforrev(repo, rev)

            if includes or excludes:
                matcher = matchmod.match(
                    repo.root,
                    b'',
                    [],
                    include=includes,
                    exclude=excludes,
                    default=b'relpath',
                )
                matchers.append(matcher)
        except IOError:
            pass

    if not matchers:
        result = matchmod.always()
    elif len(matchers) == 1:
        result = matchers[0]
    else:
        result = matchmod.unionmatcher(matchers)

    if includetemp:
        tempincludes = readtemporaryincludes(repo)
        result = forceincludematcher(result, tempincludes)

    repo._sparsematchercache[key] = result

    return result
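

# Usage sketch (hypothetical): ask whether a path is inside the sparse
# checkout for the working directory parents (revs=None defaults to them).
def _example_insparse(repo, path):
    sparsematch = matcher(repo)
    return bool(sparsematch(path))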
365
365
366
366
def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult):
    """Filter updates to only lay out files that match the sparse rules."""
    if not enabled:
        return

    oldrevs = [pctx.rev() for pctx in wctx.parents()]
    oldsparsematch = matcher(repo, oldrevs)

    if oldsparsematch.always():
        return

    files = set()
    prunedactions = {}

    if branchmerge:
        # If we're merging, use the wctx filter, since we're merging into
        # the wctx.
        sparsematch = matcher(repo, [wctx.p1().rev()])
    else:
        # If we're updating, use the target context's filter, since we're
        # moving to the target context.
        sparsematch = matcher(repo, [mctx.rev()])

    temporaryfiles = []
    for file, action in mresult.filemap():
        type, args, msg = action
        files.add(file)
        if sparsematch(file):
            prunedactions[file] = action
        elif type == mergestatemod.ACTION_MERGE:
            temporaryfiles.append(file)
            prunedactions[file] = action
        elif branchmerge:
            if type != mergestatemod.ACTION_KEEP:
                temporaryfiles.append(file)
                prunedactions[file] = action
        elif type == mergestatemod.ACTION_FORGET:
            prunedactions[file] = action
        elif file in wctx:
            prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg)

        # in case of a rename on one side, it is possible that f1 might not
        # be present in the sparse checkout; we should include it
        # TODO: should we do the same for f2?
        # this exists as a separate check because the file can be in sparse,
        # and hence, if we tried to club this condition into the
        # `elif type == ACTION_MERGE` branch above, it wouldn't be triggered
        if branchmerge and type == mergestatemod.ACTION_MERGE:
            f1, f2, fa, move, anc = args
            if not sparsematch(f1):
                temporaryfiles.append(f1)

    if len(temporaryfiles) > 0:
        repo.ui.status(
            _(
                b'temporarily included %d file(s) in the sparse '
                b'checkout for merging\n'
            )
            % len(temporaryfiles)
        )
        addtemporaryincludes(repo, temporaryfiles)

        # Add the new files to the working copy so they can be merged, etc
        tmresult = mergemod.mergeresult()
        message = b'temporarily adding to sparse checkout'
        wctxmanifest = repo[None].manifest()
        for file in temporaryfiles:
            if file in wctxmanifest:
                fctx = repo[None][file]
                tmresult.addfile(
                    file,
                    mergestatemod.ACTION_GET,
                    (fctx.flags(), False),
                    message,
                )

        mergemod.applyupdates(
            repo, tmresult, repo[None], repo[b'.'], False, wantfiledata=False
        )

        dirstate = repo.dirstate
        for file, flags, msg in tmresult.getactions([mergestatemod.ACTION_GET]):
            dirstate.normal(file)

    profiles = activeconfig(repo)[2]
    changedprofiles = profiles & files
    # If an active profile changed during the update, refresh the checkout.
    # Don't do this during a branch merge, since all incoming changes should
    # have been handled by the temporary includes above.
    if changedprofiles and not branchmerge:
        mf = mctx.manifest()
        for file in mf:
            old = oldsparsematch(file)
            new = sparsematch(file)
            if not old and new:
                flags = mf.flags(file)
                prunedactions[file] = (
                    mergestatemod.ACTION_GET,
                    (flags, False),
                    b'',
                )
            elif old and not new:
                prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'')

    mresult.setactions(prunedactions)

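# Illustrative sketch (assumed values, not part of the module): given a merge
# result whose filemap() yields
#
#     (b'in-sparse.py', (ACTION_GET,   ...)),
#     (b'outside.txt',  (ACTION_GET,   ...)),
#     (b'conflict.c',   (ACTION_MERGE, ...)),
#
# and a sparse config matching only *.py, the pruning above keeps the first
# action as-is, turns the second into a remove (or omits it entirely if the
# file isn't in the working context), and keeps the third while also adding
# conflict.c to the temporary includes so the merge tool can operate on a
# real file on disk.
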
def refreshwdir(repo, origstatus, origsparsematch, force=False):
    """Refreshes the working directory by taking the sparse config into
    account.

    The old status and sparse matcher are compared against the current
    sparse matcher.

    Will abort if a file with pending changes is being excluded or included
    unless ``force`` is True.
    """
    # Verify there are no pending changes
    pending = set()
    pending.update(origstatus.modified)
    pending.update(origstatus.added)
    pending.update(origstatus.removed)
    sparsematch = matcher(repo)
    abort = False

    for f in pending:
        if not sparsematch(f):
            repo.ui.warn(_(b"pending changes to '%s'\n") % f)
            abort = not force

    if abort:
        raise error.Abort(
            _(b'could not update sparseness due to pending changes')
        )

    # Calculate merge result
    dirstate = repo.dirstate
    ctx = repo[b'.']
    added = []
    lookup = []
    dropped = []
    mf = ctx.manifest()
    files = set(mf)
    mresult = mergemod.mergeresult()

    for file in files:
        old = origsparsematch(file)
        new = sparsematch(file)
        # Add files that are newly included, or that don't exist in
        # the dirstate yet.
        if (new and not old) or (old and new and file not in dirstate):
            fl = mf.flags(file)
            if repo.wvfs.exists(file):
                mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'')
                lookup.append(file)
            else:
                mresult.addfile(
                    file, mergestatemod.ACTION_GET, (fl, False), b''
                )
                added.append(file)
        # Drop files that are newly excluded, or that still exist in
        # the dirstate.
        elif (old and not new) or (not old and not new and file in dirstate):
            dropped.append(file)
            if file not in pending:
                mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'')

    # Verify there are no pending changes in newly included files
    abort = False
    for file in lookup:
        repo.ui.warn(_(b"pending changes to '%s'\n") % file)
        abort = not force
    if abort:
        raise error.Abort(
            _(
                b'cannot change sparseness due to pending '
                b'changes (delete the files or use '
                b'--force to bring them back dirty)'
            )
        )

    # Check for files that were only in the dirstate.
    for file, state in pycompat.iteritems(dirstate):
        if file not in files:
            old = origsparsematch(file)
            new = sparsematch(file)
            if old and not new:
                dropped.append(file)

    mergemod.applyupdates(
        repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
    )

    # Fix dirstate
    for file in added:
        dirstate.normal(file)

    for file in dropped:
        dirstate.drop(file)

    for file in lookup:
        # File exists on disk, and we're bringing it back in an unknown state.
        dirstate.normallookup(file)

    return added, dropped, lookup

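# Illustrative usage (assumptions, not part of the module): callers typically
# snapshot the old state, rewrite the config, then refresh, e.g.
#
#     oldstatus = repo.status()
#     oldmatch = matcher(repo)
#     writeconfig(repo, includes, excludes, profiles)
#     added, dropped, lookup = refreshwdir(repo, oldstatus, oldmatch)
#
# `added` were checked out fresh, `dropped` were removed from the checkout,
# and `lookup` already existed on disk and are marked for a later status
# re-check (normallookup) rather than assumed clean.
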
def aftercommit(repo, node):
    """Perform actions after a working directory commit."""
    # This function is called unconditionally, even if sparse isn't
    # enabled.
    ctx = repo[node]

    profiles = patternsforrev(repo, ctx.rev())[2]

    # profiles will only have data if sparse is enabled.
    if profiles & set(ctx.files()):
        origstatus = repo.status()
        origsparsematch = matcher(repo)
        refreshwdir(repo, origstatus, origsparsematch, force=True)

    prunetemporaryincludes(repo)

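# Illustrative note (not part of the module): `profiles & set(ctx.files())`
# is a set intersection, so the refresh only runs when the commit itself
# touched a file that is an active sparse profile. For example, if the
# active profiles are {b'webapp.sparse'} and the commit changed
# [b'webapp.sparse', b'README'], the intersection is non-empty and the
# working directory is refreshed against the updated profile.
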
def _updateconfigandrefreshwdir(
    repo, includes, excludes, profiles, force=False, removing=False
):
    """Update the sparse config and working directory state."""
    raw = repo.vfs.tryread(b'sparse')
    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse')

    oldstatus = repo.status()
    oldmatch = matcher(repo)
    oldrequires = set(repo.requirements)

    # TODO remove this try..except once the matcher integrates better
    # with dirstate. We currently have to write the updated config
    # because that will invalidate the matcher cache and force a
    # re-read. We ideally want to update the cached matcher on the
    # repo instance then flush the new config to disk once wdir is
    # updated. But this requires massive rework to matcher() and its
    # consumers.

    if b'exp-sparse' in oldrequires and removing:
        repo.requirements.discard(b'exp-sparse')
        scmutil.writereporequirements(repo)
    elif b'exp-sparse' not in oldrequires:
        repo.requirements.add(b'exp-sparse')
        scmutil.writereporequirements(repo)

    try:
        writeconfig(repo, includes, excludes, profiles)
        return refreshwdir(repo, oldstatus, oldmatch, force=force)
    except Exception:
        if repo.requirements != oldrequires:
            repo.requirements.clear()
            repo.requirements |= oldrequires
            scmutil.writereporequirements(repo)
        writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
        raise

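# Illustrative note (not part of the module): the except clause above is a
# manual rollback. If refreshwdir() raises (e.g. pending changes without
# --force), both the `exp-sparse` requirement and the on-disk `.hg/sparse`
# file are restored to their previous contents before the exception
# propagates, so a failed update leaves the repository config unchanged.
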
def clearrules(repo, force=False):
    """Clears include/exclude rules from the sparse config.

    The remaining sparse config only has profiles, if defined. The working
    directory is refreshed, as needed.
    """
    with repo.wlock():
        raw = repo.vfs.tryread(b'sparse')
        includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')

        if not includes and not excludes:
            return

        _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)

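# Illustrative example (assumed file contents, not part of the module): given
# a .hg/sparse file containing
#
#     %include base.sparse
#     [include]
#     src/**
#     [exclude]
#     src/vendor/**
#
# clearrules() rewrites it to keep only the profile reference
# (%include base.sparse), then refreshes the working directory to match.
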
def importfromfiles(repo, opts, paths, force=False):
    """Import sparse config rules from files.

    The updated sparse config is written out and the working directory
    is refreshed, as needed.
    """
    with repo.wlock():
        # read current configuration
        raw = repo.vfs.tryread(b'sparse')
        includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
        aincludes, aexcludes, aprofiles = activeconfig(repo)

        # Import rules on top; only take in rules that are not yet
        # part of the active rules.
        changed = False
        for p in paths:
            with util.posixfile(util.expandpath(p), mode=b'rb') as fh:
                raw = fh.read()

            iincludes, iexcludes, iprofiles = parseconfig(
                repo.ui, raw, b'sparse'
            )
            oldsize = len(includes) + len(excludes) + len(profiles)
            includes.update(iincludes - aincludes)
            excludes.update(iexcludes - aexcludes)
            profiles.update(iprofiles - aprofiles)
            if len(includes) + len(excludes) + len(profiles) > oldsize:
                changed = True

        profilecount = includecount = excludecount = 0
        fcounts = (0, 0, 0)

        if changed:
            profilecount = len(profiles - aprofiles)
            includecount = len(includes - aincludes)
            excludecount = len(excludes - aexcludes)

            fcounts = map(
                len,
                _updateconfigandrefreshwdir(
                    repo, includes, excludes, profiles, force=force
                ),
            )

        printchanges(
            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
        )

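# Illustrative sketch (assumed values, not part of the module): importing is
# additive and deduplicated against the *active* config. If the active
# includes are {b'src/**'} and an imported file contributes
# {b'src/**', b'docs/**'}, only b'docs/**' is new: the combined size grows,
# `changed` becomes True, and the refreshed working directory reports one
# include rule added.
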
def updateconfig(
    repo,
    pats,
    opts,
    include=False,
    exclude=False,
    reset=False,
    delete=False,
    enableprofile=False,
    disableprofile=False,
    force=False,
    usereporootpaths=False,
):
    """Perform a sparse config update.

    Only one of the actions may be performed.

    The new config is written out and a working directory refresh is performed.
    """
    with repo.wlock():
        raw = repo.vfs.tryread(b'sparse')
        oldinclude, oldexclude, oldprofiles = parseconfig(
            repo.ui, raw, b'sparse'
        )

        if reset:
            newinclude = set()
            newexclude = set()
            newprofiles = set()
        else:
            newinclude = set(oldinclude)
            newexclude = set(oldexclude)
            newprofiles = set(oldprofiles)

        if any(os.path.isabs(pat) for pat in pats):
            raise error.Abort(_(b'paths cannot be absolute'))

        if not usereporootpaths:
            # let's treat paths as relative to cwd
            root, cwd = repo.root, repo.getcwd()
            abspats = []
            for kindpat in pats:
                kind, pat = matchmod._patsplit(kindpat, None)
                if kind in matchmod.cwdrelativepatternkinds or kind is None:
                    ap = (kind + b':' if kind else b'') + pathutil.canonpath(
                        root, cwd, pat
                    )
                    abspats.append(ap)
                else:
                    abspats.append(kindpat)
            pats = abspats

        if include:
            newinclude.update(pats)
        elif exclude:
            newexclude.update(pats)
        elif enableprofile:
            newprofiles.update(pats)
        elif disableprofile:
            newprofiles.difference_update(pats)
        elif delete:
            newinclude.difference_update(pats)
            newexclude.difference_update(pats)

        profilecount = len(newprofiles - oldprofiles) - len(
            oldprofiles - newprofiles
        )
        includecount = len(newinclude - oldinclude) - len(
            oldinclude - newinclude
        )
        excludecount = len(newexclude - oldexclude) - len(
            oldexclude - newexclude
        )

        fcounts = map(
            len,
            _updateconfigandrefreshwdir(
                repo,
                newinclude,
                newexclude,
                newprofiles,
                force=force,
                removing=reset,
            ),
        )

        printchanges(
            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
        )

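# Illustrative arithmetic (assumed values, not part of the module): the
# counts above are net deltas, not totals. With oldinclude = {a, b} and
# newinclude = {b, c, d}:
#
#     len(newinclude - oldinclude) = 2   # {c, d} added
#     len(oldinclude - newinclude) = 1   # {a} removed
#     includecount = 2 - 1 = 1           # net: one more include rule
#
# so an update that merely swaps one rule for another reports a count of 0.
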
def printchanges(
    ui,
    opts,
    profilecount=0,
    includecount=0,
    excludecount=0,
    added=0,
    dropped=0,
    conflicting=0,
):
    """Print output summarizing sparse config changes."""
    with ui.formatter(b'sparse', opts) as fm:
        fm.startitem()
        fm.condwrite(
            ui.verbose,
            b'profiles_added',
            _(b'Profiles changed: %d\n'),
            profilecount,
        )
        fm.condwrite(
            ui.verbose,
            b'include_rules_added',
            _(b'Include rules changed: %d\n'),
            includecount,
        )
        fm.condwrite(
            ui.verbose,
            b'exclude_rules_added',
            _(b'Exclude rules changed: %d\n'),
            excludecount,
        )

        # In 'plain' verbose mode, mergemod.applyupdates already outputs what
        # files are added or removed outside of the templating formatter
        # framework. No point in repeating ourselves in that case.
        if not fm.isplain():
            fm.condwrite(
                ui.verbose, b'files_added', _(b'Files added: %d\n'), added
            )
            fm.condwrite(
                ui.verbose, b'files_dropped', _(b'Files dropped: %d\n'), dropped
            )
            fm.condwrite(
                ui.verbose,
                b'files_conflicting',
                _(b'Files conflicting: %d\n'),
                conflicting,
            )
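
# Illustrative output (assumed invocation, not part of the module): with
# ui.verbose set and the default plain formatter, a config change might
# print:
#
#     Profiles changed: 0
#     Include rules changed: 1
#     Exclude rules changed: 0
#
# while the files_* lines are skipped in plain mode (fm.isplain() is True)
# and left to applyupdates' own output, per the guard above.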