scmutil: allowing different files to be prefetched per revision...
Rodrigo Damazio Bovendorp
r45604:2fac78d5 default draft

The requested changes are too big and content was truncated.

@@ -1,546 +1,546 b''
# wrapper.py - methods wrapping core mercurial logic
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib

from mercurial.i18n import _
from mercurial.node import bin, hex, nullid, short
from mercurial.pycompat import (
    getattr,
    setattr,
)

from mercurial import (
    bundle2,
    changegroup,
    cmdutil,
    context,
    error,
    exchange,
    exthelper,
    localrepo,
    pycompat,
    revlog,
    scmutil,
    upgrade,
    util,
    vfs as vfsmod,
    wireprotov1server,
)

from mercurial.interfaces import repository

from mercurial.utils import (
    storageutil,
    stringutil,
)

from ..largefiles import lfutil

from . import (
    blobstore,
    pointer,
)

eh = exthelper.exthelper()


@eh.wrapfunction(localrepo, b'makefilestorage')
def localrepomakefilestorage(orig, requirements, features, **kwargs):
    if b'lfs' in requirements:
        features.add(repository.REPO_FEATURE_LFS)

    return orig(requirements=requirements, features=features, **kwargs)


@eh.wrapfunction(changegroup, b'allsupportedversions')
def allsupportedversions(orig, ui):
    versions = orig(ui)
    versions.add(b'03')
    return versions


@eh.wrapfunction(wireprotov1server, b'_capabilities')
def _capabilities(orig, repo, proto):
    '''Wrap server command to announce lfs server capability'''
    caps = orig(repo, proto)
    if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
        # Advertise a slightly different capability when lfs is *required*, so
        # that the client knows it MUST load the extension. If lfs is not
        # required on the server, there's no reason to autoload the extension
        # on the client.
        if b'lfs' in repo.requirements:
            caps.append(b'lfs-serve')

        caps.append(b'lfs')
    return caps


def bypasscheckhash(self, text):
    return False


def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 3-tuple (text, validatehash, sidedata) where validatehash is True
    as the contents of the blobstore should be checked using checkhash.
    """
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        p.filename = self.filename
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith(b'x-hg-'):
            name = k[len(b'x-hg-') :]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith(b'\1\n'):
        text = storageutil.packmeta(hgmeta, text)

    return (text, True, {})


def writetostore(self, text, sidedata):
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = storageutil.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hex(hashlib.sha256(text).digest())
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = b'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not stringutil.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata[b'x-is-binary'] = b'0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in pycompat.iteritems(hgmeta):
            metadata[b'x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)


def _islfs(rlog, node=None, rev=None):
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
        rev = rlog.rev(node)
    else:
        node = rlog.node(rev)
    if node == nullid:
        return False
    flags = rlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)


# Wrapping may also be applied by remotefilelog
def filelogaddrevision(
    orig,
    self,
    text,
    transaction,
    link,
    p1,
    p2,
    cachedelta=None,
    node=None,
    flags=revlog.REVIDX_DEFAULT_FLAGS,
    **kwds
):
    # The matcher isn't available if reposetup() wasn't called.
    lfstrack = self._revlog.opener.options.get(b'lfstrack')

    if lfstrack:
        textlen = len(text)
        # exclude hg rename meta from file size
        meta, offset = storageutil.parsemeta(text)
        if offset:
            textlen -= offset

        if lfstrack(self._revlog.filename, textlen):
            flags |= revlog.REVIDX_EXTSTORED

    return orig(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=cachedelta,
        node=node,
        flags=flags,
        **kwds
    )


# Wrapping may also be applied by remotefilelog
def filelogrenamed(orig, self, node):
    if _islfs(self._revlog, node):
        rawtext = self._revlog.rawdata(node)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
            return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
        else:
            return False
    return orig(self, node)


# Wrapping may also be applied by remotefilelog
def filelogsize(orig, self, rev):
    if _islfs(self._revlog, rev=rev):
        # fast path: use lfs metadata to answer size
        rawtext = self._revlog.rawdata(rev)
        metadata = pointer.deserialize(rawtext)
        return int(metadata[b'size'])
    return orig(self, rev)


@eh.wrapfunction(revlog, b'_verify_revision')
def _verify_revision(orig, rl, skipflags, state, node):
    if _islfs(rl, node=node):
        rawtext = rl.rawdata(node)
        metadata = pointer.deserialize(rawtext)

        # Don't skip blobs that are stored locally, as local verification is
        # relatively cheap and there's no other way to verify the raw data in
        # the revlog.
        if rl.opener.lfslocalblobstore.has(metadata.oid()):
            skipflags &= ~revlog.REVIDX_EXTSTORED
        elif skipflags & revlog.REVIDX_EXTSTORED:
            # The wrapped method will set `skipread`, but there's enough local
            # info to check renames.
            state[b'safe_renamed'].add(node)

    orig(rl, skipflags, state, node)


@eh.wrapfunction(context.basefilectx, b'cmp')
def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (e.g. hg-git) is not based on basefilectx and does not have islfs
    if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
        # fast path: check LFS oid
        p1 = pointer.deserialize(self.rawdata())
        p2 = pointer.deserialize(fctx.rawdata())
        return p1.oid() != p2.oid()
    return orig(self, fctx)


@eh.wrapfunction(context.basefilectx, b'isbinary')
def filectxisbinary(orig, self):
    if self.islfs():
        # fast path: use lfs metadata to answer isbinary
        metadata = pointer.deserialize(self.rawdata())
        # if lfs metadata says nothing, assume it's binary by default
        return bool(int(metadata.get(b'x-is-binary', 1)))
    return orig(self)


def filectxislfs(self):
    return _islfs(self.filelog()._revlog, self.filenode())


@eh.wrapfunction(cmdutil, b'_updatecatformatter')
def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
    orig(fm, ctx, matcher, path, decode)
    fm.data(rawdata=ctx[path].rawdata())


@eh.wrapfunction(scmutil, b'wrapconvertsink')
def convertsink(orig, sink):
    sink = orig(sink)
    if sink.repotype == b'hg':

        class lfssink(sink.__class__):
            def putcommit(
                self,
                files,
                copies,
                parents,
                commit,
                source,
                revmap,
                full,
                cleanp2,
            ):
                pc = super(lfssink, self).putcommit
                node = pc(
                    files,
                    copies,
                    parents,
                    commit,
                    source,
                    revmap,
                    full,
                    cleanp2,
                )

                if b'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add(b'lfs')
                        self.repo._writerequirements()

                return node

        sink.__class__ = lfssink

    return sink


# bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
# options and blob stores are passed from othervfs to the new readonlyvfs.
@eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
def vfsinit(orig, self, othervfs):
    orig(self, othervfs)
    # copy lfs related options
    for k, v in othervfs.options.items():
        if k.startswith(b'lfs'):
            self.options[k] = v
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for name in [b'lfslocalblobstore', b'lfsremoteblobstore']:
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))


-def _prefetchfiles(repo, revs, match):
+def _prefetchfiles(repo, revmatches):
    """Ensure that required LFS blobs are present, fetching them as a group if
    needed."""
    if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
        return

    pointers = []
    oids = set()
    localstore = repo.svfs.lfslocalblobstore

-    for rev in revs:
+    for rev, match in revmatches:
        ctx = repo[rev]
        for f in ctx.walk(match):
            p = pointerfromctx(ctx, f)
            if p and p.oid() not in oids and not localstore.has(p.oid()):
                p.filename = f
                pointers.append(p)
                oids.add(p.oid())

    if pointers:
        # Recalculating the repo store here allows 'paths.default' that is set
        # on the repo by a clone command to be used for the update.
        blobstore.remote(repo).readbatch(pointers, localstore)


def _canskipupload(repo):
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
        return True

    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)


def candownload(repo):
    # Skip if this hasn't been passed to reposetup()
    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
        return False

    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)


def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions, e.g. infinitepush. avoid renaming.
    '''
    if _canskipupload(repo):
        return
    pointers = extractpointers(repo, revs)
    uploadblobs(repo, pointers)


def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)


@eh.wrapfunction(exchange, b'push')
def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed, and
    update the remote store based on the destination path."""
    if b'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support. Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not remote.local() and not remote.capable(b'lfs'):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _(b"required features are not supported in the destination: %s")
            raise error.Abort(
                m % b'lfs', hint=_(b'enable the lfs extension on the server')
            )

        # Repositories where this extension is disabled won't have the field.
        # But if there's a requirement, then the extension must be loaded AND
        # there may be blobs to push.
        remotestore = repo.svfs.lfsremoteblobstore
        try:
            repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
            return orig(repo, remote, *args, **kwargs)
        finally:
            repo.svfs.lfsremoteblobstore = remotestore
    else:
        return orig(repo, remote, *args, **kwargs)


# when writing a bundle via "hg bundle" command, upload related LFS blobs
@eh.wrapfunction(bundle2, b'writenewbundle')
def writenewbundle(
    orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(
        ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
    )


def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug(b'lfs: computing set of blobs to upload\n')
    pointers = {}

    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'lfs search'), _(b'changesets'), len(revs)
    ) as progress:
        for r in revs:
            ctx = repo[r]
            for p in pointersfromctx(ctx).values():
                pointers[p.oid()] = p
            progress.increment()
    return sorted(pointers.values(), key=lambda p: p.oid())


def pointerfromctx(ctx, f, removed=False):
    """return a pointer for the named file from the given changectx, or None if
    the file isn't LFS.

    Optionally, the pointer for a file deleted from the context can be returned.
    Since no such pointer is actually stored, and to distinguish from a non LFS
    file, this pointer is represented by an empty dict.
    """
    _ctx = ctx
    if f not in ctx:
        if not removed:
            return None
        if f in ctx.p1():
            _ctx = ctx.p1()
        elif f in ctx.p2():
            _ctx = ctx.p2()
        else:
            return None
    fctx = _ctx[f]
    if not _islfs(fctx.filelog()._revlog, fctx.filenode()):
        return None
    try:
        p = pointer.deserialize(fctx.rawdata())
        if ctx == _ctx:
            return p
        return {}
    except pointer.InvalidPointer as ex:
        raise error.Abort(
            _(b'lfs: corrupted pointer (%s@%s): %s\n')
            % (f, short(_ctx.node()), ex)
        )


def pointersfromctx(ctx, removed=False):
    """return a dict {path: pointer} for given single changectx.

    If ``removed`` == True and the LFS file was removed from ``ctx``, the value
    stored for the path is an empty dict.
    """
    result = {}
    m = ctx.repo().narrowmatch()

    # TODO: consider manifest.fastread() instead
    for f in ctx.files():
        if not m(f):
            continue
        p = pointerfromctx(ctx, f, removed=removed)
        if p is not None:
            result[f] = p
    return result


def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return

    remoteblob = repo.svfs.lfsremoteblobstore
    remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)


@eh.wrapfunction(upgrade, b'_finishdatamigration')
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)

    # Skip if this hasn't been passed to reposetup()
    if util.safehasattr(
        srcrepo.svfs, b'lfslocalblobstore'
    ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
        srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
        dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

        for dirpath, dirs, files in srclfsvfs.walk():
            for oid in files:
                ui.write(_(b'copying lfs blob %s\n') % oid)
                lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))


@eh.wrapfunction(upgrade, b'preservedrequirements')
@eh.wrapfunction(upgrade, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if b'lfs' in repo.requirements:
        reqs.add(b'lfs')
    return reqs
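
The `_prefetchfiles` hunk above is the whole functional change in this file: the prefetch hook now receives a list of `(revision, matcher)` pairs instead of a single matcher shared by every revision, so each revision can contribute its own file set while the hook still batches all missing blobs into one `readbatch()` call. As a rough caller-side sketch (assuming the post-change `scmutil.prefetchfiles()` entry point; the helper name and the per-revision exact matcher are illustrative, not part of this commit):

from mercurial import match as matchmod, scmutil


def prefetch_touched_files(repo, revs):
    # Hypothetical helper: build one (rev, matcher) pair per revision, so
    # each revision prefetches only the files it actually touches.
    revmatches = [(rev, matchmod.exact(repo[rev].files())) for rev in revs]
    # Registered hooks (such as _prefetchfiles above) receive the whole
    # list and may group the resulting downloads into a single batch.
    scmutil.prefetchfiles(repo, revmatches)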
@@ -1,1285 +1,1285 b''
# __init__.py - remotefilelog extension
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""remotefilelog causes Mercurial to lazily fetch file contents (EXPERIMENTAL)

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

remotefilelog works by fetching file contents lazily and storing them
in a cache on the client rather than in revlogs. This allows enormous
histories to be transferred only partially, making them easier to
operate on.

Configs:

``packs.maxchainlen`` specifies the maximum delta chain length in pack files

``packs.maxpacksize`` specifies the maximum pack file size

``packs.maxpackfilecount`` specifies the maximum number of packs in the
shared cache (trees only for now)

``remotefilelog.backgroundprefetch`` runs prefetch in background when True

``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
update, and on other commands that use them. Different from pullprefetch.

``remotefilelog.gcrepack`` does garbage collection during repack when True

``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
it is garbage collected

``remotefilelog.repackonhggc`` runs repack on hg gc when True

``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
days after which it is no longer prefetched.

``remotefilelog.prefetchdelay`` specifies delay between background
prefetches in seconds after operations that change the working copy parent

``remotefilelog.data.gencountlimit`` constrains the minimum number of data
pack files required to be considered part of a generation. In particular,
the minimum number of pack files is > gencountlimit.

``remotefilelog.data.generations`` list for specifying the lower bound of
each generation of the data pack files. For example, list ['100MB', '1MB']
or ['1MB', '100MB'] will lead to three generations:
[0, 1MB), [1MB, 100MB) and [100MB, infinity).

``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
include in an incremental data repack.

``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
it to be considered for an incremental data repack.

``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
to include in an incremental data repack.

``remotefilelog.history.gencountlimit`` constrains the minimum number of
history pack files required to be considered part of a generation. In
particular, the minimum number of pack files is > gencountlimit.

``remotefilelog.history.generations`` list for specifying the lower bound of
each generation of the history pack files. For example, list
['100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations:
[0, 1MB), [1MB, 100MB) and [100MB, infinity).

``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
include in an incremental history repack.

``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
for it to be considered for an incremental history repack.

``remotefilelog.history.repacksizelimit`` the maximum total size of pack
files to include in an incremental history repack.

``remotefilelog.backgroundrepack`` automatically consolidate packs in the
background

``remotefilelog.cachepath`` path to cache

``remotefilelog.cachegroup`` if set, make cache directory sgid to this
group

``remotefilelog.cacheprocess`` binary to invoke for fetching file data

``remotefilelog.debug`` turn on remotefilelog-specific debug output

``remotefilelog.excludepattern`` pattern of files to exclude from pulls

``remotefilelog.includepattern`` pattern of files to include in pulls

``remotefilelog.fetchwarning`` message to print when too many
single-file fetches occur

``remotefilelog.getfilesstep`` number of files to request in a single RPC

``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
files, otherwise use optimistic fetching

``remotefilelog.pullprefetch`` revset for selecting files that should be
eagerly downloaded rather than lazily

``remotefilelog.reponame`` name of the repo. If set, used to partition
data from other repos in a shared store.

``remotefilelog.server`` if true, enable server-side functionality

``remotefilelog.servercachepath`` path for caching blobs on the server

``remotefilelog.serverexpiration`` number of days to keep cached server
blobs

``remotefilelog.validatecache`` if set, check cache entries for corruption
before returning blobs

``remotefilelog.validatecachelog`` if set, check cache entries for
corruption before returning metadata

"""
127 from __future__ import absolute_import
127 from __future__ import absolute_import
128
128
129 import os
129 import os
130 import time
130 import time
131 import traceback
131 import traceback
132
132
133 from mercurial.node import hex
133 from mercurial.node import hex
134 from mercurial.i18n import _
134 from mercurial.i18n import _
135 from mercurial.pycompat import open
135 from mercurial.pycompat import open
136 from mercurial import (
136 from mercurial import (
137 changegroup,
137 changegroup,
138 changelog,
138 changelog,
139 cmdutil,
139 cmdutil,
140 commands,
140 commands,
141 configitems,
141 configitems,
142 context,
142 context,
143 copies,
143 copies,
144 debugcommands as hgdebugcommands,
144 debugcommands as hgdebugcommands,
145 dispatch,
145 dispatch,
146 error,
146 error,
147 exchange,
147 exchange,
148 extensions,
148 extensions,
149 hg,
149 hg,
150 localrepo,
150 localrepo,
151 match,
151 match,
152 merge,
152 merge,
153 node as nodemod,
153 node as nodemod,
154 patch,
154 patch,
155 pycompat,
155 pycompat,
156 registrar,
156 registrar,
157 repair,
157 repair,
158 repoview,
158 repoview,
159 revset,
159 revset,
160 scmutil,
160 scmutil,
161 smartset,
161 smartset,
162 streamclone,
162 streamclone,
163 util,
163 util,
164 )
164 )
165 from . import (
165 from . import (
166 constants,
166 constants,
167 debugcommands,
167 debugcommands,
168 fileserverclient,
168 fileserverclient,
169 remotefilectx,
169 remotefilectx,
170 remotefilelog,
170 remotefilelog,
171 remotefilelogserver,
171 remotefilelogserver,
172 repack as repackmod,
172 repack as repackmod,
173 shallowbundle,
173 shallowbundle,
174 shallowrepo,
174 shallowrepo,
175 shallowstore,
175 shallowstore,
176 shallowutil,
176 shallowutil,
177 shallowverifier,
177 shallowverifier,
178 )
178 )
179
179
180 # ensures debug commands are registered
180 # ensures debug commands are registered
181 hgdebugcommands.command
181 hgdebugcommands.command
182
182
183 cmdtable = {}
183 cmdtable = {}
184 command = registrar.command(cmdtable)
184 command = registrar.command(cmdtable)
185
185
186 configtable = {}
186 configtable = {}
187 configitem = registrar.configitem(configtable)
187 configitem = registrar.configitem(configtable)
188
188
189 configitem(b'remotefilelog', b'debug', default=False)
189 configitem(b'remotefilelog', b'debug', default=False)
190
190
191 configitem(b'remotefilelog', b'reponame', default=b'')
191 configitem(b'remotefilelog', b'reponame', default=b'')
192 configitem(b'remotefilelog', b'cachepath', default=None)
192 configitem(b'remotefilelog', b'cachepath', default=None)
193 configitem(b'remotefilelog', b'cachegroup', default=None)
193 configitem(b'remotefilelog', b'cachegroup', default=None)
194 configitem(b'remotefilelog', b'cacheprocess', default=None)
194 configitem(b'remotefilelog', b'cacheprocess', default=None)
195 configitem(b'remotefilelog', b'cacheprocess.includepath', default=None)
195 configitem(b'remotefilelog', b'cacheprocess.includepath', default=None)
196 configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB")
196 configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB")
197
197
198 configitem(
198 configitem(
199 b'remotefilelog',
199 b'remotefilelog',
200 b'fallbackpath',
200 b'fallbackpath',
201 default=configitems.dynamicdefault,
201 default=configitems.dynamicdefault,
202 alias=[(b'remotefilelog', b'fallbackrepo')],
202 alias=[(b'remotefilelog', b'fallbackrepo')],
203 )
203 )
204
204
205 configitem(b'remotefilelog', b'validatecachelog', default=None)
205 configitem(b'remotefilelog', b'validatecachelog', default=None)
206 configitem(b'remotefilelog', b'validatecache', default=b'on')
206 configitem(b'remotefilelog', b'validatecache', default=b'on')
207 configitem(b'remotefilelog', b'server', default=None)
207 configitem(b'remotefilelog', b'server', default=None)
208 configitem(b'remotefilelog', b'servercachepath', default=None)
208 configitem(b'remotefilelog', b'servercachepath', default=None)
209 configitem(b"remotefilelog", b"serverexpiration", default=30)
209 configitem(b"remotefilelog", b"serverexpiration", default=30)
210 configitem(b'remotefilelog', b'backgroundrepack', default=False)
210 configitem(b'remotefilelog', b'backgroundrepack', default=False)
211 configitem(b'remotefilelog', b'bgprefetchrevs', default=None)
211 configitem(b'remotefilelog', b'bgprefetchrevs', default=None)
212 configitem(b'remotefilelog', b'pullprefetch', default=None)
212 configitem(b'remotefilelog', b'pullprefetch', default=None)
213 configitem(b'remotefilelog', b'backgroundprefetch', default=False)
213 configitem(b'remotefilelog', b'backgroundprefetch', default=False)
214 configitem(b'remotefilelog', b'prefetchdelay', default=120)
214 configitem(b'remotefilelog', b'prefetchdelay', default=120)
215 configitem(b'remotefilelog', b'prefetchdays', default=14)
215 configitem(b'remotefilelog', b'prefetchdays', default=14)
216
216
217 configitem(b'remotefilelog', b'getfilesstep', default=10000)
217 configitem(b'remotefilelog', b'getfilesstep', default=10000)
218 configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
218 configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
219 configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault)
219 configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault)
220 configitem(b'remotefilelog', b'fetchwarning', default=b'')
220 configitem(b'remotefilelog', b'fetchwarning', default=b'')
221
221
222 configitem(b'remotefilelog', b'includepattern', default=None)
222 configitem(b'remotefilelog', b'includepattern', default=None)
223 configitem(b'remotefilelog', b'excludepattern', default=None)
223 configitem(b'remotefilelog', b'excludepattern', default=None)
224
224
225 configitem(b'remotefilelog', b'gcrepack', default=False)
225 configitem(b'remotefilelog', b'gcrepack', default=False)
226 configitem(b'remotefilelog', b'repackonhggc', default=False)
226 configitem(b'remotefilelog', b'repackonhggc', default=False)
227 configitem(b'repack', b'chainorphansbysize', default=True, experimental=True)
227 configitem(b'repack', b'chainorphansbysize', default=True, experimental=True)
228
228
229 configitem(b'packs', b'maxpacksize', default=0)
229 configitem(b'packs', b'maxpacksize', default=0)
230 configitem(b'packs', b'maxchainlen', default=1000)
230 configitem(b'packs', b'maxchainlen', default=1000)
231
231
232 configitem(b'devel', b'remotefilelog.bg-wait', default=False)
232 configitem(b'devel', b'remotefilelog.bg-wait', default=False)
233
233
234 # default TTL limit is 30 days
234 # default TTL limit is 30 days
235 _defaultlimit = 60 * 60 * 24 * 30
235 _defaultlimit = 60 * 60 * 24 * 30
236 configitem(b'remotefilelog', b'nodettl', default=_defaultlimit)
236 configitem(b'remotefilelog', b'nodettl', default=_defaultlimit)
237
237
238 configitem(b'remotefilelog', b'data.gencountlimit', default=2),
238 configitem(b'remotefilelog', b'data.gencountlimit', default=2),
239 configitem(
239 configitem(
240 b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB']
240 b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB']
241 )
241 )
242 configitem(b'remotefilelog', b'data.maxrepackpacks', default=50)
242 configitem(b'remotefilelog', b'data.maxrepackpacks', default=50)
243 configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB')
243 configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB')
244 configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB')
244 configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB')
245
245
246 configitem(b'remotefilelog', b'history.gencountlimit', default=2),
246 configitem(b'remotefilelog', b'history.gencountlimit', default=2),
247 configitem(b'remotefilelog', b'history.generations', default=[b'100MB'])
247 configitem(b'remotefilelog', b'history.generations', default=[b'100MB'])
248 configitem(b'remotefilelog', b'history.maxrepackpacks', default=50)
248 configitem(b'remotefilelog', b'history.maxrepackpacks', default=50)
249 configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB')
249 configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB')
250 configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB')
250 configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB')
251
251
252 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
252 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
253 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
253 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
254 # be specifying the version(s) of Mercurial they are tested with, or
254 # be specifying the version(s) of Mercurial they are tested with, or
255 # leave the attribute unspecified.
255 # leave the attribute unspecified.
256 testedwith = b'ships-with-hg-core'
256 testedwith = b'ships-with-hg-core'
257
257
258 repoclass = localrepo.localrepository
258 repoclass = localrepo.localrepository
259 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
259 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
260
260
261 isenabled = shallowutil.isenabled
261 isenabled = shallowutil.isenabled
262
262
263
263
264 def uisetup(ui):
264 def uisetup(ui):
265 """Wraps user facing Mercurial commands to swap them out with shallow
265 """Wraps user facing Mercurial commands to swap them out with shallow
266 versions.
266 versions.
267 """
267 """
268 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
268 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
269
269
270 entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow)
270 entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow)
271 entry[1].append(
271 entry[1].append(
272 (
272 (
273 b'',
            b'',
            b'shallow',
            None,
            _(b"create a shallow clone which uses remote file history"),
        )
    )

    extensions.wrapcommand(
        commands.table, b'debugindex', debugcommands.debugindex
    )
    extensions.wrapcommand(
        commands.table, b'debugindexdot', debugcommands.debugindexdot
    )
    extensions.wrapcommand(commands.table, b'log', log)
    extensions.wrapcommand(commands.table, b'pull', pull)

    # Prevent 'hg manifest --all'
    def _manifest(orig, ui, repo, *args, **opts):
        if isenabled(repo) and opts.get('all'):
            raise error.Abort(_(b"--all is not supported in a shallow repo"))

        return orig(ui, repo, *args, **opts)

    extensions.wrapcommand(commands.table, b"manifest", _manifest)

    # Wrap remotefilelog with lfs code
    def _lfsloaded(loaded=False):
        lfsmod = None
        try:
            lfsmod = extensions.find(b'lfs')
        except KeyError:
            pass
        if lfsmod:
            lfsmod.wrapfilelog(remotefilelog.remotefilelog)
            fileserverclient._lfsmod = lfsmod

    extensions.afterloaded(b'lfs', _lfsloaded)

    # debugdata needs remotefilelog.len to work
    extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow)

    changegroup.cgpacker = shallowbundle.shallowcg1packer

    extensions.wrapfunction(
        changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles
    )
    extensions.wrapfunction(
        changegroup, b'makechangegroup', shallowbundle.makechangegroup
    )
    extensions.wrapfunction(localrepo, b'makestore', storewrapper)
    extensions.wrapfunction(exchange, b'pull', exchangepull)
    extensions.wrapfunction(merge, b'applyupdates', applyupdates)
    extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles)
    extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup)
    extensions.wrapfunction(scmutil, b'_findrenames', findrenames)
    extensions.wrapfunction(
        copies, b'_computeforwardmissing', computeforwardmissing
    )
    extensions.wrapfunction(dispatch, b'runcommand', runcommand)
    extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets)
    extensions.wrapfunction(context.changectx, b'filectx', filectx)
    extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx)
    extensions.wrapfunction(patch, b'trydiff', trydiff)
    extensions.wrapfunction(hg, b'verify', _verify)
    scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook)

    # disappointing hacks below
    extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn)
    extensions.wrapfunction(revset, b'filelog', filelogrevset)
    revset.symbols[b'filelog'] = revset.filelog
    extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs)
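
    # All of the wrappers registered above follow the same
    # extensions.wrapfunction()/wrapcommand() contract: the replacement
    # receives the previous implementation as its first argument and is
    # responsible for calling it.  A minimal sketch of the pattern (the
    # names below are placeholders, not part of this module):
    #
    #     def mywrapper(orig, *args, **kwargs):
    #         # ... extra work, e.g. prefetching file contents ...
    #         return orig(*args, **kwargs)
    #
    #     extensions.wrapfunction(somemodule, b'somefunc', mywrapper)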


def cloneshallow(orig, ui, repo, *args, **opts):
    if opts.get('shallow'):
        repos = []

        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (
                        self.__class__.__bases__[0],
                        self.unfiltered().__class__,
                    )
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                self._writerequirements()

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)

        extensions.wrapfunction(exchange, b'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
                        opts['includepattern'] = b'\0'.join(repo.includepattern)
                    if repo.excludepattern:
                        opts['excludepattern'] = b'\0'.join(repo.excludepattern)
                    return remote._callstream(b'stream_out_shallow', **opts)
                else:
                    return orig()

            extensions.wrapfunction(remote, b'stream_out', stream_out_shallow)

        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)

        extensions.wrapfunction(
            streamclone, b'maybeperformlegacystreamclone', stream_wrap
        )

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
                pullop.remotebundle2caps[b'stream'] = [
                    c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2'
                ]
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements

        extensions.wrapfunction(
            streamclone, b'canperformstreamclone', canperformstreamclone
        )

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get('shallow'):
            for r in repos:
                if util.safehasattr(r, b'fileservice'):
                    r.fileservice.close()

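# With the --shallow flag registered in uisetup() above, a shallow clone is
# requested as, for example (the URL is illustrative):
#
#     hg clone --shallow ssh://user@server/repo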

def debugdatashallow(orig, *args, **kwds):
    oldlen = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda x: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = oldlen


def reposetup(ui, repo):
    if not repo.local():
        return

    # set up here intentionally because it doesn't work in uisetup
    ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch)
    ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch)

    isserverenabled = ui.configbool(b'remotefilelog', b'server')
    isshallowclient = isenabled(repo)

    if isserverenabled and isshallowclient:
        raise RuntimeError(b"Cannot be both a server and shallow client.")

    if isshallowclient:
        setupclient(ui, repo)

    if isserverenabled:
        remotefilelogserver.setupserver(ui, repo)


def setupclient(ui, repo):
    if not isinstance(repo, localrepo.localrepository):
        return

    # Even clients get the server setup since they need to have the
    # wireprotocol endpoints registered.
    remotefilelogserver.onetimesetup(ui)
    onetimeclientsetup(ui)

    shallowrepo.wraprepo(repo)
    repo.store = shallowstore.wrapstore(repo.store)


def storewrapper(orig, requirements, path, vfstype):
    s = orig(requirements, path, vfstype)
    if constants.SHALLOWREPO_REQUIREMENT in requirements:
        s = shallowstore.wrapstore(s)

    return s


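# Each of the prefetch wrappers below follows the same convention: collect
# (filename, hex filenode) pairs for everything the wrapped operation is
# about to read, then hand them to repo.fileservice.prefetch() in a single
# batch so they can be fetched from the server together.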
# prefetch files before update
def applyupdates(
    orig, repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
):
    if isenabled(repo):
        manifest = mctx.manifest()
        files = []
        for f, args, msg in actions[b'g']:
            files.append((f, hex(manifest[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(
        repo, actions, wctx, mctx, overwrite, wantfiledata, labels=labels
    )


# Prefetch the files that merge's checkunknownfiles will examine
def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs):
    if isenabled(repo):
        files = []
        sparsematch = repo.maybesparsematch(mctx.rev())
        for f, (m, actionargs, msg) in pycompat.iteritems(actions):
            if sparsematch and not sparsematch(f):
                continue
            if m in (b'c', b'dc', b'cm'):
                files.append((f, hex(mctx.filenode(f))))
            elif m == b'dg':
                f2 = actionargs[0]
                files.append((f2, hex(mctx.filenode(f2))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, wctx, mctx, force, actions, *args, **kwargs)


# Prefetch files before status attempts to look at their size and contents
def checklookup(orig, self, files):
    repo = self._repo
    if isenabled(repo):
        prefetchfiles = []
        for parent in self._parents:
            for f in files:
                if f in parent:
                    prefetchfiles.append((f, hex(parent.filenode(f))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(prefetchfiles)
    return orig(self, files)


# Prefetch files for the logic that compares added and removed files for
# renames
def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
    if isenabled(repo):
        files = []
        pmf = repo[b'.'].manifest()
        for f in removed:
            if f in pmf:
                files.append((f, hex(pmf[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, matcher, added, removed, *args, **kwargs)


# prefetch files before pathcopies check
def computeforwardmissing(orig, a, b, match=None):
    missing = orig(a, b, match=match)
    repo = a._repo
    if isenabled(repo):
        mb = b.manifest()

        files = []
        sparsematch = repo.maybesparsematch(b.rev())
        if sparsematch:
            sparsemissing = set()
            for f in missing:
                if sparsematch(f):
                    files.append((f, hex(mb[f])))
                    sparsemissing.add(f)
            missing = sparsemissing

        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return missing


# close the cache-miss server connection after the command has finished
def runcommand(orig, lui, repo, *args, **kwargs):
    fileservice = None
    # repo can be None when running in chg:
    # - at startup, reposetup was called because serve is not norepo
    # - a norepo command like "help" is called
    if repo and isenabled(repo):
        fileservice = repo.fileservice
    try:
        return orig(lui, repo, *args, **kwargs)
    finally:
        if fileservice:
            fileservice.close()


# prevent strip from stripping remotefilelogs
def _collectbrokencsets(orig, repo, files, striprev):
    if isenabled(repo):
        files = [f for f in files if not repo.shallowmatch(f)]
    return orig(repo, files, striprev)


# changectx wrappers
def filectx(orig, self, path, fileid=None, filelog=None):
    if fileid is None:
        fileid = self.filenode(path)
    if isenabled(self._repo) and self._repo.shallowmatch(path):
        return remotefilectx.remotefilectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
    return orig(self, path, fileid=fileid, filelog=filelog)


def workingfilectx(orig, self, path, filelog=None):
    if isenabled(self._repo) and self._repo.shallowmatch(path):
        return remotefilectx.remoteworkingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
    return orig(self, path, filelog=filelog)


# prefetch required revisions before a diff
def trydiff(
    orig,
    repo,
    revs,
    ctx1,
    ctx2,
    modified,
    added,
    removed,
    copy,
    getfilectx,
    *args,
    **kwargs
):
    if isenabled(repo):
        prefetch = []
        mf1 = ctx1.manifest()
        for fname in modified + added + removed:
            if fname in mf1:
                fnode = getfilectx(fname, ctx1).filenode()
                # fnode can be None if it's an edited working ctx file
                if fnode:
                    prefetch.append((fname, hex(fnode)))
            if fname not in removed:
                fnode = getfilectx(fname, ctx2).filenode()
                if fnode:
                    prefetch.append((fname, hex(fnode)))

        repo.fileservice.prefetch(prefetch)

    return orig(
        repo,
        revs,
        ctx1,
        ctx2,
        modified,
        added,
        removed,
        copy,
        getfilectx,
        *args,
        **kwargs
    )


# Prevent verify from processing files
# a stub for mercurial.hg.verify()
def _verify(orig, repo, level=None):
    lock = repo.lock()
    try:
        return shallowverifier.shallowverifier(repo).verify()
    finally:
        lock.release()


clientonetime = False


def onetimeclientsetup(ui):
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []

    def addrawrevision(
        orig,
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        if isinstance(link, int):
            pendingfilecommits.append(
                (
                    self,
                    rawtext,
                    transaction,
                    link,
                    p1,
                    p2,
                    node,
                    flags,
                    cachedelta,
                    _metatuple,
                )
            )
            return node
        else:
            return orig(
                self,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                node,
                flags,
                cachedelta,
                _metatuple=_metatuple,
            )

    extensions.wrapfunction(
        remotefilelog.remotefilelog, b'addrawrevision', addrawrevision
    )

    def changelogadd(orig, self, *args, **kwargs):
        oldlen = len(self)
        node = orig(self, *args, **kwargs)
        newlen = len(self)
        if oldlen != newlen:
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        b'pending multiple integer revisions are not supported'
                    )
        else:
            # "link" is actually wrong here (it is set to len(changelog))
            # if changelog remains unchanged, skip writing file revisions
            # but still do a sanity check about pending multiple revisions
            if len({x[3] for x in pendingfilecommits}) > 1:
                raise error.ProgrammingError(
                    b'pending multiple integer revisions are not supported'
                )
        del pendingfilecommits[:]
        return node

    extensions.wrapfunction(changelog.changelog, b'add', changelogadd)
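
    # Taken together, addrawrevision and changelogadd act as a small
    # two-phase commit: file revisions whose linkrev is still a pending
    # integer are buffered in pendingfilecommits and only written out once
    # the changelog entry exists and the real linknode is known.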


def getrenamedfn(orig, repo, endrev=None):
    if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
        return orig(repo, endrev)

    rcache = {}

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if rev in rcache.setdefault(fn, {}):
            return rcache[fn][rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()
                    rcache[fn][ancestor.rev()] = renamed and renamed[0]

            renamed = fctx.renamed()
            return renamed and renamed[0]
        except error.LookupError:
            return None

    return getrenamed


def walkfilerevs(orig, repo, match, follow, revs, fncache):
    if not isenabled(repo):
        return orig(repo, match, follow, revs, fncache)

    # remotefilelogs can't be walked in rev order, so throw.
    # The caller will see the exception and walk the commit tree instead.
    if not follow:
        raise cmdutil.FileWalkError(b"Cannot walk via filelog")

    wanted = set()
    minrev, maxrev = min(revs), max(revs)

    pctx = repo[b'.']
    for filename in match.files():
        if filename not in pctx:
            raise error.Abort(
                _(b'cannot follow file not in parent revision: "%s"') % filename
            )
        fctx = pctx[filename]

        linkrev = fctx.linkrev()
        if linkrev >= minrev and linkrev <= maxrev:
            fncache.setdefault(linkrev, []).append(filename)
            wanted.add(linkrev)

        for ancestor in fctx.ancestors():
            linkrev = ancestor.linkrev()
            if linkrev >= minrev and linkrev <= maxrev:
                fncache.setdefault(linkrev, []).append(ancestor.path())
                wanted.add(linkrev)

    return wanted


def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if not isenabled(repo):
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _(b"filelog requires a pattern"))
    m = match.match(
        repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None]
    )
    s = set()

    if not match.patkind(pat):
        # slow
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])


@command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    '''garbage collect the client and server filelog caches'''
    cachepaths = set()

    # get the system client cache
    systemcache = shallowutil.getcachepath(ui, allowempty=True)
    if systemcache:
        cachepaths.add(systemcache)

    # get repo client and server cache
    repopaths = []
    pwd = ui.environ.get(b'PWD')
    if pwd:
        repopaths.append(pwd)

    repopaths.extend(args)
    repos = []
    for repopath in repopaths:
        try:
            repo = hg.peer(ui, {}, repopath)
            repos.append(repo)

            repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
            if repocache:
                cachepaths.add(repocache)
        except error.RepoError:
            pass

    # gc client cache
    for cachepath in cachepaths:
        gcclient(ui, cachepath)

    # gc server cache
    for repo in repos:
        remotefilelogserver.gcserver(ui, repo._repo)


def gcclient(ui, cachepath):
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, b'repos')
    if not os.path.exists(repospath):
        ui.warn(_(b"no known cache at %s\n") % cachepath)
        return

    reposfile = open(repospath, b'rb')
    repos = {r[:-1] for r in reposfile.readlines()}
    reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    sharedcache = None
    filesrepacked = False

    count = 0
    progress = ui.makeprogress(
        _(b"analyzing repositories"), unit=b"repos", total=len(repos)
    )
    for path in repos:
        progress.update(count)
        count += 1
        try:
            path = ui.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

        if not util.safehasattr(repo, b'name'):
            ui.warn(
                _(b"repo %s is a misconfigured remotefilelog repo\n") % path
            )
            continue

        # If garbage collection on repack and repack on hg gc are enabled
        # then loose files are repacked and garbage collected.
        # Otherwise regular garbage collection is performed.
        repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
        gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
        if repackonhggc and gcrepack:
            try:
                repackmod.incrementalrepack(repo)
                filesrepacked = True
                continue
            except (IOError, repackmod.RepackAlreadyRunning):
                # If the repack cannot be performed (e.g. not enough disk
                # space), continue doing garbage collection of loose files
                # without repacking.
                pass

        reponame = repo.name
        if not sharedcache:
            sharedcache = repo.sharedstore

        # Compute a keepset which is not garbage collected
        def keyfn(fname, fnode):
            return fileserverclient.getcachekey(reponame, fname, hex(fnode))

        keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)

    progress.complete()

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, b'wb')
        reposfile.writelines([(b"%s\n" % r) for r in validrepos])
        reposfile.close()
    finally:
        os.umask(oldumask)

    # prune cache
    if sharedcache is not None:
        sharedcache.gc(keepkeys)
    elif not filesrepacked:
        ui.warn(_(b"warning: no valid repos in repofile\n"))


def log(orig, ui, repo, *pats, **opts):
    if not isenabled(repo):
        return orig(ui, repo, *pats, **opts)

    follow = opts.get('follow')
    revs = opts.get('rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if not follow or revs:
            # This forces the slowpath
            opts['removed'] = True

        # If this is a non-follow log without any revs specified, recommend
        # that the user add -f to speed it up.
        if not follow and not revs:
            match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts))
            isfile = not match.anypats()
            if isfile:
                for file in match.files():
                    if not os.path.isfile(repo.wjoin(file)):
                        isfile = False
                        break

            if isfile:
                ui.warn(
                    _(
                        b"warning: file log can be slow on large repos - "
                        + b"use -f to speed it up\n"
                    )
                )

    return orig(ui, repo, *pats, **opts)


def revdatelimit(ui, revset):
    """Update revset so that only changesets no older than 'prefetchdays' days
    are included. The default value is set to 14 days. If 'prefetchdays' is
    set to a zero or negative value then the date restriction is not applied.
    """
    days = ui.configint(b'remotefilelog', b'prefetchdays')
    if days > 0:
        revset = b'(%s) & date(-%s)' % (revset, days)
    return revset

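# For example, with the default prefetchdays=14, revdatelimit(ui, b'master')
# returns the revset b'(master) & date(-14)', limiting prefetching to
# changesets from the last two weeks ('master' is an illustrative revset).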

def readytofetch(repo):
    """Check that enough time has passed since the last background prefetch.
    This only relates to prefetches after operations that change the working
    copy parent. Default delay between background prefetches is 2 minutes.
    """
    timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay')
    fname = repo.vfs.join(b'lastprefetch')

    ready = False
    with open(fname, b'a'):
        # the with construct above is used to avoid race conditions
        modtime = os.path.getmtime(fname)
        if (time.time() - modtime) > timeout:
            os.utime(fname, None)
            ready = True

    return ready

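# Opening b'lastprefetch' in append mode creates it on first use; its mtime
# then serves as a cheap rate limiter shared by every hg process touching
# this repository.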

def wcpprefetch(ui, repo, **kwargs):
    """Prefetches, in the background, the revisions specified by the
    bgprefetchrevs revset. Does a background repack if the backgroundrepack
    flag is set in the config.
    """
    shallow = isenabled(repo)
    bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs')
    isready = readytofetch(repo)

    if not (shallow and bgprefetchrevs and isready):
        return

    bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
    # update a revset with a date limit
    bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)

    def anon(unused_success):
        if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)

    repo._afterlock(anon)


def pull(orig, ui, repo, *pats, **opts):
    result = orig(ui, repo, *pats, **opts)

    if isenabled(repo):
        # prefetch if it's configured
        prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch')
        bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
        bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch')

        if prefetchrevset:
            ui.status(_(b"prefetching file contents\n"))
            revs = scmutil.revrange(repo, [prefetchrevset])
            base = repo[b'.'].rev()
            if bgprefetch:
                repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
            else:
                repo.prefetch(revs, base=base)
                if bgrepack:
                    repackmod.backgroundrepack(repo, incremental=True)
        elif bgrepack:
            repackmod.backgroundrepack(repo, incremental=True)

    return result

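# Summary of the config matrix handled in pull() above: pullprefetch decides
# *what* to fetch, backgroundprefetch decides *how* (inline vs. in the
# background), and backgroundrepack optionally repacks afterwards either way.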

def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(
        orig, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add(constants.BUNDLE2_CAPABLITY)
        return orig(
            source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs
        )

    if util.safehasattr(remote, b'_callstream'):
        remote._localrepo = repo
    elif util.safehasattr(remote, b'getbundle'):
        extensions.wrapfunction(remote, b'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)

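# localgetbundle() injects BUNDLE2_CAPABLITY (the constant's upstream
# spelling) into every getbundle request, advertising remotefilelog support
# so the server can serve bundles suited to a shallow client.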

def _fileprefetchhook(repo, revmatches):
    if isenabled(repo):
        allfiles = []
        for rev, match in revmatches:
            if rev == nodemod.wdirrev or rev is None:
                continue
            ctx = repo[rev]
            mf = ctx.manifest()
            sparsematch = repo.maybesparsematch(ctx.rev())
            for path in ctx.walk(match):
                if (not sparsematch or sparsematch(path)) and path in mf:
                    allfiles.append((path, hex(mf[path])))
        repo.fileservice.prefetch(allfiles)

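# The hook now receives (rev, match) pairs instead of one matcher for all
# revisions, so callers can prefetch a different set of files per revision.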

@command(
    b'debugremotefilelog',
    [(b'd', b'decompress', None, _(b'decompress the filelog first')),],
    _(b'hg debugremotefilelog <path>'),
    norepo=True,
)
def debugremotefilelog(ui, path, **opts):
    return debugcommands.debugremotefilelog(ui, path, **opts)


@command(
    b'verifyremotefilelog',
    [(b'd', b'decompress', None, _(b'decompress the filelogs first')),],
    _(b'hg verifyremotefilelog <directory>'),
    norepo=True,
)
def verifyremotefilelog(ui, path, **opts):
    return debugcommands.verifyremotefilelog(ui, path, **opts)


@command(
    b'debugdatapack',
    [
        (b'', b'long', None, _(b'print the long hashes')),
        (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'),
    ],
    _(b'hg debugdatapack <paths>'),
    norepo=True,
)
def debugdatapack(ui, *paths, **opts):
    return debugcommands.debugdatapack(ui, *paths, **opts)


@command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True)
def debughistorypack(ui, path, **opts):
    return debugcommands.debughistorypack(ui, path)


@command(b'debugkeepset', [], _(b'hg debugkeepset'))
def debugkeepset(ui, repo, **opts):
    # The command is used to measure keepset computation time
    def keyfn(fname, fnode):
        return fileserverclient.getcachekey(repo.name, fname, hex(fnode))

    repackmod.keepset(repo, keyfn)
    return


@command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack'))
def debugwaitonrepack(ui, repo, **opts):
    return debugcommands.debugwaitonrepack(repo)


@command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch'))
def debugwaitonprefetch(ui, repo, **opts):
    return debugcommands.debugwaitonprefetch(repo)


def resolveprefetchopts(ui, opts):
    if not opts.get(b'rev'):
        revset = [b'.', b'draft()']

        prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None)
        if prefetchrevset:
            revset.append(b'(%s)' % prefetchrevset)
        bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None)
        if bgprefetchrevs:
            revset.append(b'(%s)' % bgprefetchrevs)
        revset = b'+'.join(revset)

        # update a revset with a date limit
        revset = revdatelimit(ui, revset)

        opts[b'rev'] = [revset]

    if not opts.get(b'base'):
        opts[b'base'] = None

    return opts

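# With no --rev given, the default revset built above looks like, e.g.,
# b'.+draft()+(<pullprefetch>)+(<bgprefetchrevs>)' (the parenthesized parts
# are included only when configured), further restricted by revdatelimit().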

@command(
    b'prefetch',
    [
        (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')),
        (b'', b'repack', False, _(b'run repack after prefetch')),
        (b'b', b'base', b'', _(b"rev that is assumed to already be local")),
    ]
    + commands.walkopts,
    _(b'hg prefetch [OPTIONS] [FILE...]'),
    helpcategory=command.CATEGORY_MAINTENANCE,
)
def prefetch(ui, repo, *pats, **opts):
    """prefetch file revisions from the server

    Prefetches file revisions for the specified revs and stores them in the
    local remotefilelog cache. If no rev is specified, the default rev is
    used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
    File names or patterns can be used to limit which files are downloaded.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    if not isenabled(repo):
        raise error.Abort(_(b"repo is not shallow"))

    opts = resolveprefetchopts(ui, opts)
    revs = scmutil.revrange(repo, opts.get(b'rev'))
    repo.prefetch(revs, opts.get(b'base'), pats, opts)

    # Run repack in background
    if opts.get(b'repack'):
        repackmod.backgroundrepack(repo, incremental=True)

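# Typical invocations (the revset and pattern shown are illustrative):
#
#     hg prefetch -r master
#     hg prefetch -r '.:tip' --repack glob:**.py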
1250
1250
1251 @command(
1251 @command(
1252 b'repack',
1252 b'repack',
1253 [
1253 [
1254 (b'', b'background', None, _(b'run in a background process'), None),
1254 (b'', b'background', None, _(b'run in a background process'), None),
1255 (b'', b'incremental', None, _(b'do an incremental repack'), None),
1255 (b'', b'incremental', None, _(b'do an incremental repack'), None),
1256 (
1256 (
1257 b'',
1257 b'',
1258 b'packsonly',
1258 b'packsonly',
1259 None,
1259 None,
1260 _(b'only repack packs (skip loose objects)'),
1260 _(b'only repack packs (skip loose objects)'),
1261 None,
1261 None,
1262 ),
1262 ),
1263 ],
1263 ],
1264 _(b'hg repack [OPTIONS]'),
1264 _(b'hg repack [OPTIONS]'),
1265 )
1265 )
1266 def repack_(ui, repo, *pats, **opts):
1266 def repack_(ui, repo, *pats, **opts):
1267 if opts.get('background'):
1267 if opts.get('background'):
1268 repackmod.backgroundrepack(
1268 repackmod.backgroundrepack(
1269 repo,
1269 repo,
1270 incremental=opts.get('incremental'),
1270 incremental=opts.get('incremental'),
1271 packsonly=opts.get('packsonly', False),
1271 packsonly=opts.get('packsonly', False),
1272 )
1272 )
1273 return
1273 return
1274
1274
1275 options = {b'packsonly': opts.get('packsonly')}
1275 options = {b'packsonly': opts.get('packsonly')}
1276
1276
1277 try:
1277 try:
1278 if opts.get('incremental'):
1278 if opts.get('incremental'):
1279 repackmod.incrementalrepack(repo, options=options)
1279 repackmod.incrementalrepack(repo, options=options)
1280 else:
1280 else:
1281 repackmod.fullrepack(repo, options=options)
1281 repackmod.fullrepack(repo, options=options)
1282 except repackmod.RepackAlreadyRunning as ex:
1282 except repackmod.RepackAlreadyRunning as ex:
1283 # Don't propagate the exception if the repack is already in
1283 # Don't propagate the exception if the repack is already in
1284 # progress, since we want the command to exit 0.
1284 # progress, since we want the command to exit 0.
1285 repo.ui.warn(b'%s\n' % ex)
1285 repo.ui.warn(b'%s\n' % ex)
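Usage note (illustrative): `hg repack --background --incremental` spawns a detached process and returns immediately, while a plain `hg repack` runs a full repack inline; thanks to the handler above, the command still exits 0 when another repack already holds the lock.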
@@ -1,390 +1,390 @@
1 # archival.py - revision archival for mercurial
1 # archival.py - revision archival for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import gzip
10 import gzip
11 import os
11 import os
12 import struct
12 import struct
13 import tarfile
13 import tarfile
14 import time
14 import time
15 import zipfile
15 import zipfile
16 import zlib
16 import zlib
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import nullrev
19 from .node import nullrev
20 from .pycompat import open
20 from .pycompat import open
21
21
22 from . import (
22 from . import (
23 error,
23 error,
24 formatter,
24 formatter,
25 match as matchmod,
25 match as matchmod,
26 pycompat,
26 pycompat,
27 scmutil,
27 scmutil,
28 util,
28 util,
29 vfs as vfsmod,
29 vfs as vfsmod,
30 )
30 )
31
31
32 stringio = util.stringio
32 stringio = util.stringio
33
33
34 # from unzip source code:
34 # from unzip source code:
35 _UNX_IFREG = 0x8000
35 _UNX_IFREG = 0x8000
36 _UNX_IFLNK = 0xA000
36 _UNX_IFLNK = 0xA000
37
37
38
38
39 def tidyprefix(dest, kind, prefix):
39 def tidyprefix(dest, kind, prefix):
40 '''choose prefix to use for names in archive. make sure prefix is
40 '''choose prefix to use for names in archive. make sure prefix is
41 safe for consumers.'''
41 safe for consumers.'''
42
42
43 if prefix:
43 if prefix:
44 prefix = util.normpath(prefix)
44 prefix = util.normpath(prefix)
45 else:
45 else:
46 if not isinstance(dest, bytes):
46 if not isinstance(dest, bytes):
47 raise ValueError(b'dest must be string if no prefix')
47 raise ValueError(b'dest must be string if no prefix')
48 prefix = os.path.basename(dest)
48 prefix = os.path.basename(dest)
49 lower = prefix.lower()
49 lower = prefix.lower()
50 for sfx in exts.get(kind, []):
50 for sfx in exts.get(kind, []):
51 if lower.endswith(sfx):
51 if lower.endswith(sfx):
52 prefix = prefix[: -len(sfx)]
52 prefix = prefix[: -len(sfx)]
53 break
53 break
54 lpfx = os.path.normpath(util.localpath(prefix))
54 lpfx = os.path.normpath(util.localpath(prefix))
55 prefix = util.pconvert(lpfx)
55 prefix = util.pconvert(lpfx)
56 if not prefix.endswith(b'/'):
56 if not prefix.endswith(b'/'):
57 prefix += b'/'
57 prefix += b'/'
58 # Drop the leading '.' path component if present, so Windows can read the
58 # Drop the leading '.' path component if present, so Windows can read the
59 # zip files (issue4634)
59 # zip files (issue4634)
60 if prefix.startswith(b'./'):
60 if prefix.startswith(b'./'):
61 prefix = prefix[2:]
61 prefix = prefix[2:]
62 if prefix.startswith(b'../') or os.path.isabs(lpfx) or b'/../' in prefix:
62 if prefix.startswith(b'../') or os.path.isabs(lpfx) or b'/../' in prefix:
63 raise error.Abort(_(b'archive prefix contains illegal components'))
63 raise error.Abort(_(b'archive prefix contains illegal components'))
64 return prefix
64 return prefix
65
65
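For example (illustrative destination), with no explicit prefix the destination's basename is used, so tidyprefix(b'dist/proj-1.0.tar.gz', b'tgz', b'') yields b'proj-1.0/': the b'.tar.gz' suffix is stripped via the exts table below and a trailing slash is appended.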
66
66
67 exts = {
67 exts = {
68 b'tar': [b'.tar'],
68 b'tar': [b'.tar'],
69 b'tbz2': [b'.tbz2', b'.tar.bz2'],
69 b'tbz2': [b'.tbz2', b'.tar.bz2'],
70 b'tgz': [b'.tgz', b'.tar.gz'],
70 b'tgz': [b'.tgz', b'.tar.gz'],
71 b'zip': [b'.zip'],
71 b'zip': [b'.zip'],
72 b'txz': [b'.txz', b'.tar.xz'],
72 b'txz': [b'.txz', b'.tar.xz'],
73 }
73 }
74
74
75
75
76 def guesskind(dest):
76 def guesskind(dest):
77 for kind, extensions in pycompat.iteritems(exts):
77 for kind, extensions in pycompat.iteritems(exts):
78 if any(dest.endswith(ext) for ext in extensions):
78 if any(dest.endswith(ext) for ext in extensions):
79 return kind
79 return kind
80 return None
80 return None
81
81
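For example (illustrative names), guesskind(b'proj-1.0.tar.gz') returns b'tgz', while guesskind(b'proj-1.0.bin') returns None because no suffix in the exts table matches.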
82
82
83 def _rootctx(repo):
83 def _rootctx(repo):
84 # repo[0] may be hidden
84 # repo[0] may be hidden
85 for rev in repo:
85 for rev in repo:
86 return repo[rev]
86 return repo[rev]
87 return repo[nullrev]
87 return repo[nullrev]
88
88
89
89
90 # {tags} on ctx includes local tags and 'tip', with no current way to limit
90 # {tags} on ctx includes local tags and 'tip', with no current way to limit
91 # that to global tags. Therefore, use {latesttag} as a substitute when
91 # that to global tags. Therefore, use {latesttag} as a substitute when
92 # the distance is 0, since that will be the list of global tags on ctx.
92 # the distance is 0, since that will be the list of global tags on ctx.
93 _defaultmetatemplate = br'''
93 _defaultmetatemplate = br'''
94 repo: {root}
94 repo: {root}
95 node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")}
95 node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")}
96 branch: {branch|utf8}
96 branch: {branch|utf8}
97 {ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"),
97 {ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"),
98 separate("\n",
98 separate("\n",
99 join(latesttag % "latesttag: {tag}", "\n"),
99 join(latesttag % "latesttag: {tag}", "\n"),
100 "latesttagdistance: {latesttagdistance}",
100 "latesttagdistance: {latesttagdistance}",
101 "changessincelatesttag: {changessincelatesttag}"))}
101 "changessincelatesttag: {changessincelatesttag}"))}
102 '''[
102 '''[
103 1:
103 1:
104 ] # drop leading '\n'
104 ] # drop leading '\n'
105
105
106
106
107 def buildmetadata(ctx):
107 def buildmetadata(ctx):
108 '''build content of .hg_archival.txt'''
108 '''build content of .hg_archival.txt'''
109 repo = ctx.repo()
109 repo = ctx.repo()
110
110
111 opts = {
111 opts = {
112 b'template': repo.ui.config(
112 b'template': repo.ui.config(
113 b'experimental', b'archivemetatemplate', _defaultmetatemplate
113 b'experimental', b'archivemetatemplate', _defaultmetatemplate
114 )
114 )
115 }
115 }
116
116
117 out = util.stringio()
117 out = util.stringio()
118
118
119 fm = formatter.formatter(repo.ui, out, b'archive', opts)
119 fm = formatter.formatter(repo.ui, out, b'archive', opts)
120 fm.startitem()
120 fm.startitem()
121 fm.context(ctx=ctx)
121 fm.context(ctx=ctx)
122 fm.data(root=_rootctx(repo).hex())
122 fm.data(root=_rootctx(repo).hex())
123
123
124 if ctx.rev() is None:
124 if ctx.rev() is None:
125 dirty = b''
125 dirty = b''
126 if ctx.dirty(missing=True):
126 if ctx.dirty(missing=True):
127 dirty = b'+'
127 dirty = b'+'
128 fm.data(dirty=dirty)
128 fm.data(dirty=dirty)
129 fm.end()
129 fm.end()
130
130
131 return out.getvalue()
131 return out.getvalue()
132
132
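With the default template above, the generated .hg_archival.txt looks roughly like this (illustrative values; when latesttagdistance is 0, plain 'tag:' lines are emitted instead of the latesttag block):

    repo: 0123456789abcdef0123456789abcdef01234567
    node: 89abcdef0123456789abcdef0123456789abcdef
    branch: default
    latesttag: 1.0
    latesttagdistance: 5
    changessincelatesttag: 7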
133
133
134 class tarit(object):
134 class tarit(object):
135 '''write archive to tar file or stream. can write uncompressed,
135 '''write archive to tar file or stream. can write uncompressed,
136 or compress with gzip or bzip2.'''
136 or compress with gzip or bzip2.'''
137
137
138 if pycompat.ispy3:
138 if pycompat.ispy3:
139 GzipFileWithTime = gzip.GzipFile # camelcase-required
139 GzipFileWithTime = gzip.GzipFile # camelcase-required
140 else:
140 else:
141
141
142 class GzipFileWithTime(gzip.GzipFile):
142 class GzipFileWithTime(gzip.GzipFile):
143 def __init__(self, *args, **kw):
143 def __init__(self, *args, **kw):
144 timestamp = None
144 timestamp = None
145 if 'mtime' in kw:
145 if 'mtime' in kw:
146 timestamp = kw.pop('mtime')
146 timestamp = kw.pop('mtime')
147 if timestamp is None:
147 if timestamp is None:
148 self.timestamp = time.time()
148 self.timestamp = time.time()
149 else:
149 else:
150 self.timestamp = timestamp
150 self.timestamp = timestamp
151 gzip.GzipFile.__init__(self, *args, **kw)
151 gzip.GzipFile.__init__(self, *args, **kw)
152
152
153 def _write_gzip_header(self):
153 def _write_gzip_header(self):
154 self.fileobj.write(b'\037\213') # magic header
154 self.fileobj.write(b'\037\213') # magic header
155 self.fileobj.write(b'\010') # compression method
155 self.fileobj.write(b'\010') # compression method
156 fname = self.name
156 fname = self.name
157 if fname and fname.endswith(b'.gz'):
157 if fname and fname.endswith(b'.gz'):
158 fname = fname[:-3]
158 fname = fname[:-3]
159 flags = 0
159 flags = 0
160 if fname:
160 if fname:
161 flags = gzip.FNAME # pytype: disable=module-attr
161 flags = gzip.FNAME # pytype: disable=module-attr
162 self.fileobj.write(pycompat.bytechr(flags))
162 self.fileobj.write(pycompat.bytechr(flags))
163 gzip.write32u( # pytype: disable=module-attr
163 gzip.write32u( # pytype: disable=module-attr
164 self.fileobj, int(self.timestamp)
164 self.fileobj, int(self.timestamp)
165 )
165 )
166 self.fileobj.write(b'\002')
166 self.fileobj.write(b'\002')
167 self.fileobj.write(b'\377')
167 self.fileobj.write(b'\377')
168 if fname:
168 if fname:
169 self.fileobj.write(fname + b'\000')
169 self.fileobj.write(fname + b'\000')
170
170
171 def __init__(self, dest, mtime, kind=b''):
171 def __init__(self, dest, mtime, kind=b''):
172 self.mtime = mtime
172 self.mtime = mtime
173 self.fileobj = None
173 self.fileobj = None
174
174
175 def taropen(mode, name=b'', fileobj=None):
175 def taropen(mode, name=b'', fileobj=None):
176 if kind == b'gz':
176 if kind == b'gz':
177 mode = mode[0:1]
177 mode = mode[0:1]
178 if not fileobj:
178 if not fileobj:
179 fileobj = open(name, mode + b'b')
179 fileobj = open(name, mode + b'b')
180 gzfileobj = self.GzipFileWithTime(
180 gzfileobj = self.GzipFileWithTime(
181 name,
181 name,
182 pycompat.sysstr(mode + b'b'),
182 pycompat.sysstr(mode + b'b'),
183 zlib.Z_BEST_COMPRESSION,
183 zlib.Z_BEST_COMPRESSION,
184 fileobj,
184 fileobj,
185 mtime=mtime,
185 mtime=mtime,
186 )
186 )
187 self.fileobj = gzfileobj
187 self.fileobj = gzfileobj
188 return tarfile.TarFile.taropen( # pytype: disable=attribute-error
188 return tarfile.TarFile.taropen( # pytype: disable=attribute-error
189 name, pycompat.sysstr(mode), gzfileobj
189 name, pycompat.sysstr(mode), gzfileobj
190 )
190 )
191 else:
191 else:
192 return tarfile.open(name, pycompat.sysstr(mode + kind), fileobj)
192 return tarfile.open(name, pycompat.sysstr(mode + kind), fileobj)
193
193
194 if isinstance(dest, bytes):
194 if isinstance(dest, bytes):
195 self.z = taropen(b'w:', name=dest)
195 self.z = taropen(b'w:', name=dest)
196 else:
196 else:
197 self.z = taropen(b'w|', fileobj=dest)
197 self.z = taropen(b'w|', fileobj=dest)
198
198
199 def addfile(self, name, mode, islink, data):
199 def addfile(self, name, mode, islink, data):
200 name = pycompat.fsdecode(name)
200 name = pycompat.fsdecode(name)
201 i = tarfile.TarInfo(name)
201 i = tarfile.TarInfo(name)
202 i.mtime = self.mtime
202 i.mtime = self.mtime
203 i.size = len(data)
203 i.size = len(data)
204 if islink:
204 if islink:
205 i.type = tarfile.SYMTYPE
205 i.type = tarfile.SYMTYPE
206 i.mode = 0o777
206 i.mode = 0o777
207 i.linkname = pycompat.fsdecode(data)
207 i.linkname = pycompat.fsdecode(data)
208 data = None
208 data = None
209 i.size = 0
209 i.size = 0
210 else:
210 else:
211 i.mode = mode
211 i.mode = mode
212 data = stringio(data)
212 data = stringio(data)
213 self.z.addfile(i, data)
213 self.z.addfile(i, data)
214
214
215 def done(self):
215 def done(self):
216 self.z.close()
216 self.z.close()
217 if self.fileobj:
217 if self.fileobj:
218 self.fileobj.close()
218 self.fileobj.close()
219
219
220
220
221 class zipit(object):
221 class zipit(object):
222 '''write archive to zip file or stream. can write uncompressed,
222 '''write archive to zip file or stream. can write uncompressed,
223 or compressed with deflate.'''
223 or compressed with deflate.'''
224
224
225 def __init__(self, dest, mtime, compress=True):
225 def __init__(self, dest, mtime, compress=True):
226 if isinstance(dest, bytes):
226 if isinstance(dest, bytes):
227 dest = pycompat.fsdecode(dest)
227 dest = pycompat.fsdecode(dest)
228 self.z = zipfile.ZipFile(
228 self.z = zipfile.ZipFile(
229 dest, 'w', compress and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED
229 dest, 'w', compress and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED
230 )
230 )
231
231
232 # Python's zipfile module emits deprecation warnings if we try
232 # Python's zipfile module emits deprecation warnings if we try
233 # to store files with a date before 1980.
233 # to store files with a date before 1980.
234 epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
234 epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
235 if mtime < epoch:
235 if mtime < epoch:
236 mtime = epoch
236 mtime = epoch
237
237
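        # (315532800 is 1980-01-01T00:00:00Z; zip's DOS date fields cannot
        # represent earlier times, hence the clamp above.)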
238 self.mtime = mtime
238 self.mtime = mtime
239 self.date_time = time.gmtime(mtime)[:6]
239 self.date_time = time.gmtime(mtime)[:6]
240
240
241 def addfile(self, name, mode, islink, data):
241 def addfile(self, name, mode, islink, data):
242 i = zipfile.ZipInfo(pycompat.fsdecode(name), self.date_time)
242 i = zipfile.ZipInfo(pycompat.fsdecode(name), self.date_time)
243 i.compress_type = self.z.compression # pytype: disable=attribute-error
243 i.compress_type = self.z.compression # pytype: disable=attribute-error
244 # unzip will not honor unix file modes unless file creator is
244 # unzip will not honor unix file modes unless file creator is
245 # set to unix (id 3).
245 # set to unix (id 3).
246 i.create_system = 3
246 i.create_system = 3
247 ftype = _UNX_IFREG
247 ftype = _UNX_IFREG
248 if islink:
248 if islink:
249 mode = 0o777
249 mode = 0o777
250 ftype = _UNX_IFLNK
250 ftype = _UNX_IFLNK
251 i.external_attr = (mode | ftype) << 16
251 i.external_attr = (mode | ftype) << 16
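        # e.g. a regular file with mode 0o644 stores (0o644 | 0x8000) << 16,
        # i.e. the unix mode and file type land in the high 16 bits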
252 # add "extended-timestamp" extra block, because zip archives
252 # add "extended-timestamp" extra block, because zip archives
253 # without this will be extracted with an unexpected timestamp,
253 # without this will be extracted with an unexpected timestamp,
254 # if TZ is not configured as GMT
254 # if TZ is not configured as GMT
255 i.extra += struct.pack(
255 i.extra += struct.pack(
256 b'<hhBl',
256 b'<hhBl',
257 0x5455, # block type: "extended-timestamp"
257 0x5455, # block type: "extended-timestamp"
258 1 + 4, # size of this block
258 1 + 4, # size of this block
259 1, # "modification time is present"
259 1, # "modification time is present"
260 int(self.mtime),
260 int(self.mtime),
261 ) # last modification (UTC)
261 ) # last modification (UTC)
262 self.z.writestr(i, data)
262 self.z.writestr(i, data)
263
263
264 def done(self):
264 def done(self):
265 self.z.close()
265 self.z.close()
266
266
267
267
268 class fileit(object):
268 class fileit(object):
269 '''write archive as files in directory.'''
269 '''write archive as files in directory.'''
270
270
271 def __init__(self, name, mtime):
271 def __init__(self, name, mtime):
272 self.basedir = name
272 self.basedir = name
273 self.opener = vfsmod.vfs(self.basedir)
273 self.opener = vfsmod.vfs(self.basedir)
274 self.mtime = mtime
274 self.mtime = mtime
275
275
276 def addfile(self, name, mode, islink, data):
276 def addfile(self, name, mode, islink, data):
277 if islink:
277 if islink:
278 self.opener.symlink(data, name)
278 self.opener.symlink(data, name)
279 return
279 return
280 f = self.opener(name, b"w", atomictemp=False)
280 f = self.opener(name, b"w", atomictemp=False)
281 f.write(data)
281 f.write(data)
282 f.close()
282 f.close()
283 destfile = os.path.join(self.basedir, name)
283 destfile = os.path.join(self.basedir, name)
284 os.chmod(destfile, mode)
284 os.chmod(destfile, mode)
285 if self.mtime is not None:
285 if self.mtime is not None:
286 os.utime(destfile, (self.mtime, self.mtime))
286 os.utime(destfile, (self.mtime, self.mtime))
287
287
288 def done(self):
288 def done(self):
289 pass
289 pass
290
290
291
291
292 archivers = {
292 archivers = {
293 b'files': fileit,
293 b'files': fileit,
294 b'tar': tarit,
294 b'tar': tarit,
295 b'tbz2': lambda name, mtime: tarit(name, mtime, b'bz2'),
295 b'tbz2': lambda name, mtime: tarit(name, mtime, b'bz2'),
296 b'tgz': lambda name, mtime: tarit(name, mtime, b'gz'),
296 b'tgz': lambda name, mtime: tarit(name, mtime, b'gz'),
297 b'txz': lambda name, mtime: tarit(name, mtime, b'xz'),
297 b'txz': lambda name, mtime: tarit(name, mtime, b'xz'),
298 b'uzip': lambda name, mtime: zipit(name, mtime, False),
298 b'uzip': lambda name, mtime: zipit(name, mtime, False),
299 b'zip': zipit,
299 b'zip': zipit,
300 }
300 }
301
301
302
302
303 def archive(
303 def archive(
304 repo,
304 repo,
305 dest,
305 dest,
306 node,
306 node,
307 kind,
307 kind,
308 decode=True,
308 decode=True,
309 match=None,
309 match=None,
310 prefix=b'',
310 prefix=b'',
311 mtime=None,
311 mtime=None,
312 subrepos=False,
312 subrepos=False,
313 ):
313 ):
314 '''create archive of repo as it was at node.
314 '''create archive of repo as it was at node.
315
315
316 dest can be name of directory, name of archive file, or file
316 dest can be name of directory, name of archive file, or file
317 object to write archive to.
317 object to write archive to.
318
318
319 kind is type of archive to create.
319 kind is type of archive to create.
320
320
321 decode tells whether to put files through decode filters from
321 decode tells whether to put files through decode filters from
322 hgrc.
322 hgrc.
323
323
324 match is a matcher to filter names of files to write to archive.
324 match is a matcher to filter names of files to write to archive.
325
325
326 prefix is name of path to put before every archive member.
326 prefix is name of path to put before every archive member.
327
327
328 mtime is the modified time, in seconds, or None to use the changeset time.
328 mtime is the modified time, in seconds, or None to use the changeset time.
329
329
330 subrepos tells whether to include subrepos.
330 subrepos tells whether to include subrepos.
331 '''
331 '''
332
332
333 if kind == b'txz' and not pycompat.ispy3:
333 if kind == b'txz' and not pycompat.ispy3:
334 raise error.Abort(_(b'xz compression is only available in Python 3'))
334 raise error.Abort(_(b'xz compression is only available in Python 3'))
335
335
336 if kind == b'files':
336 if kind == b'files':
337 if prefix:
337 if prefix:
338 raise error.Abort(_(b'cannot give prefix when archiving to files'))
338 raise error.Abort(_(b'cannot give prefix when archiving to files'))
339 else:
339 else:
340 prefix = tidyprefix(dest, kind, prefix)
340 prefix = tidyprefix(dest, kind, prefix)
341
341
342 def write(name, mode, islink, getdata):
342 def write(name, mode, islink, getdata):
343 data = getdata()
343 data = getdata()
344 if decode:
344 if decode:
345 data = repo.wwritedata(name, data)
345 data = repo.wwritedata(name, data)
346 archiver.addfile(prefix + name, mode, islink, data)
346 archiver.addfile(prefix + name, mode, islink, data)
347
347
348 if kind not in archivers:
348 if kind not in archivers:
349 raise error.Abort(_(b"unknown archive type '%s'") % kind)
349 raise error.Abort(_(b"unknown archive type '%s'") % kind)
350
350
351 ctx = repo[node]
351 ctx = repo[node]
352 archiver = archivers[kind](dest, mtime or ctx.date()[0])
352 archiver = archivers[kind](dest, mtime or ctx.date()[0])
353
353
354 if not match:
354 if not match:
355 match = scmutil.matchall(repo)
355 match = scmutil.matchall(repo)
356
356
357 if repo.ui.configbool(b"ui", b"archivemeta"):
357 if repo.ui.configbool(b"ui", b"archivemeta"):
358 name = b'.hg_archival.txt'
358 name = b'.hg_archival.txt'
359 if match(name):
359 if match(name):
360 write(name, 0o644, False, lambda: buildmetadata(ctx))
360 write(name, 0o644, False, lambda: buildmetadata(ctx))
361
361
362 files = list(ctx.manifest().walk(match))
362 files = list(ctx.manifest().walk(match))
363 total = len(files)
363 total = len(files)
364 if total:
364 if total:
365 files.sort()
365 files.sort()
366 scmutil.prefetchfiles(
366 scmutil.prefetchfiles(
367 repo, [ctx.rev()], scmutil.matchfiles(repo, files)
367 repo, [(ctx.rev(), scmutil.matchfiles(repo, files))]
368 )
368 )
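        # prefetchfiles() takes a list of (revision, matcher) pairs, so
        # callers may request a different file set per revision; a single
        # pair covers this whole archive.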
369 progress = repo.ui.makeprogress(
369 progress = repo.ui.makeprogress(
370 _(b'archiving'), unit=_(b'files'), total=total
370 _(b'archiving'), unit=_(b'files'), total=total
371 )
371 )
372 progress.update(0)
372 progress.update(0)
373 for f in files:
373 for f in files:
374 ff = ctx.flags(f)
374 ff = ctx.flags(f)
375 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, ctx[f].data)
375 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, ctx[f].data)
376 progress.increment(item=f)
376 progress.increment(item=f)
377 progress.complete()
377 progress.complete()
378
378
379 if subrepos:
379 if subrepos:
380 for subpath in sorted(ctx.substate):
380 for subpath in sorted(ctx.substate):
381 sub = ctx.workingsub(subpath)
381 sub = ctx.workingsub(subpath)
382 submatch = matchmod.subdirmatcher(subpath, match)
382 submatch = matchmod.subdirmatcher(subpath, match)
383 subprefix = prefix + subpath + b'/'
383 subprefix = prefix + subpath + b'/'
384 total += sub.archive(archiver, subprefix, submatch, decode)
384 total += sub.archive(archiver, subprefix, submatch, decode)
385
385
386 if total == 0:
386 if total == 0:
387 raise error.Abort(_(b'no files match the archive pattern'))
387 raise error.Abort(_(b'no files match the archive pattern'))
388
388
389 archiver.done()
389 archiver.done()
390 return total
390 return total
@@ -1,4213 +1,4215 @@
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import copy as copymod
10 import copy as copymod
11 import errno
11 import errno
12 import os
12 import os
13 import re
13 import re
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullid,
18 nullid,
19 nullrev,
19 nullrev,
20 short,
20 short,
21 )
21 )
22 from .pycompat import (
22 from .pycompat import (
23 getattr,
23 getattr,
24 open,
24 open,
25 setattr,
25 setattr,
26 )
26 )
27 from .thirdparty import attr
27 from .thirdparty import attr
28
28
29 from . import (
29 from . import (
30 bookmarks,
30 bookmarks,
31 changelog,
31 changelog,
32 copies,
32 copies,
33 crecord as crecordmod,
33 crecord as crecordmod,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 formatter,
37 formatter,
38 logcmdutil,
38 logcmdutil,
39 match as matchmod,
39 match as matchmod,
40 merge as mergemod,
40 merge as mergemod,
41 mergestate as mergestatemod,
41 mergestate as mergestatemod,
42 mergeutil,
42 mergeutil,
43 obsolete,
43 obsolete,
44 patch,
44 patch,
45 pathutil,
45 pathutil,
46 phases,
46 phases,
47 pycompat,
47 pycompat,
48 repair,
48 repair,
49 revlog,
49 revlog,
50 rewriteutil,
50 rewriteutil,
51 scmutil,
51 scmutil,
52 smartset,
52 smartset,
53 state as statemod,
53 state as statemod,
54 subrepoutil,
54 subrepoutil,
55 templatekw,
55 templatekw,
56 templater,
56 templater,
57 util,
57 util,
58 vfs as vfsmod,
58 vfs as vfsmod,
59 )
59 )
60
60
61 from .utils import (
61 from .utils import (
62 dateutil,
62 dateutil,
63 stringutil,
63 stringutil,
64 )
64 )
65
65
66 if pycompat.TYPE_CHECKING:
66 if pycompat.TYPE_CHECKING:
67 from typing import (
67 from typing import (
68 Any,
68 Any,
69 Dict,
69 Dict,
70 )
70 )
71
71
72 for t in (Any, Dict):
72 for t in (Any, Dict):
73 assert t
73 assert t
74
74
75 stringio = util.stringio
75 stringio = util.stringio
76
76
77 # templates of common command options
77 # templates of common command options
78
78
79 dryrunopts = [
79 dryrunopts = [
80 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
80 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
81 ]
81 ]
82
82
83 confirmopts = [
83 confirmopts = [
84 (b'', b'confirm', None, _(b'ask before applying actions')),
84 (b'', b'confirm', None, _(b'ask before applying actions')),
85 ]
85 ]
86
86
87 remoteopts = [
87 remoteopts = [
88 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
88 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
89 (
89 (
90 b'',
90 b'',
91 b'remotecmd',
91 b'remotecmd',
92 b'',
92 b'',
93 _(b'specify hg command to run on the remote side'),
93 _(b'specify hg command to run on the remote side'),
94 _(b'CMD'),
94 _(b'CMD'),
95 ),
95 ),
96 (
96 (
97 b'',
97 b'',
98 b'insecure',
98 b'insecure',
99 None,
99 None,
100 _(b'do not verify server certificate (ignoring web.cacerts config)'),
100 _(b'do not verify server certificate (ignoring web.cacerts config)'),
101 ),
101 ),
102 ]
102 ]
103
103
104 walkopts = [
104 walkopts = [
105 (
105 (
106 b'I',
106 b'I',
107 b'include',
107 b'include',
108 [],
108 [],
109 _(b'include names matching the given patterns'),
109 _(b'include names matching the given patterns'),
110 _(b'PATTERN'),
110 _(b'PATTERN'),
111 ),
111 ),
112 (
112 (
113 b'X',
113 b'X',
114 b'exclude',
114 b'exclude',
115 [],
115 [],
116 _(b'exclude names matching the given patterns'),
116 _(b'exclude names matching the given patterns'),
117 _(b'PATTERN'),
117 _(b'PATTERN'),
118 ),
118 ),
119 ]
119 ]
120
120
121 commitopts = [
121 commitopts = [
122 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
122 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
123 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
123 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
124 ]
124 ]
125
125
126 commitopts2 = [
126 commitopts2 = [
127 (
127 (
128 b'd',
128 b'd',
129 b'date',
129 b'date',
130 b'',
130 b'',
131 _(b'record the specified date as commit date'),
131 _(b'record the specified date as commit date'),
132 _(b'DATE'),
132 _(b'DATE'),
133 ),
133 ),
134 (
134 (
135 b'u',
135 b'u',
136 b'user',
136 b'user',
137 b'',
137 b'',
138 _(b'record the specified user as committer'),
138 _(b'record the specified user as committer'),
139 _(b'USER'),
139 _(b'USER'),
140 ),
140 ),
141 ]
141 ]
142
142
143 commitopts3 = [
143 commitopts3 = [
144 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
144 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
145 (b'U', b'currentuser', None, _(b'record the current user as committer')),
145 (b'U', b'currentuser', None, _(b'record the current user as committer')),
146 ]
146 ]
147
147
148 formatteropts = [
148 formatteropts = [
149 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
149 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
150 ]
150 ]
151
151
152 templateopts = [
152 templateopts = [
153 (
153 (
154 b'',
154 b'',
155 b'style',
155 b'style',
156 b'',
156 b'',
157 _(b'display using template map file (DEPRECATED)'),
157 _(b'display using template map file (DEPRECATED)'),
158 _(b'STYLE'),
158 _(b'STYLE'),
159 ),
159 ),
160 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
160 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
161 ]
161 ]
162
162
163 logopts = [
163 logopts = [
164 (b'p', b'patch', None, _(b'show patch')),
164 (b'p', b'patch', None, _(b'show patch')),
165 (b'g', b'git', None, _(b'use git extended diff format')),
165 (b'g', b'git', None, _(b'use git extended diff format')),
166 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
166 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
167 (b'M', b'no-merges', None, _(b'do not show merges')),
167 (b'M', b'no-merges', None, _(b'do not show merges')),
168 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
168 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
169 (b'G', b'graph', None, _(b"show the revision DAG")),
169 (b'G', b'graph', None, _(b"show the revision DAG")),
170 ] + templateopts
170 ] + templateopts
171
171
172 diffopts = [
172 diffopts = [
173 (b'a', b'text', None, _(b'treat all files as text')),
173 (b'a', b'text', None, _(b'treat all files as text')),
174 (
174 (
175 b'g',
175 b'g',
176 b'git',
176 b'git',
177 None,
177 None,
178 _(b'use git extended diff format (DEFAULT: diff.git)'),
178 _(b'use git extended diff format (DEFAULT: diff.git)'),
179 ),
179 ),
180 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
180 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
181 (b'', b'nodates', None, _(b'omit dates from diff headers')),
181 (b'', b'nodates', None, _(b'omit dates from diff headers')),
182 ]
182 ]
183
183
184 diffwsopts = [
184 diffwsopts = [
185 (
185 (
186 b'w',
186 b'w',
187 b'ignore-all-space',
187 b'ignore-all-space',
188 None,
188 None,
189 _(b'ignore white space when comparing lines'),
189 _(b'ignore white space when comparing lines'),
190 ),
190 ),
191 (
191 (
192 b'b',
192 b'b',
193 b'ignore-space-change',
193 b'ignore-space-change',
194 None,
194 None,
195 _(b'ignore changes in the amount of white space'),
195 _(b'ignore changes in the amount of white space'),
196 ),
196 ),
197 (
197 (
198 b'B',
198 b'B',
199 b'ignore-blank-lines',
199 b'ignore-blank-lines',
200 None,
200 None,
201 _(b'ignore changes whose lines are all blank'),
201 _(b'ignore changes whose lines are all blank'),
202 ),
202 ),
203 (
203 (
204 b'Z',
204 b'Z',
205 b'ignore-space-at-eol',
205 b'ignore-space-at-eol',
206 None,
206 None,
207 _(b'ignore changes in whitespace at EOL'),
207 _(b'ignore changes in whitespace at EOL'),
208 ),
208 ),
209 ]
209 ]
210
210
211 diffopts2 = (
211 diffopts2 = (
212 [
212 [
213 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
213 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
214 (
214 (
215 b'p',
215 b'p',
216 b'show-function',
216 b'show-function',
217 None,
217 None,
218 _(
218 _(
219 b'show which function each change is in (DEFAULT: diff.showfunc)'
219 b'show which function each change is in (DEFAULT: diff.showfunc)'
220 ),
220 ),
221 ),
221 ),
222 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
222 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
223 ]
223 ]
224 + diffwsopts
224 + diffwsopts
225 + [
225 + [
226 (
226 (
227 b'U',
227 b'U',
228 b'unified',
228 b'unified',
229 b'',
229 b'',
230 _(b'number of lines of context to show'),
230 _(b'number of lines of context to show'),
231 _(b'NUM'),
231 _(b'NUM'),
232 ),
232 ),
233 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
233 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
234 (
234 (
235 b'',
235 b'',
236 b'root',
236 b'root',
237 b'',
237 b'',
238 _(b'produce diffs relative to subdirectory'),
238 _(b'produce diffs relative to subdirectory'),
239 _(b'DIR'),
239 _(b'DIR'),
240 ),
240 ),
241 ]
241 ]
242 )
242 )
243
243
244 mergetoolopts = [
244 mergetoolopts = [
245 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
245 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
246 ]
246 ]
247
247
248 similarityopts = [
248 similarityopts = [
249 (
249 (
250 b's',
250 b's',
251 b'similarity',
251 b'similarity',
252 b'',
252 b'',
253 _(b'guess renamed files by similarity (0<=s<=100)'),
253 _(b'guess renamed files by similarity (0<=s<=100)'),
254 _(b'SIMILARITY'),
254 _(b'SIMILARITY'),
255 )
255 )
256 ]
256 ]
257
257
258 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
258 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
259
259
260 debugrevlogopts = [
260 debugrevlogopts = [
261 (b'c', b'changelog', False, _(b'open changelog')),
261 (b'c', b'changelog', False, _(b'open changelog')),
262 (b'm', b'manifest', False, _(b'open manifest')),
262 (b'm', b'manifest', False, _(b'open manifest')),
263 (b'', b'dir', b'', _(b'open directory manifest')),
263 (b'', b'dir', b'', _(b'open directory manifest')),
264 ]
264 ]
265
265
266 # special string such that everything below this line will be ignored in the
266 # special string such that everything below this line will be ignored in the
267 # editor text
267 # editor text
268 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
268 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
269
269
270
270
271 def check_at_most_one_arg(opts, *args):
271 def check_at_most_one_arg(opts, *args):
272 """abort if more than one of the arguments are in opts
272 """abort if more than one of the arguments are in opts
273
273
274 Returns the unique argument or None if none of them were specified.
274 Returns the unique argument or None if none of them were specified.
275 """
275 """
276
276
277 def to_display(name):
277 def to_display(name):
278 return pycompat.sysbytes(name).replace(b'_', b'-')
278 return pycompat.sysbytes(name).replace(b'_', b'-')
279
279
280 previous = None
280 previous = None
281 for x in args:
281 for x in args:
282 if opts.get(x):
282 if opts.get(x):
283 if previous:
283 if previous:
284 raise error.Abort(
284 raise error.Abort(
285 _(b'cannot specify both --%s and --%s')
285 _(b'cannot specify both --%s and --%s')
286 % (to_display(previous), to_display(x))
286 % (to_display(previous), to_display(x))
287 )
287 )
288 previous = x
288 previous = x
289 return previous
289 return previous
290
290
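For example (hypothetical opts), the call below returns b'date' because only one of the two flags is set; had both been set it would abort with 'cannot specify both --date and --currentdate':

    opts = {b'date': b'2020-01-01', b'currentdate': None}
    check_at_most_one_arg(opts, b'date', b'currentdate')  # -> b'date'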
291
291
292 def check_incompatible_arguments(opts, first, others):
292 def check_incompatible_arguments(opts, first, others):
293 """abort if the first argument is given along with any of the others
293 """abort if the first argument is given along with any of the others
294
294
295 Unlike check_at_most_one_arg(), `others` are not mutually exclusive
295 Unlike check_at_most_one_arg(), `others` are not mutually exclusive
296 among themselves, and they're passed as a single collection.
296 among themselves, and they're passed as a single collection.
297 """
297 """
298 for other in others:
298 for other in others:
299 check_at_most_one_arg(opts, first, other)
299 check_at_most_one_arg(opts, first, other)
300
300
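For example (illustrative flag names), check_incompatible_arguments(opts, b'all', [b'rev', b'change']) aborts when --all is combined with --rev or with --change, but still allows --rev and --change together.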
301
301
302 def resolvecommitoptions(ui, opts):
302 def resolvecommitoptions(ui, opts):
303 """modify commit options dict to handle related options
303 """modify commit options dict to handle related options
304
304
305 The return value indicates that ``rewrite.update-timestamp`` is the reason
305 The return value indicates that ``rewrite.update-timestamp`` is the reason
306 the ``date`` option is set.
306 the ``date`` option is set.
307 """
307 """
308 check_at_most_one_arg(opts, b'date', b'currentdate')
308 check_at_most_one_arg(opts, b'date', b'currentdate')
309 check_at_most_one_arg(opts, b'user', b'currentuser')
309 check_at_most_one_arg(opts, b'user', b'currentuser')
310
310
311 datemaydiffer = False # date-only change should be ignored?
311 datemaydiffer = False # date-only change should be ignored?
312
312
313 if opts.get(b'currentdate'):
313 if opts.get(b'currentdate'):
314 opts[b'date'] = b'%d %d' % dateutil.makedate()
314 opts[b'date'] = b'%d %d' % dateutil.makedate()
315 elif (
315 elif (
316 not opts.get(b'date')
316 not opts.get(b'date')
317 and ui.configbool(b'rewrite', b'update-timestamp')
317 and ui.configbool(b'rewrite', b'update-timestamp')
318 and opts.get(b'currentdate') is None
318 and opts.get(b'currentdate') is None
319 ):
319 ):
320 opts[b'date'] = b'%d %d' % dateutil.makedate()
320 opts[b'date'] = b'%d %d' % dateutil.makedate()
321 datemaydiffer = True
321 datemaydiffer = True
322
322
323 if opts.get(b'currentuser'):
323 if opts.get(b'currentuser'):
324 opts[b'user'] = ui.username()
324 opts[b'user'] = ui.username()
325
325
326 return datemaydiffer
326 return datemaydiffer
327
327
328
328
329 def checknotesize(ui, opts):
329 def checknotesize(ui, opts):
330 """ make sure note is of valid format """
330 """ make sure note is of valid format """
331
331
332 note = opts.get(b'note')
332 note = opts.get(b'note')
333 if not note:
333 if not note:
334 return
334 return
335
335
336 if len(note) > 255:
336 if len(note) > 255:
337 raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
337 raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
338 if b'\n' in note:
338 if b'\n' in note:
339 raise error.Abort(_(b"note cannot contain a newline"))
339 raise error.Abort(_(b"note cannot contain a newline"))
340
340
341
341
342 def ishunk(x):
342 def ishunk(x):
343 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
343 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
344 return isinstance(x, hunkclasses)
344 return isinstance(x, hunkclasses)
345
345
346
346
347 def newandmodified(chunks, originalchunks):
347 def newandmodified(chunks, originalchunks):
348 newlyaddedandmodifiedfiles = set()
348 newlyaddedandmodifiedfiles = set()
349 alsorestore = set()
349 alsorestore = set()
350 for chunk in chunks:
350 for chunk in chunks:
351 if (
351 if (
352 ishunk(chunk)
352 ishunk(chunk)
353 and chunk.header.isnewfile()
353 and chunk.header.isnewfile()
354 and chunk not in originalchunks
354 and chunk not in originalchunks
355 ):
355 ):
356 newlyaddedandmodifiedfiles.add(chunk.header.filename())
356 newlyaddedandmodifiedfiles.add(chunk.header.filename())
357 alsorestore.update(
357 alsorestore.update(
358 set(chunk.header.files()) - {chunk.header.filename()}
358 set(chunk.header.files()) - {chunk.header.filename()}
359 )
359 )
360 return newlyaddedandmodifiedfiles, alsorestore
360 return newlyaddedandmodifiedfiles, alsorestore
361
361
362
362
363 def parsealiases(cmd):
363 def parsealiases(cmd):
364 return cmd.split(b"|")
364 return cmd.split(b"|")
365
365
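For example, parsealiases(b'log|history') returns [b'log', b'history']: the canonical name first, then its aliases, per the 'name|alias1|alias2' declaration syntax.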
366
366
367 def setupwrapcolorwrite(ui):
367 def setupwrapcolorwrite(ui):
368 # wrap ui.write so diff output can be labeled/colorized
368 # wrap ui.write so diff output can be labeled/colorized
369 def wrapwrite(orig, *args, **kw):
369 def wrapwrite(orig, *args, **kw):
370 label = kw.pop('label', b'')
370 label = kw.pop('label', b'')
371 for chunk, l in patch.difflabel(lambda: args):
371 for chunk, l in patch.difflabel(lambda: args):
372 orig(chunk, label=label + l)
372 orig(chunk, label=label + l)
373
373
374 oldwrite = ui.write
374 oldwrite = ui.write
375
375
376 def wrap(*args, **kwargs):
376 def wrap(*args, **kwargs):
377 return wrapwrite(oldwrite, *args, **kwargs)
377 return wrapwrite(oldwrite, *args, **kwargs)
378
378
379 setattr(ui, 'write', wrap)
379 setattr(ui, 'write', wrap)
380 return oldwrite
380 return oldwrite
381
381
382
382
383 def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
383 def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
384 try:
384 try:
385 if usecurses:
385 if usecurses:
386 if testfile:
386 if testfile:
387 recordfn = crecordmod.testdecorator(
387 recordfn = crecordmod.testdecorator(
388 testfile, crecordmod.testchunkselector
388 testfile, crecordmod.testchunkselector
389 )
389 )
390 else:
390 else:
391 recordfn = crecordmod.chunkselector
391 recordfn = crecordmod.chunkselector
392
392
393 return crecordmod.filterpatch(
393 return crecordmod.filterpatch(
394 ui, originalhunks, recordfn, operation
394 ui, originalhunks, recordfn, operation
395 )
395 )
396 except crecordmod.fallbackerror as e:
396 except crecordmod.fallbackerror as e:
397 ui.warn(b'%s\n' % e)
397 ui.warn(b'%s\n' % e)
398 ui.warn(_(b'falling back to text mode\n'))
398 ui.warn(_(b'falling back to text mode\n'))
399
399
400 return patch.filterpatch(ui, originalhunks, match, operation)
400 return patch.filterpatch(ui, originalhunks, match, operation)
401
401
402
402
403 def recordfilter(ui, originalhunks, match, operation=None):
403 def recordfilter(ui, originalhunks, match, operation=None):
404 """ Prompts the user to filter the originalhunks and return a list of
404 """ Prompts the user to filter the originalhunks and return a list of
405 selected hunks.
405 selected hunks.
406 *operation* is used to build ui messages to indicate to the user what
406 *operation* is used to build ui messages to indicate to the user what
407 kind of filtering they are doing: reverting, committing, shelving, etc.
407 kind of filtering they are doing: reverting, committing, shelving, etc.
408 (see patch.filterpatch).
408 (see patch.filterpatch).
409 """
409 """
410 usecurses = crecordmod.checkcurses(ui)
410 usecurses = crecordmod.checkcurses(ui)
411 testfile = ui.config(b'experimental', b'crecordtest')
411 testfile = ui.config(b'experimental', b'crecordtest')
412 oldwrite = setupwrapcolorwrite(ui)
412 oldwrite = setupwrapcolorwrite(ui)
413 try:
413 try:
414 newchunks, newopts = filterchunks(
414 newchunks, newopts = filterchunks(
415 ui, originalhunks, usecurses, testfile, match, operation
415 ui, originalhunks, usecurses, testfile, match, operation
416 )
416 )
417 finally:
417 finally:
418 ui.write = oldwrite
418 ui.write = oldwrite
419 return newchunks, newopts
419 return newchunks, newopts
420
420
421
421
422 def dorecord(
422 def dorecord(
423 ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
423 ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
424 ):
424 ):
425 opts = pycompat.byteskwargs(opts)
425 opts = pycompat.byteskwargs(opts)
426 if not ui.interactive():
426 if not ui.interactive():
427 if cmdsuggest:
427 if cmdsuggest:
428 msg = _(b'running non-interactively, use %s instead') % cmdsuggest
428 msg = _(b'running non-interactively, use %s instead') % cmdsuggest
429 else:
429 else:
430 msg = _(b'running non-interactively')
430 msg = _(b'running non-interactively')
431 raise error.Abort(msg)
431 raise error.Abort(msg)
432
432
433 # make sure username is set before going interactive
433 # make sure username is set before going interactive
434 if not opts.get(b'user'):
434 if not opts.get(b'user'):
435 ui.username() # raise exception, username not provided
435 ui.username() # raise exception, username not provided
436
436
437 def recordfunc(ui, repo, message, match, opts):
437 def recordfunc(ui, repo, message, match, opts):
438 """This is generic record driver.
438 """This is generic record driver.
439
439
440 Its job is to interactively filter local changes, and
440 Its job is to interactively filter local changes, and
441 accordingly prepare working directory into a state in which the
441 accordingly prepare working directory into a state in which the
442 job can be delegated to a non-interactive commit command such as
442 job can be delegated to a non-interactive commit command such as
443 'commit' or 'qrefresh'.
443 'commit' or 'qrefresh'.
444
444
445 After the actual job is done by non-interactive command, the
445 After the actual job is done by non-interactive command, the
446 working directory is restored to its original state.
446 working directory is restored to its original state.
447
447
448 In the end we'll record interesting changes, and everything else
448 In the end we'll record interesting changes, and everything else
449 will be left in place, so the user can continue working.
449 will be left in place, so the user can continue working.
450 """
450 """
451 if not opts.get(b'interactive-unshelve'):
451 if not opts.get(b'interactive-unshelve'):
452 checkunfinished(repo, commit=True)
452 checkunfinished(repo, commit=True)
453 wctx = repo[None]
453 wctx = repo[None]
454 merge = len(wctx.parents()) > 1
454 merge = len(wctx.parents()) > 1
455 if merge:
455 if merge:
456 raise error.Abort(
456 raise error.Abort(
457 _(
457 _(
458 b'cannot partially commit a merge '
458 b'cannot partially commit a merge '
459 b'(use "hg commit" instead)'
459 b'(use "hg commit" instead)'
460 )
460 )
461 )
461 )
462
462
463 def fail(f, msg):
463 def fail(f, msg):
464 raise error.Abort(b'%s: %s' % (f, msg))
464 raise error.Abort(b'%s: %s' % (f, msg))
465
465
466 force = opts.get(b'force')
466 force = opts.get(b'force')
467 if not force:
467 if not force:
468 match = matchmod.badmatch(match, fail)
468 match = matchmod.badmatch(match, fail)
469
469
470 status = repo.status(match=match)
470 status = repo.status(match=match)
471
471
472 overrides = {(b'ui', b'commitsubrepos'): True}
472 overrides = {(b'ui', b'commitsubrepos'): True}
473
473
474 with repo.ui.configoverride(overrides, b'record'):
474 with repo.ui.configoverride(overrides, b'record'):
475 # subrepoutil.precommit() modifies the status
475 # subrepoutil.precommit() modifies the status
476 tmpstatus = scmutil.status(
476 tmpstatus = scmutil.status(
477 copymod.copy(status.modified),
477 copymod.copy(status.modified),
478 copymod.copy(status.added),
478 copymod.copy(status.added),
479 copymod.copy(status.removed),
479 copymod.copy(status.removed),
480 copymod.copy(status.deleted),
480 copymod.copy(status.deleted),
481 copymod.copy(status.unknown),
481 copymod.copy(status.unknown),
482 copymod.copy(status.ignored),
482 copymod.copy(status.ignored),
483 copymod.copy(status.clean), # pytype: disable=wrong-arg-count
483 copymod.copy(status.clean), # pytype: disable=wrong-arg-count
484 )
484 )
485
485
486 # Force allows -X subrepo to skip the subrepo.
486 # Force allows -X subrepo to skip the subrepo.
487 subs, commitsubs, newstate = subrepoutil.precommit(
487 subs, commitsubs, newstate = subrepoutil.precommit(
488 repo.ui, wctx, tmpstatus, match, force=True
488 repo.ui, wctx, tmpstatus, match, force=True
489 )
489 )
490 for s in subs:
490 for s in subs:
491 if s in commitsubs:
491 if s in commitsubs:
492 dirtyreason = wctx.sub(s).dirtyreason(True)
492 dirtyreason = wctx.sub(s).dirtyreason(True)
493 raise error.Abort(dirtyreason)
493 raise error.Abort(dirtyreason)
494
494
495 if not force:
495 if not force:
496 repo.checkcommitpatterns(wctx, match, status, fail)
496 repo.checkcommitpatterns(wctx, match, status, fail)
497 diffopts = patch.difffeatureopts(
497 diffopts = patch.difffeatureopts(
498 ui,
498 ui,
499 opts=opts,
499 opts=opts,
500 whitespace=True,
500 whitespace=True,
501 section=b'commands',
501 section=b'commands',
502 configprefix=b'commit.interactive.',
502 configprefix=b'commit.interactive.',
503 )
503 )
504 diffopts.nodates = True
504 diffopts.nodates = True
505 diffopts.git = True
505 diffopts.git = True
506 diffopts.showfunc = True
506 diffopts.showfunc = True
507 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
507 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
508 originalchunks = patch.parsepatch(originaldiff)
508 originalchunks = patch.parsepatch(originaldiff)
509 match = scmutil.match(repo[None], pats)
509 match = scmutil.match(repo[None], pats)
510
510
511 # 1. filter patch, since we are intending to apply subset of it
511 # 1. filter patch, since we are intending to apply subset of it
512 try:
512 try:
513 chunks, newopts = filterfn(ui, originalchunks, match)
513 chunks, newopts = filterfn(ui, originalchunks, match)
514 except error.PatchError as err:
514 except error.PatchError as err:
515 raise error.Abort(_(b'error parsing patch: %s') % err)
515 raise error.Abort(_(b'error parsing patch: %s') % err)
516 opts.update(newopts)
516 opts.update(newopts)
517
517
518 # We need to keep a backup of files that have been newly added and
518 # We need to keep a backup of files that have been newly added and
519 # modified during the recording process because there is a previous
519 # modified during the recording process because there is a previous
520 # version without the edit in the workdir. We also will need to restore
520 # version without the edit in the workdir. We also will need to restore
521 # files that were the sources of renames so that the patch application
521 # files that were the sources of renames so that the patch application
522 # works.
522 # works.
523 newlyaddedandmodifiedfiles, alsorestore = newandmodified(
523 newlyaddedandmodifiedfiles, alsorestore = newandmodified(
524 chunks, originalchunks
524 chunks, originalchunks
525 )
525 )
526 contenders = set()
526 contenders = set()
527 for h in chunks:
527 for h in chunks:
528 try:
528 try:
529 contenders.update(set(h.files()))
529 contenders.update(set(h.files()))
530 except AttributeError:
530 except AttributeError:
531 pass
531 pass
532
532
533 changed = status.modified + status.added + status.removed
533 changed = status.modified + status.added + status.removed
534 newfiles = [f for f in changed if f in contenders]
534 newfiles = [f for f in changed if f in contenders]
535 if not newfiles:
535 if not newfiles:
536 ui.status(_(b'no changes to record\n'))
536 ui.status(_(b'no changes to record\n'))
537 return 0
537 return 0
538
538
539 modified = set(status.modified)
539 modified = set(status.modified)
540
540
541 # 2. backup changed files, so we can restore them in the end
541 # 2. backup changed files, so we can restore them in the end
542
542
543 if backupall:
543 if backupall:
544 tobackup = changed
544 tobackup = changed
545 else:
545 else:
546 tobackup = [
546 tobackup = [
547 f
547 f
548 for f in newfiles
548 for f in newfiles
549 if f in modified or f in newlyaddedandmodifiedfiles
549 if f in modified or f in newlyaddedandmodifiedfiles
550 ]
550 ]
551 backups = {}
551 backups = {}
552 if tobackup:
552 if tobackup:
553 backupdir = repo.vfs.join(b'record-backups')
553 backupdir = repo.vfs.join(b'record-backups')
554 try:
554 try:
555 os.mkdir(backupdir)
555 os.mkdir(backupdir)
556 except OSError as err:
556 except OSError as err:
557 if err.errno != errno.EEXIST:
557 if err.errno != errno.EEXIST:
558 raise
558 raise
559 try:
559 try:
560 # backup continues
560 # backup continues
561 for f in tobackup:
561 for f in tobackup:
562 fd, tmpname = pycompat.mkstemp(
562 fd, tmpname = pycompat.mkstemp(
563 prefix=f.replace(b'/', b'_') + b'.', dir=backupdir
563 prefix=f.replace(b'/', b'_') + b'.', dir=backupdir
564 )
564 )
565 os.close(fd)
565 os.close(fd)
566 ui.debug(b'backup %r as %r\n' % (f, tmpname))
566 ui.debug(b'backup %r as %r\n' % (f, tmpname))
567 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
567 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
568 backups[f] = tmpname
568 backups[f] = tmpname
569
569
570 fp = stringio()
570 fp = stringio()
571 for c in chunks:
571 for c in chunks:
572 fname = c.filename()
572 fname = c.filename()
573 if fname in backups:
573 if fname in backups:
574 c.write(fp)
574 c.write(fp)
575 dopatch = fp.tell()
575 dopatch = fp.tell()
576 fp.seek(0)
576 fp.seek(0)
577
577
578 # 2.5 optionally review / modify patch in text editor
578 # 2.5 optionally review / modify patch in text editor
579 if opts.get(b'review', False):
579 if opts.get(b'review', False):
580 patchtext = (
580 patchtext = (
581 crecordmod.diffhelptext
581 crecordmod.diffhelptext
582 + crecordmod.patchhelptext
582 + crecordmod.patchhelptext
583 + fp.read()
583 + fp.read()
584 )
584 )
585 reviewedpatch = ui.edit(
585 reviewedpatch = ui.edit(
586 patchtext, b"", action=b"diff", repopath=repo.path
586 patchtext, b"", action=b"diff", repopath=repo.path
587 )
587 )
588 fp.truncate(0)
588 fp.truncate(0)
589 fp.write(reviewedpatch)
589 fp.write(reviewedpatch)
590 fp.seek(0)
590 fp.seek(0)
591
591
592 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
592 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
593 # 3a. apply filtered patch to clean repo (clean)
593 # 3a. apply filtered patch to clean repo (clean)
594 if backups:
594 if backups:
595 m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
595 m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
596 mergemod.revert_to(repo[b'.'], matcher=m)
596 mergemod.revert_to(repo[b'.'], matcher=m)
597
597
598 # 3b. (apply)
598 # 3b. (apply)
599 if dopatch:
599 if dopatch:
600 try:
600 try:
601 ui.debug(b'applying patch\n')
601 ui.debug(b'applying patch\n')
602 ui.debug(fp.getvalue())
602 ui.debug(fp.getvalue())
603 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
603 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
604 except error.PatchError as err:
604 except error.PatchError as err:
605 raise error.Abort(pycompat.bytestr(err))
605 raise error.Abort(pycompat.bytestr(err))
606 del fp
606 del fp
607
607
608 # 4. We prepared working directory according to filtered
608 # 4. We prepared working directory according to filtered
609 # patch. Now is the time to delegate the job to
609 # patch. Now is the time to delegate the job to
610 # commit/qrefresh or the like!
610 # commit/qrefresh or the like!
611
611
612 # Make all of the pathnames absolute.
612 # Make all of the pathnames absolute.
613 newfiles = [repo.wjoin(nf) for nf in newfiles]
613 newfiles = [repo.wjoin(nf) for nf in newfiles]
614 return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
614 return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
615 finally:
615 finally:
616 # 5. finally restore backed-up files
616 # 5. finally restore backed-up files
617 try:
617 try:
618 dirstate = repo.dirstate
618 dirstate = repo.dirstate
619 for realname, tmpname in pycompat.iteritems(backups):
619 for realname, tmpname in pycompat.iteritems(backups):
620 ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
620 ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
621
621
622 if dirstate[realname] == b'n':
622 if dirstate[realname] == b'n':
623 # without normallookup, restoring timestamp
623 # without normallookup, restoring timestamp
624 # may cause partially committed files
624 # may cause partially committed files
625 # to be treated as unmodified
625 # to be treated as unmodified
626 dirstate.normallookup(realname)
626 dirstate.normallookup(realname)
627
627
628 # copystat=True here and above are a hack to trick any
628 # copystat=True here and above are a hack to trick any
629 # editors that have f open into thinking that we haven't modified them.
629 # editors that have f open into thinking that we haven't modified them.
630 #
630 #
631 # Also note that this is racy, as an editor could notice the
631 # Also note that this is racy, as an editor could notice the
632 # file's mtime before we've finished writing it.
632 # file's mtime before we've finished writing it.
633 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
633 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
634 os.unlink(tmpname)
634 os.unlink(tmpname)
635 if tobackup:
635 if tobackup:
636 os.rmdir(backupdir)
636 os.rmdir(backupdir)
637 except OSError:
637 except OSError:
638 pass
638 pass
639
639
640 def recordinwlock(ui, repo, message, match, opts):
640 def recordinwlock(ui, repo, message, match, opts):
641 with repo.wlock():
641 with repo.wlock():
642 return recordfunc(ui, repo, message, match, opts)
642 return recordfunc(ui, repo, message, match, opts)
643
643
644 return commit(ui, repo, recordinwlock, pats, opts)
644 return commit(ui, repo, recordinwlock, pats, opts)
645
645
646
646
647 class dirnode(object):
647 class dirnode(object):
648 """
648 """
649 Represent a directory in user working copy with information required for
649 Represent a directory in user working copy with information required for
650 the purpose of tersing its status.
650 the purpose of tersing its status.
651
651
652 path is the path to the directory, without a trailing '/'
652 path is the path to the directory, without a trailing '/'
653
653
654 statuses is a set of statuses of all files in this directory (this includes
654 statuses is a set of statuses of all files in this directory (this includes
655 all the files in all the subdirectories too)
655 all the files in all the subdirectories too)
656
656
657 files is a list of files which are direct children of this directory
657 files is a list of files which are direct children of this directory
658
658
659 subdirs is a dictionary with the sub-directory name as the key and its own
659 subdirs is a dictionary with the sub-directory name as the key and its own
660 dirnode object as the value
660 dirnode object as the value
661 """
661 """
662
662
663 def __init__(self, dirpath):
663 def __init__(self, dirpath):
664 self.path = dirpath
664 self.path = dirpath
665 self.statuses = set()
665 self.statuses = set()
666 self.files = []
666 self.files = []
667 self.subdirs = {}
667 self.subdirs = {}
668
668
669 def _addfileindir(self, filename, status):
669 def _addfileindir(self, filename, status):
670 """Add a file in this directory as a direct child."""
670 """Add a file in this directory as a direct child."""
671 self.files.append((filename, status))
671 self.files.append((filename, status))
672
672
673 def addfile(self, filename, status):
673 def addfile(self, filename, status):
674 """
674 """
675 Add a file to this directory or to its direct parent directory.
675 Add a file to this directory or to its direct parent directory.
676
676
677 If the file is not direct child of this directory, we traverse to the
677 If the file is not direct child of this directory, we traverse to the
678 directory of which this file is a direct child of and add the file
678 directory of which this file is a direct child of and add the file
679 there.
679 there.
680 """
680 """
681
681
682 # the filename contains a path separator, it means it's not the direct
682 # the filename contains a path separator, it means it's not the direct
683 # child of this directory
683 # child of this directory
684 if b'/' in filename:
684 if b'/' in filename:
685 subdir, filep = filename.split(b'/', 1)
685 subdir, filep = filename.split(b'/', 1)
686
686
687 # does the dirnode object for subdir exists
687 # does the dirnode object for subdir exists
688 if subdir not in self.subdirs:
688 if subdir not in self.subdirs:
689 subdirpath = pathutil.join(self.path, subdir)
689 subdirpath = pathutil.join(self.path, subdir)
690 self.subdirs[subdir] = dirnode(subdirpath)
690 self.subdirs[subdir] = dirnode(subdirpath)
691
691
692 # try adding the file in subdir
692 # try adding the file in subdir
693 self.subdirs[subdir].addfile(filep, status)
693 self.subdirs[subdir].addfile(filep, status)
694
694
695 else:
695 else:
696 self._addfileindir(filename, status)
696 self._addfileindir(filename, status)
697
697
698 if status not in self.statuses:
698 if status not in self.statuses:
699 self.statuses.add(status)
699 self.statuses.add(status)
700
700
701 def iterfilepaths(self):
701 def iterfilepaths(self):
702 """Yield (status, path) for files directly under this directory."""
702 """Yield (status, path) for files directly under this directory."""
703 for f, st in self.files:
703 for f, st in self.files:
704 yield st, pathutil.join(self.path, f)
704 yield st, pathutil.join(self.path, f)
705
705
706 def tersewalk(self, terseargs):
706 def tersewalk(self, terseargs):
707 """
707 """
708 Yield (status, path) obtained by processing the status of this
708 Yield (status, path) obtained by processing the status of this
709 dirnode.
709 dirnode.
710
710
711 terseargs is the string of arguments passed by the user with `--terse`
711 terseargs is the string of arguments passed by the user with `--terse`
712 flag.
712 flag.
713
713
714 Following are the cases which can happen:
714 Following are the cases which can happen:
715
715
716 1) All the files in the directory (including all the files in its
716 1) All the files in the directory (including all the files in its
717 subdirectories) share the same status and the user has asked us to terse
717 subdirectories) share the same status and the user has asked us to terse
718 that status. -> yield (status, dirpath). dirpath will end in '/'.
718 that status. -> yield (status, dirpath). dirpath will end in '/'.
719
719
720 2) Otherwise, we do following:
720 2) Otherwise, we do following:
721
721
722 a) Yield (status, filepath) for all the files which are in this
722 a) Yield (status, filepath) for all the files which are in this
723 directory (only the ones in this directory, not the subdirs)
723 directory (only the ones in this directory, not the subdirs)
724
724
725 b) Recurse the function on all the subdirectories of this
725 b) Recurse the function on all the subdirectories of this
726 directory
726 directory
727 """
727 """
728
728
729 if len(self.statuses) == 1:
729 if len(self.statuses) == 1:
730 onlyst = self.statuses.pop()
730 onlyst = self.statuses.pop()
731
731
732 # Making sure we terse only when the status abbreviation is
732 # Making sure we terse only when the status abbreviation is
733 # passed as terse argument
733 # passed as terse argument
734 if onlyst in terseargs:
734 if onlyst in terseargs:
735 yield onlyst, self.path + b'/'
735 yield onlyst, self.path + b'/'
736 return
736 return
737
737
738 # add the files to status list
738 # add the files to status list
739 for st, fpath in self.iterfilepaths():
739 for st, fpath in self.iterfilepaths():
740 yield st, fpath
740 yield st, fpath
741
741
742 # recurse on the subdirs
742 # recurse on the subdirs
743 for dirobj in self.subdirs.values():
743 for dirobj in self.subdirs.values():
744 for st, fpath in dirobj.tersewalk(terseargs):
744 for st, fpath in dirobj.tersewalk(terseargs):
745 yield st, fpath
745 yield st, fpath
746
746
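# Illustrative sketch (not part of the original module): how a dirnode tree is
# built and tersed. The paths and statuses below are made up for the example.
#
#   root = dirnode(b'')
#   root.addfile(b'a/b/f.txt', b'm')  # creates dirnode(b'a') and dirnode(b'a/b')
#   root.addfile(b'a/g.txt', b'm')
#   # every dirnode on the way records status b'm', so asking to terse b'm'
#   # collapses the whole subtree into one entry:
#   list(root.subdirs[b'a'].tersewalk(b'm'))  # -> [(b'm', b'a/')]
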

def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory share the same status.

    statuslist is a scmutil.status() object which contains a list of files
    for each status.
    terseargs is the string passed by the user as the argument to the
    `--terse` flag.

    The function makes a tree of objects of dirnode class, and at each node it
    stores the information required to know whether we can terse a certain
    directory or not.
    """
    # the order matters here as that is used to produce final list
    allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # checking the argument validity
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.Abort(_(b"'%s' not recognized") % s)

    # creating a dirnode object for the root of the repo
    rootobj = dirnode(b'')
    pstatus = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    tersedict = {}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # we won't be tersing the root dir, so add files in it
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # process each sub-directory and build tersedict
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    tersedlist = []
    for st in allst:
        tersedict[st].sort()
        tersedlist.append(tersedict[st])

    return scmutil.status(*tersedlist)

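# For instance (sketch): given a status whose only entries are two unknown
# files under build/, tersing with b'u' replaces them with the directory:
#
#   st = scmutil.status([], [], [], [], [b'build/x.o', b'build/y.o'], [], [])
#   tersedir(st, b'u').unknown  # -> [b'build/']
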
def _commentlines(raw):
    '''Surround lines with a comment char and a new line'''
    lines = raw.splitlines()
    commentedlines = [b'# %s' % line for line in lines]
    return b'\n'.join(commentedlines) + b'\n'

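# For instance (sketch): _commentlines(b'first\nsecond') returns
# b'# first\n# second\n', which is how the morestatus hints below end up
# prefixed like comment lines in `hg status` output.
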
@attr.s(frozen=True)
class morestatus(object):
    reporoot = attr.ib()
    unfinishedop = attr.ib()
    unfinishedmsg = attr.ib()
    activemerge = attr.ib()
    unresolvedpaths = attr.ib()
    _formattedpaths = attr.ib(init=False, default=set())
    _label = b'status.morestatus'

    def formatfile(self, path, fm):
        self._formattedpaths.add(path)
        if self.activemerge and path in self.unresolvedpaths:
            fm.data(unresolved=True)

    def formatfooter(self, fm):
        if self.unfinishedop or self.unfinishedmsg:
            fm.startitem()
            fm.data(itemtype=b'morestatus')

            if self.unfinishedop:
                fm.data(unfinished=self.unfinishedop)
                statemsg = (
                    _(b'The repository is in an unfinished *%s* state.')
                    % self.unfinishedop
                )
                fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
            if self.unfinishedmsg:
                fm.data(unfinishedmsg=self.unfinishedmsg)

            # May also start new data items.
            self._formatconflicts(fm)

            if self.unfinishedmsg:
                fm.plain(
                    b'%s\n' % _commentlines(self.unfinishedmsg),
                    label=self._label,
                )

    def _formatconflicts(self, fm):
        if not self.activemerge:
            return

        if self.unresolvedpaths:
            mergeliststr = b'\n'.join(
                [
                    b'    %s'
                    % util.pathto(self.reporoot, encoding.getcwd(), path)
                    for path in self.unresolvedpaths
                ]
            )
            msg = (
                _(
                    b'''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE'''
                )
                % mergeliststr
            )

            # If any paths with unresolved conflicts were not previously
            # formatted, output them now.
            for f in self.unresolvedpaths:
                if f in self._formattedpaths:
                    # Already output.
                    continue
                fm.startitem()
                # We can't claim to know the status of the file - it may just
                # have been in one of the states that were not requested for
                # display, so it could be anything.
                fm.data(itemtype=b'file', path=f, unresolved=True)

        else:
            msg = _(b'No unresolved merge conflicts.')

        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)


def readmorestatus(repo):
    """Returns a morestatus object if the repo has unfinished state."""
    statetuple = statemod.getrepostate(repo)
    mergestate = mergestatemod.mergestate.read(repo)
    activemerge = mergestate.active()
    if not statetuple and not activemerge:
        return None

    unfinishedop = unfinishedmsg = unresolved = None
    if statetuple:
        unfinishedop, unfinishedmsg = statetuple
    if activemerge:
        unresolved = sorted(mergestate.unresolved())
    return morestatus(
        repo.root, unfinishedop, unfinishedmsg, activemerge, unresolved
    )

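# Illustrative sketch (not part of the original module): `hg status` wires this
# in roughly as follows (names abbreviated, formatter setup omitted):
#
#   mstatus = readmorestatus(repo)
#   if mstatus:
#       for f in files_being_shown:
#           mstatus.formatfile(f, fm)  # tag unresolved files in the output
#       mstatus.formatfooter(fm)       # then emit the unfinished-state hints
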
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for e in keys:
        aliases = parsealiases(e)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            for a in aliases:
                if a.startswith(cmd):
                    found = a
                    break
        if found is not None:
            if aliases[0].startswith(b"debug") or found.startswith(b"debug"):
                debugchoice[found] = (aliases, table[e])
            else:
                choice[found] = (aliases, table[e])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds

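# For instance (sketch, with a made-up table): given entries b'log|history' and
# b'debuglog', findpossible(b'l', table) matches only {b'log': ...}, while
# findpossible(b'debugl', table) returns the debug command because no normal
# command matches that prefix.
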
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        clist = sorted(choice)
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)


def changebranch(ui, repo, revs, label, opts):
    """Change the branch name of the given revs to label"""

    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.Abort(b"empty revision set")
        roots = repo.revs(b'roots(%ld)', revs)
        if len(roots) > 1:
            raise error.Abort(
                _(b"cannot change branch of non-linear revisions")
            )
        rewriteutil.precheck(repo, revs, b'change branch of')

        root = repo[roots.first()]
        rpb = {parent.branch() for parent in root.parents()}
        if (
            not opts.get(b'force')
            and label not in rpb
            and label in repo.branchmap()
        ):
            raise error.Abort(_(b"a branch of the same name already exists"))

        if repo.revs(b'obsolete() and %ld', revs):
            raise error.Abort(
                _(b"cannot change branch of an obsolete changeset")
            )

        # make sure only topological heads
        if repo.revs(b'heads(%ld) - head()', revs):
            raise error.Abort(_(b"cannot change branch in middle of a stack"))

        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context

        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug(
                b"changing branch of '%s' from '%s' to '%s'\n"
                % (hex(ctx.node()), oldbranch, label)
            )
            extra = ctx.extra()
            extra[b'branch_change'] = hex(ctx.node())
            # While changing branch of a set of linear commits, make sure that
            # we base our commits on the new parent rather than the old
            # parent, which was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(
                repo,
                (p1, p2),
                ctx.description(),
                ctx.files(),
                filectxfn,
                user=ctx.user(),
                date=ctx.date(),
                extra=extra,
                branch=label,
            )

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug(b'new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(
            repo, replacements, b'branch-change', fixphase=True
        )

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg

                hg.update(repo, newid[0], quietempty=True)

        ui.status(_(b"changed branch on %d changesets\n") % len(replacements))

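# Illustrative sketch (not part of the original module): for a linear stack
# r1 <- r2, the loop above first commits a rewritten r1' and records
# replacements[r1] = (r1',); when r2 is rewritten, its p1 is remapped through
# that table, so the result is a new stack r1' <- r2' on branch `label` and
# the old commits are obsoleted by cleanupnodes().
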
def findrepo(p):
    while not os.path.isdir(os.path.join(p, b".hg")):
        oldp, p = p, os.path.dirname(p)
        if p == oldp:
            return None

    return p

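# For instance (sketch): with a repository rooted at /home/u/proj,
# findrepo(b'/home/u/proj/src/lib') walks up through src/ and returns
# b'/home/u/proj'; starting from b'/tmp' (no .hg anywhere above it) the walk
# reaches the filesystem root and returns None.
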
def bailifchanged(repo, merge=True, hint=None):
    """enforce the precondition that the working directory must be clean.

    'merge' can be set to False if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to the Abort exception.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_(b'outstanding uncommitted merge'), hint=hint)
    st = repo.status()
    if st.modified or st.added or st.removed or st.deleted:
        raise error.Abort(_(b'uncommitted changes'), hint=hint)
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged(hint=hint)


def logmessage(ui, opts):
    """get the log message according to the -m and -l options"""

    check_at_most_one_arg(opts, b'message', b'logfile')

    message = opts.get(b'message')
    logfile = opts.get(b'logfile')

    if not message and logfile:
        try:
            if isstdiofilename(logfile):
                message = ui.fin.read()
            else:
                message = b'\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(
                _(b"can't read commit message '%s': %s")
                % (logfile, encoding.strtolocal(inst.strerror))
            )
    return message


def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    a merge is being committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        if ctxorbool:
            return baseformname + b".merge"
    elif len(ctxorbool.parents()) > 1:
        return baseformname + b".merge"

    return baseformname + b".normal"

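# For instance (sketch):
#   mergeeditform(True, b'commit.amend')   # -> b'commit.amend.merge'
#   mergeeditform(False, b'commit.amend')  # -> b'commit.amend.normal'
# A two-parent ctx behaves like True, a one-parent ctx like False.
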
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows the description to be changed
    before it is stored.

    'extramsg' is an extra message to be shown in the editor instead of
    the 'Leave message empty to abort commit' line. The 'HG: ' prefix and
    EOL are added automatically.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific to usage in MQ.
    """
    if edit or finishdesc or extramsg:
        return lambda r, c, s: commitforceeditor(
            r, c, s, finishdesc=finishdesc, extramsg=extramsg, editform=editform
        )
    elif editform:
        return lambda r, c, s: commiteditor(r, c, s, editform=editform)
    else:
        return commiteditor

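# Illustrative sketch (not part of the original module): a typical caller
# builds the editor once and passes it down to the commit machinery, e.g.:
#
#   editor = getcommiteditor(edit=bool(opts.get(b'edit')),
#                            editform=b'commit.normal')
#   repo.commit(message, user, date, match, editor=editor)
#
# The exact repo.commit() signature is abbreviated here.
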
def _escapecommandtemplate(tmpl):
    parts = []
    for typ, start, end in templater.scantemplate(tmpl, raw=True):
        if typ == b'string':
            parts.append(stringutil.escapestr(tmpl[start:end]))
        else:
            parts.append(tmpl[start:end])
    return b''.join(parts)


def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if not tmpl:
        return tmpl
    t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
    return t.renderdefault(props)


def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    tres = formatter.templateresources(repo.ui, repo)
    t = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords, resources=tres
    )
    mapping = {b'ctx': ctx}
    if props:
        mapping.update(props)
    return t.renderdefault(mapping)

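# For instance (sketch, assuming `ctx` is any changectx):
#   rendertemplate(ctx, b'{rev}:{node|short}\n')         # e.g. b'42:1f0dee641bb7\n'
#   rendertemplate(ctx, b'{myprop}', {b'myprop': b'x'})  # -> b'x'
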
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            c = pat[n + 1 : n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % c
                )
    return b''.join(newname)


def makefilename(ctx, pat, **props):
    if not pat:
        return pat
    tmpl = _buildfntemplate(pat, **props)
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))

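# For instance (sketch): `hg export -o %h.patch` ends up here, and
#   makefilename(ctx, b'%h.patch')                           # -> b'<shorthash>.patch'
#   makefilename(ctx, b'%n-of-%N.patch', seqno=2, total=10)  # -> b'02-of-10.patch'
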
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    return not pat or pat == b'-'


class _unclosablefile(object):
    def __init__(self, fp):
        self._fp = fp

    def close(self):
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        return getattr(self._fp, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        pass


def makefileobj(ctx, pat, mode=b'wb', **props):
    writable = mode not in (b'r', b'rb')

    if isstdiofilename(pat):
        repo = ctx.repo()
        if writable:
            fp = repo.ui.fout
        else:
            fp = repo.ui.fin
        return _unclosablefile(fp)
    fn = makefilename(ctx, pat, **props)
    return open(fn, mode)

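# For instance (sketch): makefileobj(ctx, b'-') returns ui.fout wrapped in
# _unclosablefile, so `hg export -o -` can treat stdout like any output file:
#
#   with makefileobj(ctx, b'-') as fp:
#       fp.write(b'...')
#   # fp.close() was a no-op; the ui's file object is still usable afterwards
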
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts[b'changelog']
    mf = opts[b'manifest']
    dir = opts[b'dir']
    msg = None
    if cl and mf:
        msg = _(b'cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _(b'cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _(b'cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _(
                b'cannot specify --changelog or --manifest or --dir '
                b'without a repository'
            )
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if b'treemanifest' not in repo.requirements:
                raise error.Abort(
                    _(
                        b"--dir can only be used on repos with "
                        b"treemanifest enabled"
                    )
                )
            if not dir.endswith(b'/'):
                dir = dir + b'/'
            dirlog = repo.manifestlog.getstorage(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, b'_revlog'):
            r = r._revlog  # pytype: disable=attribute-error
        elif r is not None:
            raise error.Abort(_(b'%r does not appear to be a revlog') % r)

    if not r:
        if not returnrevlog:
            raise error.Abort(_(b'cannot give path to non-revlog'))

        if not file_:
            raise error.CommandError(cmd, _(b'invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_(b"revlog '%s' not found") % file_)
        r = revlog.revlog(
            vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
        )
    return r


def openrevlog(repo, cmd, file_, opts):
    """Obtain a revlog backing storage of an item.

    This is similar to ``openstorage()`` except it always returns a revlog.

    In most cases, a caller cares about the main storage object - not the
    revlog backing it. Therefore, this function should only be used by code
    that needs to examine low-level revlog implementation details. e.g. debug
    commands.
    """
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)

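# Illustrative sketch (not part of the original module): a debug command
# resolves its low-level storage roughly like this (opts carries the
# --changelog/--manifest/--dir flags from the command line):
#
#   rl = openrevlog(repo, b'debugsomething', file_, opts)
#   ui.write(b'%d revisions\n' % len(rl))
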
1417 def copy(ui, repo, pats, opts, rename=False):
1417 def copy(ui, repo, pats, opts, rename=False):
1418 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1418 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1419
1419
1420 # called with the repo lock held
1420 # called with the repo lock held
1421 #
1421 #
1422 # hgsep => pathname that uses "/" to separate directories
1422 # hgsep => pathname that uses "/" to separate directories
1423 # ossep => pathname that uses os.sep to separate directories
1423 # ossep => pathname that uses os.sep to separate directories
1424 cwd = repo.getcwd()
1424 cwd = repo.getcwd()
1425 targets = {}
1425 targets = {}
1426 forget = opts.get(b"forget")
1426 forget = opts.get(b"forget")
1427 after = opts.get(b"after")
1427 after = opts.get(b"after")
1428 dryrun = opts.get(b"dry_run")
1428 dryrun = opts.get(b"dry_run")
1429 rev = opts.get(b'at_rev')
1429 rev = opts.get(b'at_rev')
1430 if rev:
1430 if rev:
1431 if not forget and not after:
1431 if not forget and not after:
1432 # TODO: Remove this restriction and make it also create the copy
1432 # TODO: Remove this restriction and make it also create the copy
1433 # targets (and remove the rename source if rename==True).
1433 # targets (and remove the rename source if rename==True).
1434 raise error.Abort(_(b'--at-rev requires --after'))
1434 raise error.Abort(_(b'--at-rev requires --after'))
1435 ctx = scmutil.revsingle(repo, rev)
1435 ctx = scmutil.revsingle(repo, rev)
1436 if len(ctx.parents()) > 1:
1436 if len(ctx.parents()) > 1:
1437 raise error.Abort(_(b'cannot mark/unmark copy in merge commit'))
1437 raise error.Abort(_(b'cannot mark/unmark copy in merge commit'))
1438 else:
1438 else:
1439 ctx = repo[None]
1439 ctx = repo[None]
1440
1440
1441 pctx = ctx.p1()
1441 pctx = ctx.p1()
1442
1442
1443 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1443 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1444
1444
1445 if forget:
1445 if forget:
1446 if ctx.rev() is None:
1446 if ctx.rev() is None:
1447 new_ctx = ctx
1447 new_ctx = ctx
1448 else:
1448 else:
1449 if len(ctx.parents()) > 1:
1449 if len(ctx.parents()) > 1:
1450 raise error.Abort(_(b'cannot unmark copy in merge commit'))
1450 raise error.Abort(_(b'cannot unmark copy in merge commit'))
1451 # avoid cycle context -> subrepo -> cmdutil
1451 # avoid cycle context -> subrepo -> cmdutil
1452 from . import context
1452 from . import context
1453
1453
1454 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1454 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1455 new_ctx = context.overlayworkingctx(repo)
1455 new_ctx = context.overlayworkingctx(repo)
1456 new_ctx.setbase(ctx.p1())
1456 new_ctx.setbase(ctx.p1())
1457 mergemod.graft(repo, ctx, wctx=new_ctx)
1457 mergemod.graft(repo, ctx, wctx=new_ctx)
1458
1458
1459 match = scmutil.match(ctx, pats, opts)
1459 match = scmutil.match(ctx, pats, opts)
1460
1460
1461 current_copies = ctx.p1copies()
1461 current_copies = ctx.p1copies()
1462 current_copies.update(ctx.p2copies())
1462 current_copies.update(ctx.p2copies())
1463
1463
1464 uipathfn = scmutil.getuipathfn(repo)
1464 uipathfn = scmutil.getuipathfn(repo)
1465 for f in ctx.walk(match):
1465 for f in ctx.walk(match):
1466 if f in current_copies:
1466 if f in current_copies:
1467 new_ctx[f].markcopied(None)
1467 new_ctx[f].markcopied(None)
1468 elif match.exact(f):
1468 elif match.exact(f):
1469 ui.warn(
1469 ui.warn(
1470 _(
1470 _(
1471 b'%s: not unmarking as copy - file is not marked as copied\n'
1471 b'%s: not unmarking as copy - file is not marked as copied\n'
1472 )
1472 )
1473 % uipathfn(f)
1473 % uipathfn(f)
1474 )
1474 )
1475
1475
1476 if ctx.rev() is not None:
1476 if ctx.rev() is not None:
1477 with repo.lock():
1477 with repo.lock():
1478 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1478 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1479 new_node = mem_ctx.commit()
1479 new_node = mem_ctx.commit()
1480
1480
1481 if repo.dirstate.p1() == ctx.node():
1481 if repo.dirstate.p1() == ctx.node():
1482 with repo.dirstate.parentchange():
1482 with repo.dirstate.parentchange():
1483 scmutil.movedirstate(repo, repo[new_node])
1483 scmutil.movedirstate(repo, repo[new_node])
1484 replacements = {ctx.node(): [new_node]}
1484 replacements = {ctx.node(): [new_node]}
1485 scmutil.cleanupnodes(
1485 scmutil.cleanupnodes(
1486 repo, replacements, b'uncopy', fixphase=True
1486 repo, replacements, b'uncopy', fixphase=True
1487 )
1487 )
1488
1488
1489 return
1489 return
1490
1490
1491 pats = scmutil.expandpats(pats)
1491 pats = scmutil.expandpats(pats)
1492 if not pats:
1492 if not pats:
1493 raise error.Abort(_(b'no source or destination specified'))
1493 raise error.Abort(_(b'no source or destination specified'))
1494 if len(pats) == 1:
1494 if len(pats) == 1:
1495 raise error.Abort(_(b'no destination specified'))
1495 raise error.Abort(_(b'no destination specified'))
1496 dest = pats.pop()
1496 dest = pats.pop()
1497
1497
1498 def walkpat(pat):
1498 def walkpat(pat):
1499 srcs = []
1499 srcs = []
1500 # TODO: Inline and simplify the non-working-copy version of this code
1500 # TODO: Inline and simplify the non-working-copy version of this code
1501 # since it shares very little with the working-copy version of it.
1501 # since it shares very little with the working-copy version of it.
1502 ctx_to_walk = ctx if ctx.rev() is None else pctx
1502 ctx_to_walk = ctx if ctx.rev() is None else pctx
1503 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1503 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1504 for abs in ctx_to_walk.walk(m):
1504 for abs in ctx_to_walk.walk(m):
1505 rel = uipathfn(abs)
1505 rel = uipathfn(abs)
1506 exact = m.exact(abs)
1506 exact = m.exact(abs)
1507 if abs not in ctx:
1507 if abs not in ctx:
1508 if abs in pctx:
1508 if abs in pctx:
1509 if not after:
1509 if not after:
1510 if exact:
1510 if exact:
1511 ui.warn(
1511 ui.warn(
1512 _(
1512 _(
1513 b'%s: not copying - file has been marked '
1513 b'%s: not copying - file has been marked '
1514 b'for remove\n'
1514 b'for remove\n'
1515 )
1515 )
1516 % rel
1516 % rel
1517 )
1517 )
1518 continue
1518 continue
1519 else:
1519 else:
1520 if exact:
1520 if exact:
1521 ui.warn(
1521 ui.warn(
1522 _(b'%s: not copying - file is not managed\n') % rel
1522 _(b'%s: not copying - file is not managed\n') % rel
1523 )
1523 )
1524 continue
1524 continue
1525
1525
1526 # abs: hgsep
1526 # abs: hgsep
1527 # rel: ossep
1527 # rel: ossep
1528 srcs.append((abs, rel, exact))
1528 srcs.append((abs, rel, exact))
1529 return srcs
1529 return srcs
1530
1530
1531 if ctx.rev() is not None:
1531 if ctx.rev() is not None:
1532 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1532 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1533 absdest = pathutil.canonpath(repo.root, cwd, dest)
1533 absdest = pathutil.canonpath(repo.root, cwd, dest)
1534 if ctx.hasdir(absdest):
1534 if ctx.hasdir(absdest):
1535 raise error.Abort(
1535 raise error.Abort(
1536 _(b'%s: --at-rev does not support a directory as destination')
1536 _(b'%s: --at-rev does not support a directory as destination')
1537 % uipathfn(absdest)
1537 % uipathfn(absdest)
1538 )
1538 )
1539 if absdest not in ctx:
1539 if absdest not in ctx:
1540 raise error.Abort(
1540 raise error.Abort(
1541 _(b'%s: copy destination does not exist in %s')
1541 _(b'%s: copy destination does not exist in %s')
1542 % (uipathfn(absdest), ctx)
1542 % (uipathfn(absdest), ctx)
1543 )
1543 )
1544
1544
1545 # avoid cycle context -> subrepo -> cmdutil
1545 # avoid cycle context -> subrepo -> cmdutil
1546 from . import context
1546 from . import context
1547
1547
1548 copylist = []
1548 copylist = []
1549 for pat in pats:
1549 for pat in pats:
1550 srcs = walkpat(pat)
1550 srcs = walkpat(pat)
1551 if not srcs:
1551 if not srcs:
1552 continue
1552 continue
1553 for abs, rel, exact in srcs:
1553 for abs, rel, exact in srcs:
1554 copylist.append(abs)
1554 copylist.append(abs)
1555
1555
1556 if not copylist:
1556 if not copylist:
1557 raise error.Abort(_(b'no files to copy'))
1557 raise error.Abort(_(b'no files to copy'))
1558 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1558 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1559 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1559 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1560 # existing functions below.
1560 # existing functions below.
1561 if len(copylist) != 1:
1561 if len(copylist) != 1:
1562 raise error.Abort(_(b'--at-rev requires a single source'))
1562 raise error.Abort(_(b'--at-rev requires a single source'))
1563
1563
1564 new_ctx = context.overlayworkingctx(repo)
1564 new_ctx = context.overlayworkingctx(repo)
1565 new_ctx.setbase(ctx.p1())
1565 new_ctx.setbase(ctx.p1())
1566 mergemod.graft(repo, ctx, wctx=new_ctx)
1566 mergemod.graft(repo, ctx, wctx=new_ctx)
1567
1567
1568 new_ctx.markcopied(absdest, copylist[0])
1568 new_ctx.markcopied(absdest, copylist[0])
1569
1569
1570 with repo.lock():
1570 with repo.lock():
1571 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1571 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1572 new_node = mem_ctx.commit()
1572 new_node = mem_ctx.commit()
1573
1573
1574 if repo.dirstate.p1() == ctx.node():
1574 if repo.dirstate.p1() == ctx.node():
1575 with repo.dirstate.parentchange():
1575 with repo.dirstate.parentchange():
1576 scmutil.movedirstate(repo, repo[new_node])
1576 scmutil.movedirstate(repo, repo[new_node])
1577 replacements = {ctx.node(): [new_node]}
1577 replacements = {ctx.node(): [new_node]}
1578 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1578 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1579
1579
1580 return
1580 return
1581
1581
    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if b'/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit(b'/', 1)
            abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(
                _(b'%s: not overwriting - %s collides with %s\n')
                % (
                    reltarget,
                    repo.pathto(abssrc, cwd),
                    repo.pathto(prevsrc, cwd),
                )
            )
            return True  # report a failure

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
                abstarget
            ):
                if not rename:
                    ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
                    return True  # report a failure
                exists = False
                samefile = True

        if not after and exists or after and state in b'mn':
            if not opts[b'force']:
                if state in b'mn':
                    msg = _(b'%s: not overwriting - file already committed\n')
                    if after:
                        flags = b'--after --force'
                    else:
                        flags = b'--force'
                    if rename:
                        hint = (
                            _(
                                b"('hg rename %s' to replace the file by "
                                b'recording a rename)\n'
                            )
                            % flags
                        )
                    else:
                        hint = (
                            _(
                                b"('hg copy %s' to replace the file by "
                                b'recording a copy)\n'
                            )
                            % flags
                        )
                else:
                    msg = _(b'%s: not overwriting - file exists\n')
                    if rename:
                        hint = _(
                            b"('hg rename --after' to record the rename)\n"
                        )
                    else:
                        hint = _(b"('hg copy --after' to record the copy)\n")
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return True  # report a failure

        if after:
            if not exists:
                if rename:
                    ui.warn(
                        _(b'%s: not recording move - %s does not exist\n')
                        % (relsrc, reltarget)
                    )
                else:
                    ui.warn(
                        _(b'%s: not recording copy - %s does not exist\n')
                        % (relsrc, reltarget)
                    )
                return True  # report a failure
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or b'.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    tmp = target + b"~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    # Preserve stat info on renames, not on copies; this matches
                    # Linux CLI behavior.
                    util.copyfile(src, target, copystat=rename)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(
                        _(b'%s: cannot copy - %s\n')
                        % (relsrc, encoding.strtolocal(inst.strerror))
                    )
                    return True  # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(
            ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
        )
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
                repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
            ctx.forget([abssrc])

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(
                        dest, os.path.basename(util.localpath(p))
                    )
                else:
                    res = lambda p: dest
        return res

    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(
                _(
                    b'with multiple sources, destination must be an '
                    b'existing directory'
                )
            )
        if util.endswithsep(dest):
            raise error.Abort(_(b'destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_(b'no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    return errors != 0


## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mapping from identifier to actual import function
#
# 'preimport' hooks are run before the commit is made and are provided the
# following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of the in-memory commit and more. Feel free to rework the code to
# get there.
extrapreimportmap = {}
# 'postimport' hooks are run after the commit is made and are provided the
# following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
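
# Example (illustrative sketch; the identifier and function are hypothetical):
# an extension would register a preimport hook from its uisetup() like so:
#
#   def _recordpatchname(repo, patchdata, extra, opts):
#       # stash the patch filename in the changeset extras
#       extra[b'patchname'] = patchdata.get(b'filename', b'')
#
#   cmdutil.extrapreimport.append(b'recordpatchname')
#   cmdutil.extrapreimportmap[b'recordpatchname'] = _recordpatchname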


def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parents of the created commit
    :opts: the full dict of options passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that updates a repo to a given node
                 updatefunc(<repo>, <node>)
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get(b'filename')
    message = patchdata.get(b'message')
    user = opts.get(b'user') or patchdata.get(b'user')
    date = opts.get(b'date') or patchdata.get(b'date')
    branch = patchdata.get(b'branch')
    nodeid = patchdata.get(b'nodeid')
    p1 = patchdata.get(b'p1')
    p2 = patchdata.get(b'p2')

    nocommit = opts.get(b'no_commit')
    importbranch = opts.get(b'import_branch')
    update = not opts.get(b'bypass')
    strip = opts[b"strip"]
    prefix = opts[b"prefix"]
    sim = float(opts.get(b'similarity') or 0)

    if not tmpname:
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug(b'message:\n%s\n' % (message or b''))

    if len(parents) == 1:
        parents.append(repo[nullid])
    if opts.get(b'exact'):
        if not nodeid or not p1:
            raise error.Abort(_(b'not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullid]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullid]
        except error.RepoError:
            p1, p2 = parents
        if p2.node() == nullid:
            ui.warn(
                _(
                    b"warning: import the patch as a normal revision\n"
                    b"(use --exact to import the patch as a merge)\n"
                )
            )
    else:
        p1, p2 = parents

    n = None
    if update:
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get(b'exact') or importbranch:
            repo.dirstate.setbranch(branch or b'default')

        partial = opts.get(b'partial', False)
        files = set()
        try:
            patch.patch(
                ui,
                repo,
                tmpname,
                strip=strip,
                prefix=prefix,
                files=files,
                eolmode=None,
                similarity=sim / 100.0,
            )
        except error.PatchError as e:
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get(b'exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], b'import.normal')
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[(b'ui', b'allowemptycommit')] = True
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = repo.commit(
                    message, user, date, match=m, editor=editor, extra=extra
                )
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        if opts.get(b'exact') or importbranch:
            branch = branch or b'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(
                    ui,
                    repo,
                    p1,
                    store,
                    tmpname,
                    strip,
                    prefix,
                    files,
                    eolmode=None,
                )
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=b'import.bypass')
            memctx = context.memctx(
                repo,
                (p1.node(), p2.node()),
                message,
                files=files,
                filectxfn=store,
                user=user,
                date=date,
                branch=branch,
                editor=editor,
            )

            overrides = {}
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = memctx.commit()
        finally:
            store.close()
    if opts.get(b'exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_(b'patch is damaged or loses information'))
    msg = _(b'applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _(b'created %s') % short(n)
    return msg, n, rejects


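# Usage sketch for tryimportone() (simplified; commands.import drives this in
# a loop under the repo wlock and a dirstateguard, omitted here):
#
#   def updatefunc(repo, node):
#       hg.clean(repo, node)  # or hg.update(); the caller picks the policy
#
#   patchdata = patch.extract(ui, patchfile)  # the dict consumed above
#   msg, node, rejects = tryimportone(
#       ui, repo, patchdata, parents, opts, msgs, updatefunc
#   )
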
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# the function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
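
# Example (illustrative sketch; the identifier and function are hypothetical):
# an extension can add a header line to every exported patch:
#
#   def _seqnoheader(seqno, ctx):
#       # returning None would skip the header for this changeset
#       return b'Patch-Seqno: %d' % seqno
#
#   cmdutil.extraexport.append(b'seqno')
#   cmdutil.extraexportmap[b'seqno'] = _seqnoheader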


def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    fm.condwrite(
        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
    )
    fm.write(b'node', b'# Node ID %s\n', hex(node))
    fm.plain(b'# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain(b'# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    if fm.isplain():
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))


def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file"""
    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        if not dest.startswith(b'<'):
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)


def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets to possibly multiple files"""
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemap = util.sortdict()  # filename: [(seqno, rev), ...]

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        dest = makefilename(
            ctx, fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        filemap.setdefault(dest, []).append((seqno, rev))

    for dest in filemap:
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note(b"%s\n" % dest)
            for seqno, rev in filemap[dest]:
                fm.startitem()
                ctx = repo[rev]
                _exportsingle(
                    repo, ctx, fm, match, switch_parent, seqno, diffopts
                )


def _prefetchchangedfiles(repo, revs, match):
    allfiles = set()
    for rev in revs:
        for file in repo[rev].files():
            if not match or match(file):
                allfiles.add(file)
    match = scmutil.matchfiles(repo, allfiles)
    revmatches = [(rev, match) for rev in revs]
    scmutil.prefetchfiles(repo, revmatches)


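# Note on the change above: scmutil.prefetchfiles() used to take
# (repo, revs, match) and now takes a list of (rev, match) pairs, so the
# prefetched file set may differ per revision.  _prefetchchangedfiles() still
# passes one union matcher for every rev; a caller that only wants the files
# actually touched by each revision could instead do (illustrative sketch):
#
#   revmatches = [
#       (rev, scmutil.matchfiles(repo, repo[rev].files())) for rev in revs
#   ]
#   scmutil.prefetchfiles(repo, revmatches)

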
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Otherwise: All revs will be written to basefm.
    '''
    _prefetchchangedfiles(repo, revs, match)

    if not fntemplate:
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
    else:
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )


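# Usage sketch (hedged; commands.export builds its formatter roughly this
# way before delegating here):
#
#   with ui.formatter(b'export', {}) as basefm:
#       export(repo, [repo[b'tip'].rev()], basefm, fntemplate=b'',
#              opts=patch.diffallopts(ui))

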
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream"""
    _prefetchchangedfiles(repo, revs, match)

    dest = getattr(fp, 'name', b'<unnamed>')
    with formatter.formatter(repo.ui, fp, b'export', {}) as fm:
        _exportfile(repo, revs, fm, dest, switch_parent, opts, match)

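# Usage sketch (hedged): callers such as patchbomb hand in an open binary
# stream:
#
#   with open('tip.patch', 'wb') as fp:
#       exportfile(repo, [repo[b'tip'].rev()], fp,
#                  opts=patch.diffallopts(repo.ui))
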

def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug functions."""
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    fm.condwrite(
        succs,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, succs), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write(
            b'parentnodes',
            b'{%s} ',
            fm.formatlist(map(hex, parents), name=b'node', sep=b', '),
        )
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    meta = marker.metadata().copy()
    meta.pop(b'date', None)
    smeta = pycompat.rapply(pycompat.maybebytestr, meta)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')


def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    df = dateutil.matchdate(date)
    m = scmutil.matchall(repo)
    results = {}

    def prep(ctx, fns):
        d = ctx.date()
        if df(d[0]):
            results[ctx.rev()] = d

    for ctx in walkchangerevs(repo, m, {b'rev': None}, prep):
        rev = ctx.rev()
        if rev in results:
            ui.status(
                _(b"found revision %d from %s\n")
                % (rev, dateutil.datestr(results[rev]))
            )
            return b'%d' % rev

    raise error.Abort(_(b"revision matching date not found"))


def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2


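# Illustration (added; not upstream): the generator doubles the window until
# it reaches sizelimit, then keeps yielding the cap:
#
#   >>> import itertools
#   >>> list(itertools.islice(increasingwindows(), 9))
#   [8, 16, 32, 64, 128, 256, 512, 512, 512]

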
def _walkrevs(repo, opts):
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    follow = opts.get(b'follow') or opts.get(b'follow_first')
    if opts.get(b'rev'):
        revs = scmutil.revrange(repo, opts[b'rev'])
    elif follow and repo.dirstate.p1() == nullid:
        revs = smartset.baseset()
    elif follow:
        revs = repo.revs(b'reverse(:.)')
    else:
        revs = smartset.spanset(repo)
        revs.reverse()
    return revs


class FileWalkError(Exception):
    pass


def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)

    def filerevs(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in pycompat.xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append(
                (linkrev, parentlinkrevs, follow and filelog.renamed(n))
            )

        return reversed(revs)

    def iterfiles():
        pctx = repo[b'.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(
                        _(
                            b'cannot follow file not in parent '
                            b'revision: "%s"'
                        )
                        % filename
                    )
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _(b'cannot follow nonexistent file: "%s"') % file_
                    )
                raise FileWalkError(b"Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevs(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
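
# Contract sketch (illustrative): for a match over a single file b'foo',
# walkfilerevs() returns the changelog revs touching b'foo' within
# [min(revs), max(revs)] and fills fncache, e.g. {3: [b'foo'], 7: [b'foo']};
# FileWalkError tells the caller to fall back to scanning the changelog.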


class _followfilter(object):
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(
                    lambda x: x != nullrev, self.repo.changelog.parentrevs(rev)
                )

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False


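# How _followfilter is used (descriptive note, cf. the prune loop in
# walkchangerevs() below): the first rev fed to match() becomes startrev;
# feeding strictly descending revs afterwards answers "is this rev startrev
# or one of its ancestors?", while ascending revs are matched as descendants.

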
2426 def walkchangerevs(repo, match, opts, prepare):
2428 def walkchangerevs(repo, match, opts, prepare):
2427 '''Iterate over files and the revs in which they changed.
2429 '''Iterate over files and the revs in which they changed.
2428
2430
2429 Callers most commonly need to iterate backwards over the history
2431 Callers most commonly need to iterate backwards over the history
2430 in which they are interested. Doing so has awful (quadratic-looking)
2432 in which they are interested. Doing so has awful (quadratic-looking)
2431 performance, so we use iterators in a "windowed" way.
2433 performance, so we use iterators in a "windowed" way.
2432
2434
2433 We walk a window of revisions in the desired order. Within the
2435 We walk a window of revisions in the desired order. Within the
2434 window, we first walk forwards to gather data, then in the desired
2436 window, we first walk forwards to gather data, then in the desired
2435 order (usually backwards) to display it.
2437 order (usually backwards) to display it.
2436
2438
2437 This function returns an iterator yielding contexts. Before
2439 This function returns an iterator yielding contexts. Before
2438 yielding each context, the iterator will first call the prepare
2440 yielding each context, the iterator will first call the prepare
2439 function on each context in the window in forward order.'''
2441 function on each context in the window in forward order.'''
2440
2442
2441 allfiles = opts.get(b'all_files')
2443 allfiles = opts.get(b'all_files')
2442 follow = opts.get(b'follow') or opts.get(b'follow_first')
2444 follow = opts.get(b'follow') or opts.get(b'follow_first')
2443 revs = _walkrevs(repo, opts)
2445 revs = _walkrevs(repo, opts)
2444 if not revs:
2446 if not revs:
2445 return []
2447 return []
2446 wanted = set()
2448 wanted = set()
2447 slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
2449 slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
2448 fncache = {}
2450 fncache = {}
2449 change = repo.__getitem__
2451 change = repo.__getitem__
2450
2452
2451 # First step is to fill wanted, the set of revisions that we want to yield.
2453 # First step is to fill wanted, the set of revisions that we want to yield.
2452 # When it does not induce extra cost, we also fill fncache for revisions in
2454 # When it does not induce extra cost, we also fill fncache for revisions in
2453 # wanted: a cache of filenames that were changed (ctx.files()) and that
2455 # wanted: a cache of filenames that were changed (ctx.files()) and that
2454 # match the file filtering conditions.
2456 # match the file filtering conditions.
2455
2457
2456 if match.always() or allfiles:
2458 if match.always() or allfiles:
2457 # No files, no patterns. Display all revs.
2459 # No files, no patterns. Display all revs.
2458 wanted = revs
2460 wanted = revs
2459 elif not slowpath:
2461 elif not slowpath:
2460 # We only have to read through the filelog to find wanted revisions
2462 # We only have to read through the filelog to find wanted revisions
2461
2463
2462 try:
2464 try:
2463 wanted = walkfilerevs(repo, match, follow, revs, fncache)
2465 wanted = walkfilerevs(repo, match, follow, revs, fncache)
2464 except FileWalkError:
2466 except FileWalkError:
2465 slowpath = True
2467 slowpath = True
2466
2468
2467 # We decided to fall back to the slowpath because at least one
2469 # We decided to fall back to the slowpath because at least one
2468 # of the paths was not a file. Check to see if at least one of them
2470 # of the paths was not a file. Check to see if at least one of them
2469 # existed in history, otherwise simply return
2471 # existed in history, otherwise simply return
2470 for path in match.files():
2472 for path in match.files():
2471 if path == b'.' or path in repo.store:
2473 if path == b'.' or path in repo.store:
2472 break
2474 break
2473 else:
2475 else:
2474 return []
2476 return []
2475
2477
2476 if slowpath:
2478 if slowpath:
2477 # We have to read the changelog to match filenames against
2479 # We have to read the changelog to match filenames against
2478 # changed files
2480 # changed files
2479
2481
2480 if follow:
2482 if follow:
2481 raise error.Abort(
2483 raise error.Abort(
2482 _(b'can only follow copies/renames for explicit filenames')
2484 _(b'can only follow copies/renames for explicit filenames')
2483 )
2485 )
2484
2486
2485 # The slow path checks files modified in every changeset.
2487 # The slow path checks files modified in every changeset.
2486 # This is really slow on large repos, so compute the set lazily.
2488 # This is really slow on large repos, so compute the set lazily.
2487 class lazywantedset(object):
2489 class lazywantedset(object):
2488 def __init__(self):
2490 def __init__(self):
2489 self.set = set()
2491 self.set = set()
2490 self.revs = set(revs)
2492 self.revs = set(revs)
2491
2493
2492 # No need to worry about locality here because it will be accessed
2494 # No need to worry about locality here because it will be accessed
2493 # in the same order as the increasing window below.
2495 # in the same order as the increasing window below.
2494 def __contains__(self, value):
2496 def __contains__(self, value):
2495 if value in self.set:
2497 if value in self.set:
2496 return True
2498 return True
2497 elif not value in self.revs:
2499 elif not value in self.revs:
2498 return False
2500 return False
2499 else:
2501 else:
2500 self.revs.discard(value)
2502 self.revs.discard(value)
2501 ctx = change(value)
2503 ctx = change(value)
2502 if allfiles:
2504 if allfiles:
2503 matches = list(ctx.manifest().walk(match))
2505 matches = list(ctx.manifest().walk(match))
2504 else:
2506 else:
2505 matches = [f for f in ctx.files() if match(f)]
2507 matches = [f for f in ctx.files() if match(f)]
2506 if matches:
2508 if matches:
2507 fncache[value] = matches
2509 fncache[value] = matches
2508 self.set.add(value)
2510 self.set.add(value)
2509 return True
2511 return True
2510 return False
2512 return False
2511
2513
2512 def discard(self, value):
2514 def discard(self, value):
2513 self.revs.discard(value)
2515 self.revs.discard(value)
2514 self.set.discard(value)
2516 self.set.discard(value)
2515
2517
2516 wanted = lazywantedset()
2518 wanted = lazywantedset()
2517
2519
2518 # it might be worthwhile to do this in the iterator if the rev range
2520 # it might be worthwhile to do this in the iterator if the rev range
2519 # is descending and the prune args are all within that range
2521 # is descending and the prune args are all within that range
2520 for rev in opts.get(b'prune', ()):
2522 for rev in opts.get(b'prune', ()):
2521 rev = repo[rev].rev()
2523 rev = repo[rev].rev()
2522 ff = _followfilter(repo)
2524 ff = _followfilter(repo)
2523 stop = min(revs[0], revs[-1])
2525 stop = min(revs[0], revs[-1])
2524 for x in pycompat.xrange(rev, stop - 1, -1):
2526 for x in pycompat.xrange(rev, stop - 1, -1):
2525 if ff.match(x):
2527 if ff.match(x):
2526 wanted = wanted - [x]
2528 wanted = wanted - [x]
2527
2529
2528 # Now that wanted is correctly initialized, we can iterate over the
2530 # Now that wanted is correctly initialized, we can iterate over the
2529 # revision range, yielding only revisions in wanted.
2531 # revision range, yielding only revisions in wanted.
2530 def iterate():
2532 def iterate():
2531 if follow and match.always():
2533 if follow and match.always():
2532 ff = _followfilter(repo, onlyfirst=opts.get(b'follow_first'))
2534 ff = _followfilter(repo, onlyfirst=opts.get(b'follow_first'))
2533
2535
2534 def want(rev):
2536 def want(rev):
2535 return ff.match(rev) and rev in wanted
2537 return ff.match(rev) and rev in wanted
2536
2538
2537 else:
2539 else:
2538
2540
2539 def want(rev):
2541 def want(rev):
2540 return rev in wanted
2542 return rev in wanted
2541
2543
2542 it = iter(revs)
2544 it = iter(revs)
2543 stopiteration = False
2545 stopiteration = False
2544 for windowsize in increasingwindows():
2546 for windowsize in increasingwindows():
2545 nrevs = []
2547 nrevs = []
2546 for i in pycompat.xrange(windowsize):
2548 for i in pycompat.xrange(windowsize):
2547 rev = next(it, None)
2549 rev = next(it, None)
2548 if rev is None:
2550 if rev is None:
2549 stopiteration = True
2551 stopiteration = True
2550 break
2552 break
2551 elif want(rev):
2553 elif want(rev):
2552 nrevs.append(rev)
2554 nrevs.append(rev)
2553 for rev in sorted(nrevs):
2555 for rev in sorted(nrevs):
2554 fns = fncache.get(rev)
2556 fns = fncache.get(rev)
2555 ctx = change(rev)
2557 ctx = change(rev)
2556 if not fns:
2558 if not fns:
2557
2559
2558 def fns_generator():
2560 def fns_generator():
2559 if allfiles:
2561 if allfiles:
2560
2562
2561 def bad(f, msg):
2563 def bad(f, msg):
2562 pass
2564 pass
2563
2565
2564 for f in ctx.matches(matchmod.badmatch(match, bad)):
2566 for f in ctx.matches(matchmod.badmatch(match, bad)):
2565 yield f
2567 yield f
2566 else:
2568 else:
2567 for f in ctx.files():
2569 for f in ctx.files():
2568 if match(f):
2570 if match(f):
2569 yield f
2571 yield f
2570
2572
2571 fns = fns_generator()
2573 fns = fns_generator()
2572 prepare(ctx, fns)
2574 prepare(ctx, fns)
2573 for rev in nrevs:
2575 for rev in nrevs:
2574 yield change(rev)
2576 yield change(rev)
2575
2577
2576 if stopiteration:
2578 if stopiteration:
2577 break
2579 break
2578
2580
2579 return iterate()
2581 return iterate()
2580
2582
2581
2583
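# The iterate() closure above pulls revisions from `it` in windows of
# increasing size, filters each window through want(), and yields the
# survivors sorted per window. A minimal standalone sketch of that pattern
# (the window sizes, 8 doubling up to 512, are an assumption for
# illustration; the real generator, increasingwindows(), is defined
# outside this excerpt):
def _demo_increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2

def _demo_iterate(revs, want):
    it = iter(revs)
    stopiteration = False
    for windowsize in _demo_increasingwindows():
        nrevs = []
        for _ in range(windowsize):
            rev = next(it, None)
            if rev is None:
                stopiteration = True
                break
            if want(rev):
                nrevs.append(rev)
        for rev in sorted(nrevs):
            yield rev
        if stopiteration:
            break

# list(_demo_iterate(range(100, 0, -1), lambda r: r % 10 == 0)) yields each
# window's matches in ascending order while windows follow the input order.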
2582 def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
2584 def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
2583 bad = []
2585 bad = []
2584
2586
2585 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2587 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2586 names = []
2588 names = []
2587 wctx = repo[None]
2589 wctx = repo[None]
2588 cca = None
2590 cca = None
2589 abort, warn = scmutil.checkportabilityalert(ui)
2591 abort, warn = scmutil.checkportabilityalert(ui)
2590 if abort or warn:
2592 if abort or warn:
2591 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2593 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2592
2594
2593 match = repo.narrowmatch(match, includeexact=True)
2595 match = repo.narrowmatch(match, includeexact=True)
2594 badmatch = matchmod.badmatch(match, badfn)
2596 badmatch = matchmod.badmatch(match, badfn)
2595 dirstate = repo.dirstate
2597 dirstate = repo.dirstate
2596 # We don't want to just call wctx.walk here, since it would return a lot of
2598 # We don't want to just call wctx.walk here, since it would return a lot of
2597 # clean files, which we aren't interested in, and doing so takes time.
2599 # clean files, which we aren't interested in, and doing so takes time.
2598 for f in sorted(
2600 for f in sorted(
2599 dirstate.walk(
2601 dirstate.walk(
2600 badmatch,
2602 badmatch,
2601 subrepos=sorted(wctx.substate),
2603 subrepos=sorted(wctx.substate),
2602 unknown=True,
2604 unknown=True,
2603 ignored=False,
2605 ignored=False,
2604 full=False,
2606 full=False,
2605 )
2607 )
2606 ):
2608 ):
2607 exact = match.exact(f)
2609 exact = match.exact(f)
2608 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2610 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2609 if cca:
2611 if cca:
2610 cca(f)
2612 cca(f)
2611 names.append(f)
2613 names.append(f)
2612 if ui.verbose or not exact:
2614 if ui.verbose or not exact:
2613 ui.status(
2615 ui.status(
2614 _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
2616 _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
2615 )
2617 )
2616
2618
2617 for subpath in sorted(wctx.substate):
2619 for subpath in sorted(wctx.substate):
2618 sub = wctx.sub(subpath)
2620 sub = wctx.sub(subpath)
2619 try:
2621 try:
2620 submatch = matchmod.subdirmatcher(subpath, match)
2622 submatch = matchmod.subdirmatcher(subpath, match)
2621 subprefix = repo.wvfs.reljoin(prefix, subpath)
2623 subprefix = repo.wvfs.reljoin(prefix, subpath)
2622 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2624 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2623 if opts.get('subrepos'):
2625 if opts.get('subrepos'):
2624 bad.extend(
2626 bad.extend(
2625 sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
2627 sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
2626 )
2628 )
2627 else:
2629 else:
2628 bad.extend(
2630 bad.extend(
2629 sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
2631 sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
2630 )
2632 )
2631 except error.LookupError:
2633 except error.LookupError:
2632 ui.status(
2634 ui.status(
2633 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2635 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2634 )
2636 )
2635
2637
2636 if not opts.get('dry_run'):
2638 if not opts.get('dry_run'):
2637 rejected = wctx.add(names, prefix)
2639 rejected = wctx.add(names, prefix)
2638 bad.extend(f for f in rejected if f in match.files())
2640 bad.extend(f for f in rejected if f in match.files())
2639 return bad
2641 return bad
2640
2642
2641
2643
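# add() above (and forget() further down) wraps the matcher's bad-file
# callback as "lambda x, y: bad.append(x) or match.bad(x, y)": since
# list.append() returns None, the `or` always falls through, so the path
# is recorded and the original handler still fires. A standalone sketch of
# the idiom (names are illustrative):
def _demo_wrap_bad(origbad):
    bad = []
    badfn = lambda x, y: bad.append(x) or origbad(x, y)
    return bad, badfn

# bad, badfn = _demo_wrap_bad(lambda x, y: print('%s: %s' % (x, y)))
# badfn('f.txt', 'no such file')  # prints the warning; bad == ['f.txt']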
2642 def addwebdirpath(repo, serverpath, webconf):
2644 def addwebdirpath(repo, serverpath, webconf):
2643 webconf[serverpath] = repo.root
2645 webconf[serverpath] = repo.root
2644 repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))
2646 repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))
2645
2647
2646 for r in repo.revs(b'filelog("path:.hgsub")'):
2648 for r in repo.revs(b'filelog("path:.hgsub")'):
2647 ctx = repo[r]
2649 ctx = repo[r]
2648 for subpath in ctx.substate:
2650 for subpath in ctx.substate:
2649 ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2651 ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2650
2652
2651
2653
2652 def forget(
2654 def forget(
2653 ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
2655 ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
2654 ):
2656 ):
2655 if dryrun and interactive:
2657 if dryrun and interactive:
2656 raise error.Abort(_(b"cannot specify both --dry-run and --interactive"))
2658 raise error.Abort(_(b"cannot specify both --dry-run and --interactive"))
2657 bad = []
2659 bad = []
2658 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2660 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2659 wctx = repo[None]
2661 wctx = repo[None]
2660 forgot = []
2662 forgot = []
2661
2663
2662 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2664 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2663 forget = sorted(s.modified + s.added + s.deleted + s.clean)
2665 forget = sorted(s.modified + s.added + s.deleted + s.clean)
2664 if explicitonly:
2666 if explicitonly:
2665 forget = [f for f in forget if match.exact(f)]
2667 forget = [f for f in forget if match.exact(f)]
2666
2668
2667 for subpath in sorted(wctx.substate):
2669 for subpath in sorted(wctx.substate):
2668 sub = wctx.sub(subpath)
2670 sub = wctx.sub(subpath)
2669 submatch = matchmod.subdirmatcher(subpath, match)
2671 submatch = matchmod.subdirmatcher(subpath, match)
2670 subprefix = repo.wvfs.reljoin(prefix, subpath)
2672 subprefix = repo.wvfs.reljoin(prefix, subpath)
2671 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2673 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2672 try:
2674 try:
2673 subbad, subforgot = sub.forget(
2675 subbad, subforgot = sub.forget(
2674 submatch,
2676 submatch,
2675 subprefix,
2677 subprefix,
2676 subuipathfn,
2678 subuipathfn,
2677 dryrun=dryrun,
2679 dryrun=dryrun,
2678 interactive=interactive,
2680 interactive=interactive,
2679 )
2681 )
2680 bad.extend([subpath + b'/' + f for f in subbad])
2682 bad.extend([subpath + b'/' + f for f in subbad])
2681 forgot.extend([subpath + b'/' + f for f in subforgot])
2683 forgot.extend([subpath + b'/' + f for f in subforgot])
2682 except error.LookupError:
2684 except error.LookupError:
2683 ui.status(
2685 ui.status(
2684 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2686 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2685 )
2687 )
2686
2688
2687 if not explicitonly:
2689 if not explicitonly:
2688 for f in match.files():
2690 for f in match.files():
2689 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2691 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2690 if f not in forgot:
2692 if f not in forgot:
2691 if repo.wvfs.exists(f):
2693 if repo.wvfs.exists(f):
2692 # Don't complain if the exact case match wasn't given.
2694 # Don't complain if the exact case match wasn't given.
2693 # But don't do this until after checking 'forgot', so
2695 # But don't do this until after checking 'forgot', so
2694 # that subrepo files aren't normalized, and this op is
2696 # that subrepo files aren't normalized, and this op is
2695 # purely from data cached by the status walk above.
2697 # purely from data cached by the status walk above.
2696 if repo.dirstate.normalize(f) in repo.dirstate:
2698 if repo.dirstate.normalize(f) in repo.dirstate:
2697 continue
2699 continue
2698 ui.warn(
2700 ui.warn(
2699 _(
2701 _(
2700 b'not removing %s: '
2702 b'not removing %s: '
2701 b'file is already untracked\n'
2703 b'file is already untracked\n'
2702 )
2704 )
2703 % uipathfn(f)
2705 % uipathfn(f)
2704 )
2706 )
2705 bad.append(f)
2707 bad.append(f)
2706
2708
2707 if interactive:
2709 if interactive:
2708 responses = _(
2710 responses = _(
2709 b'[Ynsa?]'
2711 b'[Ynsa?]'
2710 b'$$ &Yes, forget this file'
2712 b'$$ &Yes, forget this file'
2711 b'$$ &No, skip this file'
2713 b'$$ &No, skip this file'
2712 b'$$ &Skip remaining files'
2714 b'$$ &Skip remaining files'
2713 b'$$ Include &all remaining files'
2715 b'$$ Include &all remaining files'
2714 b'$$ &? (display help)'
2716 b'$$ &? (display help)'
2715 )
2717 )
2716 for filename in forget[:]:
2718 for filename in forget[:]:
2717 r = ui.promptchoice(
2719 r = ui.promptchoice(
2718 _(b'forget %s %s') % (uipathfn(filename), responses)
2720 _(b'forget %s %s') % (uipathfn(filename), responses)
2719 )
2721 )
2720 if r == 4: # ?
2722 if r == 4: # ?
2721 while r == 4:
2723 while r == 4:
2722 for c, t in ui.extractchoices(responses)[1]:
2724 for c, t in ui.extractchoices(responses)[1]:
2723 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
2725 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
2724 r = ui.promptchoice(
2726 r = ui.promptchoice(
2725 _(b'forget %s %s') % (uipathfn(filename), responses)
2727 _(b'forget %s %s') % (uipathfn(filename), responses)
2726 )
2728 )
2727 if r == 0: # yes
2729 if r == 0: # yes
2728 continue
2730 continue
2729 elif r == 1: # no
2731 elif r == 1: # no
2730 forget.remove(filename)
2732 forget.remove(filename)
2731 elif r == 2: # Skip
2733 elif r == 2: # Skip
2732 fnindex = forget.index(filename)
2734 fnindex = forget.index(filename)
2733 del forget[fnindex:]
2735 del forget[fnindex:]
2734 break
2736 break
2735 elif r == 3: # All
2737 elif r == 3: # All
2736 break
2738 break
2737
2739
2738 for f in forget:
2740 for f in forget:
2739 if ui.verbose or not match.exact(f) or interactive:
2741 if ui.verbose or not match.exact(f) or interactive:
2740 ui.status(
2742 ui.status(
2741 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2743 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2742 )
2744 )
2743
2745
2744 if not dryrun:
2746 if not dryrun:
2745 rejected = wctx.forget(forget, prefix)
2747 rejected = wctx.forget(forget, prefix)
2746 bad.extend(f for f in rejected if f in match.files())
2748 bad.extend(f for f in rejected if f in match.files())
2747 forgot.extend(f for f in forget if f not in rejected)
2749 forgot.extend(f for f in forget if f not in rejected)
2748 return bad, forgot
2750 return bad, forgot
2749
2751
2750
2752
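# The interactive loop in forget() above leans on ui.promptchoice()'s
# prompt format: all choices live in one '$$'-separated string, with '&'
# marking each shortcut key. A simplified standalone parser sketch of that
# convention (an approximation of what ui.extractchoices() returns, not
# its actual implementation):
def _demo_extractchoices(prompt):
    parts = prompt.split(b'$$')
    msg, choices = parts[0].rstrip(), []
    for choice in parts[1:]:
        choice = choice.strip()
        i = choice.index(b'&')
        choices.append((choice[i + 1 : i + 2].lower(), choice.replace(b'&', b'')))
    return msg, choices

# _demo_extractchoices(b'forget f [Ynsa?] $$ &Yes $$ &No $$ &Skip $$ include &all')
# -> (b'forget f [Ynsa?]',
#     [(b'y', b'Yes'), (b'n', b'No'), (b's', b'Skip'), (b'a', b'include all')])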
2751 def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
2753 def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
2752 ret = 1
2754 ret = 1
2753
2755
2754 needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
2756 needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
2755 if fm.isplain() and not needsfctx:
2757 if fm.isplain() and not needsfctx:
2756 # Fast path. The speed-up comes from skipping the formatter, and batching
2758 # Fast path. The speed-up comes from skipping the formatter, and batching
2757 # calls to ui.write.
2759 # calls to ui.write.
2758 buf = []
2760 buf = []
2759 for f in ctx.matches(m):
2761 for f in ctx.matches(m):
2760 buf.append(fmt % uipathfn(f))
2762 buf.append(fmt % uipathfn(f))
2761 if len(buf) > 100:
2763 if len(buf) > 100:
2762 ui.write(b''.join(buf))
2764 ui.write(b''.join(buf))
2763 del buf[:]
2765 del buf[:]
2764 ret = 0
2766 ret = 0
2765 if buf:
2767 if buf:
2766 ui.write(b''.join(buf))
2768 ui.write(b''.join(buf))
2767 else:
2769 else:
2768 for f in ctx.matches(m):
2770 for f in ctx.matches(m):
2769 fm.startitem()
2771 fm.startitem()
2770 fm.context(ctx=ctx)
2772 fm.context(ctx=ctx)
2771 if needsfctx:
2773 if needsfctx:
2772 fc = ctx[f]
2774 fc = ctx[f]
2773 fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
2775 fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
2774 fm.data(path=f)
2776 fm.data(path=f)
2775 fm.plain(fmt % uipathfn(f))
2777 fm.plain(fmt % uipathfn(f))
2776 ret = 0
2778 ret = 0
2777
2779
2778 for subpath in sorted(ctx.substate):
2780 for subpath in sorted(ctx.substate):
2779 submatch = matchmod.subdirmatcher(subpath, m)
2781 submatch = matchmod.subdirmatcher(subpath, m)
2780 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2782 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2781 if subrepos or m.exact(subpath) or any(submatch.files()):
2783 if subrepos or m.exact(subpath) or any(submatch.files()):
2782 sub = ctx.sub(subpath)
2784 sub = ctx.sub(subpath)
2783 try:
2785 try:
2784 recurse = m.exact(subpath) or subrepos
2786 recurse = m.exact(subpath) or subrepos
2785 if (
2787 if (
2786 sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
2788 sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
2787 == 0
2789 == 0
2788 ):
2790 ):
2789 ret = 0
2791 ret = 0
2790 except error.LookupError:
2792 except error.LookupError:
2791 ui.status(
2793 ui.status(
2792 _(b"skipping missing subrepository: %s\n")
2794 _(b"skipping missing subrepository: %s\n")
2793 % uipathfn(subpath)
2795 % uipathfn(subpath)
2794 )
2796 )
2795
2797
2796 return ret
2798 return ret
2797
2799
2798
2800
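# The fast path in files() above batches output: formatted lines pile up
# in a buffer that is flushed through a single write once it grows past a
# threshold, cutting per-call overhead. The same pattern as a standalone
# sketch (sys.stdout stands in for ui.write; 100 mirrors the code above):
import sys

def _demo_batched_write(lines, flushevery=100):
    buf = []
    for line in lines:
        buf.append(line)
        if len(buf) > flushevery:
            sys.stdout.write(''.join(buf))
            del buf[:]
    if buf:
        sys.stdout.write(''.join(buf))

# _demo_batched_write('file%d\n' % i for i in range(250))  # 3 writes, not 250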
2799 def remove(
2801 def remove(
2800 ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
2802 ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
2801 ):
2803 ):
2802 ret = 0
2804 ret = 0
2803 s = repo.status(match=m, clean=True)
2805 s = repo.status(match=m, clean=True)
2804 modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean
2806 modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean
2805
2807
2806 wctx = repo[None]
2808 wctx = repo[None]
2807
2809
2808 if warnings is None:
2810 if warnings is None:
2809 warnings = []
2811 warnings = []
2810 warn = True
2812 warn = True
2811 else:
2813 else:
2812 warn = False
2814 warn = False
2813
2815
2814 subs = sorted(wctx.substate)
2816 subs = sorted(wctx.substate)
2815 progress = ui.makeprogress(
2817 progress = ui.makeprogress(
2816 _(b'searching'), total=len(subs), unit=_(b'subrepos')
2818 _(b'searching'), total=len(subs), unit=_(b'subrepos')
2817 )
2819 )
2818 for subpath in subs:
2820 for subpath in subs:
2819 submatch = matchmod.subdirmatcher(subpath, m)
2821 submatch = matchmod.subdirmatcher(subpath, m)
2820 subprefix = repo.wvfs.reljoin(prefix, subpath)
2822 subprefix = repo.wvfs.reljoin(prefix, subpath)
2821 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2823 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2822 if subrepos or m.exact(subpath) or any(submatch.files()):
2824 if subrepos or m.exact(subpath) or any(submatch.files()):
2823 progress.increment()
2825 progress.increment()
2824 sub = wctx.sub(subpath)
2826 sub = wctx.sub(subpath)
2825 try:
2827 try:
2826 if sub.removefiles(
2828 if sub.removefiles(
2827 submatch,
2829 submatch,
2828 subprefix,
2830 subprefix,
2829 subuipathfn,
2831 subuipathfn,
2830 after,
2832 after,
2831 force,
2833 force,
2832 subrepos,
2834 subrepos,
2833 dryrun,
2835 dryrun,
2834 warnings,
2836 warnings,
2835 ):
2837 ):
2836 ret = 1
2838 ret = 1
2837 except error.LookupError:
2839 except error.LookupError:
2838 warnings.append(
2840 warnings.append(
2839 _(b"skipping missing subrepository: %s\n")
2841 _(b"skipping missing subrepository: %s\n")
2840 % uipathfn(subpath)
2842 % uipathfn(subpath)
2841 )
2843 )
2842 progress.complete()
2844 progress.complete()
2843
2845
2844 # warn about failure to delete explicit files/dirs
2846 # warn about failure to delete explicit files/dirs
2845 deleteddirs = pathutil.dirs(deleted)
2847 deleteddirs = pathutil.dirs(deleted)
2846 files = m.files()
2848 files = m.files()
2847 progress = ui.makeprogress(
2849 progress = ui.makeprogress(
2848 _(b'deleting'), total=len(files), unit=_(b'files')
2850 _(b'deleting'), total=len(files), unit=_(b'files')
2849 )
2851 )
2850 for f in files:
2852 for f in files:
2851
2853
2852 def insubrepo():
2854 def insubrepo():
2853 for subpath in wctx.substate:
2855 for subpath in wctx.substate:
2854 if f.startswith(subpath + b'/'):
2856 if f.startswith(subpath + b'/'):
2855 return True
2857 return True
2856 return False
2858 return False
2857
2859
2858 progress.increment()
2860 progress.increment()
2859 isdir = f in deleteddirs or wctx.hasdir(f)
2861 isdir = f in deleteddirs or wctx.hasdir(f)
2860 if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
2862 if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
2861 continue
2863 continue
2862
2864
2863 if repo.wvfs.exists(f):
2865 if repo.wvfs.exists(f):
2864 if repo.wvfs.isdir(f):
2866 if repo.wvfs.isdir(f):
2865 warnings.append(
2867 warnings.append(
2866 _(b'not removing %s: no tracked files\n') % uipathfn(f)
2868 _(b'not removing %s: no tracked files\n') % uipathfn(f)
2867 )
2869 )
2868 else:
2870 else:
2869 warnings.append(
2871 warnings.append(
2870 _(b'not removing %s: file is untracked\n') % uipathfn(f)
2872 _(b'not removing %s: file is untracked\n') % uipathfn(f)
2871 )
2873 )
2872 # missing files will generate a warning elsewhere
2874 # missing files will generate a warning elsewhere
2873 ret = 1
2875 ret = 1
2874 progress.complete()
2876 progress.complete()
2875
2877
2876 if force:
2878 if force:
2877 list = modified + deleted + clean + added
2879 list = modified + deleted + clean + added
2878 elif after:
2880 elif after:
2879 list = deleted
2881 list = deleted
2880 remaining = modified + added + clean
2882 remaining = modified + added + clean
2881 progress = ui.makeprogress(
2883 progress = ui.makeprogress(
2882 _(b'skipping'), total=len(remaining), unit=_(b'files')
2884 _(b'skipping'), total=len(remaining), unit=_(b'files')
2883 )
2885 )
2884 for f in remaining:
2886 for f in remaining:
2885 progress.increment()
2887 progress.increment()
2886 if ui.verbose or (f in files):
2888 if ui.verbose or (f in files):
2887 warnings.append(
2889 warnings.append(
2888 _(b'not removing %s: file still exists\n') % uipathfn(f)
2890 _(b'not removing %s: file still exists\n') % uipathfn(f)
2889 )
2891 )
2890 ret = 1
2892 ret = 1
2891 progress.complete()
2893 progress.complete()
2892 else:
2894 else:
2893 list = deleted + clean
2895 list = deleted + clean
2894 progress = ui.makeprogress(
2896 progress = ui.makeprogress(
2895 _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
2897 _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
2896 )
2898 )
2897 for f in modified:
2899 for f in modified:
2898 progress.increment()
2900 progress.increment()
2899 warnings.append(
2901 warnings.append(
2900 _(
2902 _(
2901 b'not removing %s: file is modified (use -f'
2903 b'not removing %s: file is modified (use -f'
2902 b' to force removal)\n'
2904 b' to force removal)\n'
2903 )
2905 )
2904 % uipathfn(f)
2906 % uipathfn(f)
2905 )
2907 )
2906 ret = 1
2908 ret = 1
2907 for f in added:
2909 for f in added:
2908 progress.increment()
2910 progress.increment()
2909 warnings.append(
2911 warnings.append(
2910 _(
2912 _(
2911 b"not removing %s: file has been marked for add"
2913 b"not removing %s: file has been marked for add"
2912 b" (use 'hg forget' to undo add)\n"
2914 b" (use 'hg forget' to undo add)\n"
2913 )
2915 )
2914 % uipathfn(f)
2916 % uipathfn(f)
2915 )
2917 )
2916 ret = 1
2918 ret = 1
2917 progress.complete()
2919 progress.complete()
2918
2920
2919 list = sorted(list)
2921 list = sorted(list)
2920 progress = ui.makeprogress(
2922 progress = ui.makeprogress(
2921 _(b'deleting'), total=len(list), unit=_(b'files')
2923 _(b'deleting'), total=len(list), unit=_(b'files')
2922 )
2924 )
2923 for f in list:
2925 for f in list:
2924 if ui.verbose or not m.exact(f):
2926 if ui.verbose or not m.exact(f):
2925 progress.increment()
2927 progress.increment()
2926 ui.status(
2928 ui.status(
2927 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2929 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2928 )
2930 )
2929 progress.complete()
2931 progress.complete()
2930
2932
2931 if not dryrun:
2933 if not dryrun:
2932 with repo.wlock():
2934 with repo.wlock():
2933 if not after:
2935 if not after:
2934 for f in list:
2936 for f in list:
2935 if f in added:
2937 if f in added:
2936 continue # we never unlink added files on remove
2938 continue # we never unlink added files on remove
2937 rmdir = repo.ui.configbool(
2939 rmdir = repo.ui.configbool(
2938 b'experimental', b'removeemptydirs'
2940 b'experimental', b'removeemptydirs'
2939 )
2941 )
2940 repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
2942 repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
2941 repo[None].forget(list)
2943 repo[None].forget(list)
2942
2944
2943 if warn:
2945 if warn:
2944 for warning in warnings:
2946 for warning in warnings:
2945 ui.warn(warning)
2947 ui.warn(warning)
2946
2948
2947 return ret
2949 return ret
2948
2950
2949
2951
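# remove() above chooses which paths actually get unlinked from the status
# buckets computed at the top of the function. The selection, restated as
# a compact standalone function over plain lists:
def _demo_remove_selection(modified, added, deleted, clean, force, after):
    if force:
        return modified + deleted + clean + added
    if after:
        return deleted  # --after: only files already gone from disk
    return deleted + clean

# _demo_remove_selection(['m'], ['a'], ['d'], ['c'], force=False, after=False)
# -> ['d', 'c']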
2950 def _catfmtneedsdata(fm):
2952 def _catfmtneedsdata(fm):
2951 return not fm.datahint() or b'data' in fm.datahint()
2953 return not fm.datahint() or b'data' in fm.datahint()
2952
2954
2953
2955
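# _catfmtneedsdata() above treats an empty datahint as "fetch everything":
# file data is loaded unless the formatter explicitly hinted at a field
# set that excludes b'data'. The same predicate over a plain set:
def _demo_needsdata(hint):
    return not hint or b'data' in hint

# _demo_needsdata(set())      -> True   (no hint: stay conservative)
# _demo_needsdata({b'path'})  -> False  (hint given, data excluded: skip it)
# _demo_needsdata({b'data'})  -> True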
2954 def _updatecatformatter(fm, ctx, matcher, path, decode):
2956 def _updatecatformatter(fm, ctx, matcher, path, decode):
2955 """Hook for adding data to the formatter used by ``hg cat``.
2957 """Hook for adding data to the formatter used by ``hg cat``.
2956
2958
2957 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2959 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2958 this method first."""
2960 this method first."""
2959
2961
2960 # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
2962 # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
2961 # wasn't requested.
2963 # wasn't requested.
2962 data = b''
2964 data = b''
2963 if _catfmtneedsdata(fm):
2965 if _catfmtneedsdata(fm):
2964 data = ctx[path].data()
2966 data = ctx[path].data()
2965 if decode:
2967 if decode:
2966 data = ctx.repo().wwritedata(path, data)
2968 data = ctx.repo().wwritedata(path, data)
2967 fm.startitem()
2969 fm.startitem()
2968 fm.context(ctx=ctx)
2970 fm.context(ctx=ctx)
2969 fm.write(b'data', b'%s', data)
2971 fm.write(b'data', b'%s', data)
2970 fm.data(path=path)
2972 fm.data(path=path)
2971
2973
2972
2974
2973 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
2975 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
2974 err = 1
2976 err = 1
2975 opts = pycompat.byteskwargs(opts)
2977 opts = pycompat.byteskwargs(opts)
2976
2978
2977 def write(path):
2979 def write(path):
2978 filename = None
2980 filename = None
2979 if fntemplate:
2981 if fntemplate:
2980 filename = makefilename(
2982 filename = makefilename(
2981 ctx, fntemplate, pathname=os.path.join(prefix, path)
2983 ctx, fntemplate, pathname=os.path.join(prefix, path)
2982 )
2984 )
2983 # attempt to create the directory if it does not already exist
2985 # attempt to create the directory if it does not already exist
2984 try:
2986 try:
2985 os.makedirs(os.path.dirname(filename))
2987 os.makedirs(os.path.dirname(filename))
2986 except OSError:
2988 except OSError:
2987 pass
2989 pass
2988 with formatter.maybereopen(basefm, filename) as fm:
2990 with formatter.maybereopen(basefm, filename) as fm:
2989 _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))
2991 _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))
2990
2992
2991 # Automation often uses hg cat on single files, so special-case it
2993 # Automation often uses hg cat on single files, so special-case it
2992 # for performance to avoid the cost of parsing the manifest.
2994 # for performance to avoid the cost of parsing the manifest.
2993 if len(matcher.files()) == 1 and not matcher.anypats():
2995 if len(matcher.files()) == 1 and not matcher.anypats():
2994 file = matcher.files()[0]
2996 file = matcher.files()[0]
2995 mfl = repo.manifestlog
2997 mfl = repo.manifestlog
2996 mfnode = ctx.manifestnode()
2998 mfnode = ctx.manifestnode()
2997 try:
2999 try:
2998 if mfnode and mfl[mfnode].find(file)[0]:
3000 if mfnode and mfl[mfnode].find(file)[0]:
2999 if _catfmtneedsdata(basefm):
3001 if _catfmtneedsdata(basefm):
3000 scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
3002 scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
3001 write(file)
3003 write(file)
3002 return 0
3004 return 0
3003 except KeyError:
3005 except KeyError:
3004 pass
3006 pass
3005
3007
3006 if _catfmtneedsdata(basefm):
3008 if _catfmtneedsdata(basefm):
3007 scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
3009 scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
3008
3010
3009 for abs in ctx.walk(matcher):
3011 for abs in ctx.walk(matcher):
3010 write(abs)
3012 write(abs)
3011 err = 0
3013 err = 0
3012
3014
3013 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3015 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3014 for subpath in sorted(ctx.substate):
3016 for subpath in sorted(ctx.substate):
3015 sub = ctx.sub(subpath)
3017 sub = ctx.sub(subpath)
3016 try:
3018 try:
3017 submatch = matchmod.subdirmatcher(subpath, matcher)
3019 submatch = matchmod.subdirmatcher(subpath, matcher)
3018 subprefix = os.path.join(prefix, subpath)
3020 subprefix = os.path.join(prefix, subpath)
3019 if not sub.cat(
3021 if not sub.cat(
3020 submatch,
3022 submatch,
3021 basefm,
3023 basefm,
3022 fntemplate,
3024 fntemplate,
3023 subprefix,
3025 subprefix,
3024 **pycompat.strkwargs(opts)
3026 **pycompat.strkwargs(opts)
3025 ):
3027 ):
3026 err = 0
3028 err = 0
3027 except error.RepoLookupError:
3029 except error.RepoLookupError:
3028 ui.status(
3030 ui.status(
3029 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
3031 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
3030 )
3032 )
3031
3033
3032 return err
3034 return err
3033
3035
3034
3036
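# The two prefetchfiles() call sites above carry the actual change in this
# revision: the old scmutil.prefetchfiles(repo, revs, matcher) applied one
# matcher to every revision, while the new form takes a list of
# (revision, matcher) pairs so each revision can prefetch a different set
# of files. A standalone stub showing the shape of the new argument (the
# body is purely illustrative, not Mercurial's implementation):
def _demo_prefetchfiles(files_by_rev, specs):
    fetched = []
    for rev, match in specs:  # one matcher per revision
        for path in files_by_rev[rev]:
            if match(path):
                fetched.append((rev, path))
    return fetched

# _demo_prefetchfiles({1: ['a', 'b'], 2: ['b', 'c']},
#                     [(1, lambda p: p == 'a'), (2, lambda p: p == 'c')])
# -> [(1, 'a'), (2, 'c')]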
3035 def commit(ui, repo, commitfunc, pats, opts):
3037 def commit(ui, repo, commitfunc, pats, opts):
3036 '''commit the specified files or all outstanding changes'''
3038 '''commit the specified files or all outstanding changes'''
3037 date = opts.get(b'date')
3039 date = opts.get(b'date')
3038 if date:
3040 if date:
3039 opts[b'date'] = dateutil.parsedate(date)
3041 opts[b'date'] = dateutil.parsedate(date)
3040 message = logmessage(ui, opts)
3042 message = logmessage(ui, opts)
3041 matcher = scmutil.match(repo[None], pats, opts)
3043 matcher = scmutil.match(repo[None], pats, opts)
3042
3044
3043 dsguard = None
3045 dsguard = None
3044 # extract addremove carefully -- this function can be called from a command
3046 # extract addremove carefully -- this function can be called from a command
3045 # that doesn't support addremove
3047 # that doesn't support addremove
3046 if opts.get(b'addremove'):
3048 if opts.get(b'addremove'):
3047 dsguard = dirstateguard.dirstateguard(repo, b'commit')
3049 dsguard = dirstateguard.dirstateguard(repo, b'commit')
3048 with dsguard or util.nullcontextmanager():
3050 with dsguard or util.nullcontextmanager():
3049 if dsguard:
3051 if dsguard:
3050 relative = scmutil.anypats(pats, opts)
3052 relative = scmutil.anypats(pats, opts)
3051 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
3053 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
3052 if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
3054 if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
3053 raise error.Abort(
3055 raise error.Abort(
3054 _(b"failed to mark all new/missing files as added/removed")
3056 _(b"failed to mark all new/missing files as added/removed")
3055 )
3057 )
3056
3058
3057 return commitfunc(ui, repo, message, matcher, opts)
3059 return commitfunc(ui, repo, message, matcher, opts)
3058
3060
3059
3061
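# commit() above conditionally holds a dirstate guard via
# "with dsguard or util.nullcontextmanager():" -- a real context manager
# when addremove is requested, a no-op one otherwise. The stock-library
# equivalent of that pattern (names are illustrative):
import contextlib

def _demo_guarded(need_guard, guard_factory):
    cm = guard_factory() if need_guard else contextlib.nullcontext()
    with cm:
        return 'work done under (maybe) a guard'

# _demo_guarded(False, None) runs the body with a no-op context manager.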
3060 def samefile(f, ctx1, ctx2):
3062 def samefile(f, ctx1, ctx2):
3061 if f in ctx1.manifest():
3063 if f in ctx1.manifest():
3062 a = ctx1.filectx(f)
3064 a = ctx1.filectx(f)
3063 if f in ctx2.manifest():
3065 if f in ctx2.manifest():
3064 b = ctx2.filectx(f)
3066 b = ctx2.filectx(f)
3065 return not a.cmp(b) and a.flags() == b.flags()
3067 return not a.cmp(b) and a.flags() == b.flags()
3066 else:
3068 else:
3067 return False
3069 return False
3068 else:
3070 else:
3069 return f not in ctx2.manifest()
3071 return f not in ctx2.manifest()
3070
3072
3071
3073
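# samefile() above compares one path across two contexts: equal content
# and flags when present in both, "same" when absent from both, different
# otherwise. The identical logic over plain dicts mapping
# path -> (data, flags):
def _demo_samefile(f, mf1, mf2):
    if f in mf1:
        return f in mf2 and mf1[f] == mf2[f]
    return f not in mf2

# _demo_samefile('x', {'x': ('hi', '')}, {'x': ('hi', '')})  -> True
# _demo_samefile('x', {}, {})                                -> True
# _demo_samefile('x', {'x': ('hi', '')}, {})                 -> False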
3072 def amend(ui, repo, old, extra, pats, opts):
3074 def amend(ui, repo, old, extra, pats, opts):
3073 # avoid cycle context -> subrepo -> cmdutil
3075 # avoid cycle context -> subrepo -> cmdutil
3074 from . import context
3076 from . import context
3075
3077
3076 # amend will reuse the existing user if not specified, but the obsolete
3078 # amend will reuse the existing user if not specified, but the obsolete
3077 # marker creation requires that the current user's name is specified.
3079 # marker creation requires that the current user's name is specified.
3078 if obsolete.isenabled(repo, obsolete.createmarkersopt):
3080 if obsolete.isenabled(repo, obsolete.createmarkersopt):
3079 ui.username() # raise exception if username not set
3081 ui.username() # raise exception if username not set
3080
3082
3081 ui.note(_(b'amending changeset %s\n') % old)
3083 ui.note(_(b'amending changeset %s\n') % old)
3082 base = old.p1()
3084 base = old.p1()
3083
3085
3084 with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
3086 with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
3085 # Participating changesets:
3087 # Participating changesets:
3086 #
3088 #
3087 # wctx o - workingctx that contains changes from working copy
3089 # wctx o - workingctx that contains changes from working copy
3088 # | to go into amending commit
3090 # | to go into amending commit
3089 # |
3091 # |
3090 # old o - changeset to amend
3092 # old o - changeset to amend
3091 # |
3093 # |
3092 # base o - first parent of the changeset to amend
3094 # base o - first parent of the changeset to amend
3093 wctx = repo[None]
3095 wctx = repo[None]
3094
3096
3095 # Copy to avoid mutating input
3097 # Copy to avoid mutating input
3096 extra = extra.copy()
3098 extra = extra.copy()
3097 # Update extra dict from amended commit (e.g. to preserve graft
3099 # Update extra dict from amended commit (e.g. to preserve graft
3098 # source)
3100 # source)
3099 extra.update(old.extra())
3101 extra.update(old.extra())
3100
3102
3101 # Also update it from the wctx
3103 # Also update it from the wctx
3102 extra.update(wctx.extra())
3104 extra.update(wctx.extra())
3103
3105
3104 # date-only change should be ignored?
3106 # date-only change should be ignored?
3105 datemaydiffer = resolvecommitoptions(ui, opts)
3107 datemaydiffer = resolvecommitoptions(ui, opts)
3106
3108
3107 date = old.date()
3109 date = old.date()
3108 if opts.get(b'date'):
3110 if opts.get(b'date'):
3109 date = dateutil.parsedate(opts.get(b'date'))
3111 date = dateutil.parsedate(opts.get(b'date'))
3110 user = opts.get(b'user') or old.user()
3112 user = opts.get(b'user') or old.user()
3111
3113
3112 if len(old.parents()) > 1:
3114 if len(old.parents()) > 1:
3113 # ctx.files() isn't reliable for merges, so fall back to the
3115 # ctx.files() isn't reliable for merges, so fall back to the
3114 # slower repo.status() method
3116 # slower repo.status() method
3115 st = base.status(old)
3117 st = base.status(old)
3116 files = set(st.modified) | set(st.added) | set(st.removed)
3118 files = set(st.modified) | set(st.added) | set(st.removed)
3117 else:
3119 else:
3118 files = set(old.files())
3120 files = set(old.files())
3119
3121
3120 # add/remove the files to the working copy if the "addremove" option
3122 # add/remove the files to the working copy if the "addremove" option
3121 # was specified.
3123 # was specified.
3122 matcher = scmutil.match(wctx, pats, opts)
3124 matcher = scmutil.match(wctx, pats, opts)
3123 relative = scmutil.anypats(pats, opts)
3125 relative = scmutil.anypats(pats, opts)
3124 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
3126 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
3125 if opts.get(b'addremove') and scmutil.addremove(
3127 if opts.get(b'addremove') and scmutil.addremove(
3126 repo, matcher, b"", uipathfn, opts
3128 repo, matcher, b"", uipathfn, opts
3127 ):
3129 ):
3128 raise error.Abort(
3130 raise error.Abort(
3129 _(b"failed to mark all new/missing files as added/removed")
3131 _(b"failed to mark all new/missing files as added/removed")
3130 )
3132 )
3131
3133
3132 # Check subrepos. This depends on in-place wctx._status update in
3134 # Check subrepos. This depends on in-place wctx._status update in
3133 # subrepo.precommit(). To minimize the risk of this hack, we do
3135 # subrepo.precommit(). To minimize the risk of this hack, we do
3134 # nothing if .hgsub does not exist.
3136 # nothing if .hgsub does not exist.
3135 if b'.hgsub' in wctx or b'.hgsub' in old:
3137 if b'.hgsub' in wctx or b'.hgsub' in old:
3136 subs, commitsubs, newsubstate = subrepoutil.precommit(
3138 subs, commitsubs, newsubstate = subrepoutil.precommit(
3137 ui, wctx, wctx._status, matcher
3139 ui, wctx, wctx._status, matcher
3138 )
3140 )
3139 # amend should abort if commitsubrepos is enabled
3141 # amend should abort if commitsubrepos is enabled
3140 assert not commitsubs
3142 assert not commitsubs
3141 if subs:
3143 if subs:
3142 subrepoutil.writestate(repo, newsubstate)
3144 subrepoutil.writestate(repo, newsubstate)
3143
3145
3144 ms = mergestatemod.mergestate.read(repo)
3146 ms = mergestatemod.mergestate.read(repo)
3145 mergeutil.checkunresolved(ms)
3147 mergeutil.checkunresolved(ms)
3146
3148
3147 filestoamend = {f for f in wctx.files() if matcher(f)}
3149 filestoamend = {f for f in wctx.files() if matcher(f)}
3148
3150
3149 changes = len(filestoamend) > 0
3151 changes = len(filestoamend) > 0
3150 if changes:
3152 if changes:
3151 # Recompute copies (avoid recording a -> b -> a)
3153 # Recompute copies (avoid recording a -> b -> a)
3152 copied = copies.pathcopies(base, wctx, matcher)
3154 copied = copies.pathcopies(base, wctx, matcher)
3153 if old.p2().node() != nullid: # old.p2 is a bound method, always truthy
3155 if old.p2().node() != nullid: # old.p2 is a bound method, always truthy
3154 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
3156 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
3155
3157
3156 # Prune files which were reverted by the updates: if old
3158 # Prune files which were reverted by the updates: if old
3157 # introduced file X and the file was renamed in the working
3159 # introduced file X and the file was renamed in the working
3158 # copy, then those two files are the same and
3160 # copy, then those two files are the same and
3159 # we can discard X from our list of files. Likewise if X
3161 # we can discard X from our list of files. Likewise if X
3160 # was removed, it's no longer relevant. If X is missing (aka
3162 # was removed, it's no longer relevant. If X is missing (aka
3161 # deleted), old X must be preserved.
3163 # deleted), old X must be preserved.
3162 files.update(filestoamend)
3164 files.update(filestoamend)
3163 files = [
3165 files = [
3164 f
3166 f
3165 for f in files
3167 for f in files
3166 if (f not in filestoamend or not samefile(f, wctx, base))
3168 if (f not in filestoamend or not samefile(f, wctx, base))
3167 ]
3169 ]
3168
3170
3169 def filectxfn(repo, ctx_, path):
3171 def filectxfn(repo, ctx_, path):
3170 try:
3172 try:
3171 # If the file being considered is not amongst the files
3173 # If the file being considered is not amongst the files
3172 # to be amended, we should return the file context from the
3174 # to be amended, we should return the file context from the
3173 # old changeset. This avoids issues when only some files in
3175 # old changeset. This avoids issues when only some files in
3174 # the working copy are being amended but there are also
3176 # the working copy are being amended but there are also
3175 # changes to other files from the old changeset.
3177 # changes to other files from the old changeset.
3176 if path not in filestoamend:
3178 if path not in filestoamend:
3177 return old.filectx(path)
3179 return old.filectx(path)
3178
3180
3179 # Return None for removed files.
3181 # Return None for removed files.
3180 if path in wctx.removed():
3182 if path in wctx.removed():
3181 return None
3183 return None
3182
3184
3183 fctx = wctx[path]
3185 fctx = wctx[path]
3184 flags = fctx.flags()
3186 flags = fctx.flags()
3185 mctx = context.memfilectx(
3187 mctx = context.memfilectx(
3186 repo,
3188 repo,
3187 ctx_,
3189 ctx_,
3188 fctx.path(),
3190 fctx.path(),
3189 fctx.data(),
3191 fctx.data(),
3190 islink=b'l' in flags,
3192 islink=b'l' in flags,
3191 isexec=b'x' in flags,
3193 isexec=b'x' in flags,
3192 copysource=copied.get(path),
3194 copysource=copied.get(path),
3193 )
3195 )
3194 return mctx
3196 return mctx
3195 except KeyError:
3197 except KeyError:
3196 return None
3198 return None
3197
3199
3198 else:
3200 else:
3199 ui.note(_(b'copying changeset %s to %s\n') % (old, base))
3201 ui.note(_(b'copying changeset %s to %s\n') % (old, base))
3200
3202
3201 # Use version of files as in the old cset
3203 # Use version of files as in the old cset
3202 def filectxfn(repo, ctx_, path):
3204 def filectxfn(repo, ctx_, path):
3203 try:
3205 try:
3204 return old.filectx(path)
3206 return old.filectx(path)
3205 except KeyError:
3207 except KeyError:
3206 return None
3208 return None
3207
3209
3208 # See if we got a message from -m or -l; if not, open the editor with
3210 # See if we got a message from -m or -l; if not, open the editor with
3209 # the message of the changeset to amend.
3211 # the message of the changeset to amend.
3210 message = logmessage(ui, opts)
3212 message = logmessage(ui, opts)
3211
3213
3212 editform = mergeeditform(old, b'commit.amend')
3214 editform = mergeeditform(old, b'commit.amend')
3213
3215
3214 if not message:
3216 if not message:
3215 message = old.description()
3217 message = old.description()
3216 # Default if message isn't provided and --edit is not passed is to
3218 # Default if message isn't provided and --edit is not passed is to
3217 # invoke editor, but allow --no-edit. If somehow we don't have any
3219 # invoke editor, but allow --no-edit. If somehow we don't have any
3218 # description, let's always start the editor.
3220 # description, let's always start the editor.
3219 doedit = not message or opts.get(b'edit') in [True, None]
3221 doedit = not message or opts.get(b'edit') in [True, None]
3220 else:
3222 else:
3221 # Default if message is provided is to not invoke editor, but allow
3223 # Default if message is provided is to not invoke editor, but allow
3222 # --edit.
3224 # --edit.
3223 doedit = opts.get(b'edit') is True
3225 doedit = opts.get(b'edit') is True
3224 editor = getcommiteditor(edit=doedit, editform=editform)
3226 editor = getcommiteditor(edit=doedit, editform=editform)
3225
3227
3226 pureextra = extra.copy()
3228 pureextra = extra.copy()
3227 extra[b'amend_source'] = old.hex()
3229 extra[b'amend_source'] = old.hex()
3228
3230
3229 new = context.memctx(
3231 new = context.memctx(
3230 repo,
3232 repo,
3231 parents=[base.node(), old.p2().node()],
3233 parents=[base.node(), old.p2().node()],
3232 text=message,
3234 text=message,
3233 files=files,
3235 files=files,
3234 filectxfn=filectxfn,
3236 filectxfn=filectxfn,
3235 user=user,
3237 user=user,
3236 date=date,
3238 date=date,
3237 extra=extra,
3239 extra=extra,
3238 editor=editor,
3240 editor=editor,
3239 )
3241 )
3240
3242
3241 newdesc = changelog.stripdesc(new.description())
3243 newdesc = changelog.stripdesc(new.description())
3242 if (
3244 if (
3243 (not changes)
3245 (not changes)
3244 and newdesc == old.description()
3246 and newdesc == old.description()
3245 and user == old.user()
3247 and user == old.user()
3246 and (date == old.date() or datemaydiffer)
3248 and (date == old.date() or datemaydiffer)
3247 and pureextra == old.extra()
3249 and pureextra == old.extra()
3248 ):
3250 ):
3249 # nothing changed. continuing here would create a new node
3251 # nothing changed. continuing here would create a new node
3250 # anyway because of the amend_source noise.
3252 # anyway because of the amend_source noise.
3251 #
3253 #
3252 # This is not what we expect from amend.
3254 # This is not what we expect from amend.
3253 return old.node()
3255 return old.node()
3254
3256
3255 commitphase = None
3257 commitphase = None
3256 if opts.get(b'secret'):
3258 if opts.get(b'secret'):
3257 commitphase = phases.secret
3259 commitphase = phases.secret
3258 newid = repo.commitctx(new)
3260 newid = repo.commitctx(new)
3259
3261
3260 # Reroute the working copy parent to the new changeset
3262 # Reroute the working copy parent to the new changeset
3261 repo.setparents(newid, nullid)
3263 repo.setparents(newid, nullid)
3262 mapping = {old.node(): (newid,)}
3264 mapping = {old.node(): (newid,)}
3263 obsmetadata = None
3265 obsmetadata = None
3264 if opts.get(b'note'):
3266 if opts.get(b'note'):
3265 obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
3267 obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
3266 backup = ui.configbool(b'rewrite', b'backup-bundle')
3268 backup = ui.configbool(b'rewrite', b'backup-bundle')
3267 scmutil.cleanupnodes(
3269 scmutil.cleanupnodes(
3268 repo,
3270 repo,
3269 mapping,
3271 mapping,
3270 b'amend',
3272 b'amend',
3271 metadata=obsmetadata,
3273 metadata=obsmetadata,
3272 fixphase=True,
3274 fixphase=True,
3273 targetphase=commitphase,
3275 targetphase=commitphase,
3274 backup=backup,
3276 backup=backup,
3275 )
3277 )
3276
3278
3277 # Fixing the dirstate because localrepo.commitctx does not update
3279 # Fixing the dirstate because localrepo.commitctx does not update
3278 # it. This is rather convenient because we do not need to update
3280 # it. This is rather convenient because we do not need to update
3279 # the dirstate for all the files in the new commit, which commitctx
3281 # the dirstate for all the files in the new commit, which commitctx
3280 # could have done if it updated the dirstate. Now, we can
3282 # could have done if it updated the dirstate. Now, we can
3281 # selectively update the dirstate only for the amended files.
3283 # selectively update the dirstate only for the amended files.
3282 dirstate = repo.dirstate
3284 dirstate = repo.dirstate
3283
3285
3284 # Update the state of the files which were added and modified in the
3286 # Update the state of the files which were added and modified in the
3285 # amend to "normal" in the dirstate. We need to use "normallookup" since
3287 # amend to "normal" in the dirstate. We need to use "normallookup" since
3286 # the files may have changed since the command started; using "normal"
3288 # the files may have changed since the command started; using "normal"
3287 # would mark them as clean but with uncommitted contents.
3289 # would mark them as clean but with uncommitted contents.
3288 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
3290 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
3289 for f in normalfiles:
3291 for f in normalfiles:
3290 dirstate.normallookup(f)
3292 dirstate.normallookup(f)
3291
3293
3292 # Update the state of files which were removed in the amend
3294 # Update the state of files which were removed in the amend
3293 # to "removed" in the dirstate.
3295 # to "removed" in the dirstate.
3294 removedfiles = set(wctx.removed()) & filestoamend
3296 removedfiles = set(wctx.removed()) & filestoamend
3295 for f in removedfiles:
3297 for f in removedfiles:
3296 dirstate.drop(f)
3298 dirstate.drop(f)
3297
3299
3298 return newid
3300 return newid
3299
3301
3300
3302
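# The filectxfn callbacks inside amend() above implement a three-way
# contract per path: paths outside the amend keep the old changeset's
# version, removed paths map to None (which memctx reads as "deleted"),
# and everything else takes working-copy content. The decision logic
# restated over plain dicts (illustrative, no repo objects involved):
def _demo_filectx_for_amend(path, filestoamend, removed, wcfiles, oldfiles):
    if path not in filestoamend:
        return oldfiles.get(path)  # outside the amend: keep the old version
    if path in removed:
        return None                # removed in the amend
    return wcfiles[path]           # amended: take working-copy content

# _demo_filectx_for_amend('a', {'a'}, set(), {'a': 'new'}, {'a': 'old'})
# -> 'new'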
3301 def commiteditor(repo, ctx, subs, editform=b''):
3303 def commiteditor(repo, ctx, subs, editform=b''):
3302 if ctx.description():
3304 if ctx.description():
3303 return ctx.description()
3305 return ctx.description()
3304 return commitforceeditor(
3306 return commitforceeditor(
3305 repo, ctx, subs, editform=editform, unchangedmessagedetection=True
3307 repo, ctx, subs, editform=editform, unchangedmessagedetection=True
3306 )
3308 )
3307
3309
3308
3310
3309 def commitforceeditor(
3311 def commitforceeditor(
3310 repo,
3312 repo,
3311 ctx,
3313 ctx,
3312 subs,
3314 subs,
3313 finishdesc=None,
3315 finishdesc=None,
3314 extramsg=None,
3316 extramsg=None,
3315 editform=b'',
3317 editform=b'',
3316 unchangedmessagedetection=False,
3318 unchangedmessagedetection=False,
3317 ):
3319 ):
3318 if not extramsg:
3320 if not extramsg:
3319 extramsg = _(b"Leave message empty to abort commit.")
3321 extramsg = _(b"Leave message empty to abort commit.")
3320
3322
3321 forms = [e for e in editform.split(b'.') if e]
3323 forms = [e for e in editform.split(b'.') if e]
3322 forms.insert(0, b'changeset')
3324 forms.insert(0, b'changeset')
3323 templatetext = None
3325 templatetext = None
3324 while forms:
3326 while forms:
3325 ref = b'.'.join(forms)
3327 ref = b'.'.join(forms)
3326 if repo.ui.config(b'committemplate', ref):
3328 if repo.ui.config(b'committemplate', ref):
3327 templatetext = committext = buildcommittemplate(
3329 templatetext = committext = buildcommittemplate(
3328 repo, ctx, subs, extramsg, ref
3330 repo, ctx, subs, extramsg, ref
3329 )
3331 )
3330 break
3332 break
3331 forms.pop()
3333 forms.pop()
3332 else:
3334 else:
3333 committext = buildcommittext(repo, ctx, subs, extramsg)
3335 committext = buildcommittext(repo, ctx, subs, extramsg)
3334
3336
3335 # run editor in the repository root
3337 # run editor in the repository root
3336 olddir = encoding.getcwd()
3338 olddir = encoding.getcwd()
3337 os.chdir(repo.root)
3339 os.chdir(repo.root)
3338
3340
3339 # make in-memory changes visible to external process
3341 # make in-memory changes visible to external process
3340 tr = repo.currenttransaction()
3342 tr = repo.currenttransaction()
3341 repo.dirstate.write(tr)
3343 repo.dirstate.write(tr)
3342 pending = tr and tr.writepending() and repo.root
3344 pending = tr and tr.writepending() and repo.root
3343
3345
3344 editortext = repo.ui.edit(
3346 editortext = repo.ui.edit(
3345 committext,
3347 committext,
3346 ctx.user(),
3348 ctx.user(),
3347 ctx.extra(),
3349 ctx.extra(),
3348 editform=editform,
3350 editform=editform,
3349 pending=pending,
3351 pending=pending,
3350 repopath=repo.path,
3352 repopath=repo.path,
3351 action=b'commit',
3353 action=b'commit',
3352 )
3354 )
3353 text = editortext
3355 text = editortext
3354
3356
3355 # strip away anything below this special string (used for editors that want
3357 # strip away anything below this special string (used for editors that want
3356 # to display the diff)
3358 # to display the diff)
3357 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3359 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3358 if stripbelow:
3360 if stripbelow:
3359 text = text[: stripbelow.start()]
3361 text = text[: stripbelow.start()]
3360
3362
3361 text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
3363 text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
3362 os.chdir(olddir)
3364 os.chdir(olddir)
3363
3365
3364 if finishdesc:
3366 if finishdesc:
3365 text = finishdesc(text)
3367 text = finishdesc(text)
3366 if not text.strip():
3368 if not text.strip():
3367 raise error.Abort(_(b"empty commit message"))
3369 raise error.Abort(_(b"empty commit message"))
3368 if unchangedmessagedetection and editortext == templatetext:
3370 if unchangedmessagedetection and editortext == templatetext:
3369 raise error.Abort(_(b"commit message unchanged"))
3371 raise error.Abort(_(b"commit message unchanged"))
3370
3372
3371 return text
3373 return text
3372
3374
3373
3375
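# commitforceeditor() above post-processes the editor output in two steps:
# truncate at a special marker line (for editors that append the diff),
# then drop every "HG:" helper line. A runnable sketch of both steps; the
# marker below is an assumed stand-in, since the real pattern, _linebelow,
# is defined outside this excerpt:
import re

_DEMO_LINEBELOW = b"^HG: ------------------------ >8 ------------------------$"

def _demo_clean_editor_text(text):
    m = re.search(_DEMO_LINEBELOW, text, flags=re.MULTILINE)
    if m:
        text = text[: m.start()]
    return re.sub(b"(?m)^HG:.*(\n|$)", b"", text)

# _demo_clean_editor_text(b"msg\nHG: changed f\n") -> b"msg\n"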
3374 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3376 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3375 ui = repo.ui
3377 ui = repo.ui
3376 spec = formatter.templatespec(ref, None, None)
3378 spec = formatter.templatespec(ref, None, None)
3377 t = logcmdutil.changesettemplater(ui, repo, spec)
3379 t = logcmdutil.changesettemplater(ui, repo, spec)
3378 t.t.cache.update(
3380 t.t.cache.update(
3379 (k, templater.unquotestring(v))
3381 (k, templater.unquotestring(v))
3380 for k, v in repo.ui.configitems(b'committemplate')
3382 for k, v in repo.ui.configitems(b'committemplate')
3381 )
3383 )
3382
3384
3383 if not extramsg:
3385 if not extramsg:
3384 extramsg = b'' # ensure that extramsg is string
3386 extramsg = b'' # ensure that extramsg is string
3385
3387
3386 ui.pushbuffer()
3388 ui.pushbuffer()
3387 t.show(ctx, extramsg=extramsg)
3389 t.show(ctx, extramsg=extramsg)
3388 return ui.popbuffer()
3390 return ui.popbuffer()
3389
3391
3390
3392
3391 def hgprefix(msg):
3393 def hgprefix(msg):
3392 return b"\n".join([b"HG: %s" % a for a in msg.split(b"\n") if a])
3394 return b"\n".join([b"HG: %s" % a for a in msg.split(b"\n") if a])
3393
3395
3394
3396
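# hgprefix() above is a pure helper: it prefixes every non-empty line of a
# message with "HG: ", which buildcommittext() below uses for all of the
# comment lines shown in the editor. For example:
#
#   hgprefix(b"user: alice\n\nbranch 'default'")
#   -> b"HG: user: alice\nHG: branch 'default'"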
3395 def buildcommittext(repo, ctx, subs, extramsg):
3397 def buildcommittext(repo, ctx, subs, extramsg):
3396 edittext = []
3398 edittext = []
3397 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3399 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3398 if ctx.description():
3400 if ctx.description():
3399 edittext.append(ctx.description())
3401 edittext.append(ctx.description())
3400 edittext.append(b"")
3402 edittext.append(b"")
3401 edittext.append(b"") # Empty line between message and comments.
3403 edittext.append(b"") # Empty line between message and comments.
3402 edittext.append(
3404 edittext.append(
3403 hgprefix(
3405 hgprefix(
3404 _(
3406 _(
3405 b"Enter commit message."
3407 b"Enter commit message."
3406 b" Lines beginning with 'HG:' are removed."
3408 b" Lines beginning with 'HG:' are removed."
3407 )
3409 )
3408 )
3410 )
3409 )
3411 )
3410 edittext.append(hgprefix(extramsg))
3412 edittext.append(hgprefix(extramsg))
3411 edittext.append(b"HG: --")
3413 edittext.append(b"HG: --")
3412 edittext.append(hgprefix(_(b"user: %s") % ctx.user()))
3414 edittext.append(hgprefix(_(b"user: %s") % ctx.user()))
3413 if ctx.p2():
3415 if ctx.p2():
3414 edittext.append(hgprefix(_(b"branch merge")))
3416 edittext.append(hgprefix(_(b"branch merge")))
3415 if ctx.branch():
3417 if ctx.branch():
3416 edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
3418 edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
3417 if bookmarks.isactivewdirparent(repo):
3419 if bookmarks.isactivewdirparent(repo):
3418 edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
3420 edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
3419 edittext.extend([hgprefix(_(b"subrepo %s") % s) for s in subs])
3421 edittext.extend([hgprefix(_(b"subrepo %s") % s) for s in subs])
3420 edittext.extend([hgprefix(_(b"added %s") % f) for f in added])
3422 edittext.extend([hgprefix(_(b"added %s") % f) for f in added])
3421 edittext.extend([hgprefix(_(b"changed %s") % f) for f in modified])
3423 edittext.extend([hgprefix(_(b"changed %s") % f) for f in modified])
3422 edittext.extend([hgprefix(_(b"removed %s") % f) for f in removed])
3424 edittext.extend([hgprefix(_(b"removed %s") % f) for f in removed])
3423 if not added and not modified and not removed:
3425 if not added and not modified and not removed:
3424 edittext.append(hgprefix(_(b"no files changed")))
3426 edittext.append(hgprefix(_(b"no files changed")))
3425 edittext.append(b"")
3427 edittext.append(b"")
3426
3428
3427 return b"\n".join(edittext)
3429 return b"\n".join(edittext)
3428
3430
3429
3431
3430 def commitstatus(repo, node, branch, bheads=None, opts=None):
3432 def commitstatus(repo, node, branch, bheads=None, opts=None):
3431 if opts is None:
3433 if opts is None:
3432 opts = {}
3434 opts = {}
3433 ctx = repo[node]
3435 ctx = repo[node]
3434 parents = ctx.parents()
3436 parents = ctx.parents()
3435
3437
3436 if (
3438 if (
3437 not opts.get(b'amend')
3439 not opts.get(b'amend')
3438 and bheads
3440 and bheads
3439 and node not in bheads
3441 and node not in bheads
3440 and not any(
3442 and not any(
3441 p.node() in bheads and p.branch() == branch for p in parents
3443 p.node() in bheads and p.branch() == branch for p in parents
3442 )
3444 )
3443 ):
3445 ):
3444 repo.ui.status(_(b'created new head\n'))
3446 repo.ui.status(_(b'created new head\n'))
3445 # The message is not printed for initial roots. For the other
3447 # The message is not printed for initial roots. For the other
3446 # changesets, it is printed in the following situations:
3448 # changesets, it is printed in the following situations:
3447 #
3449 #
3448 # Par column: for the 2 parents with ...
3450 # Par column: for the 2 parents with ...
3449 # N: null or no parent
3451 # N: null or no parent
3450 # B: parent is on another named branch
3452 # B: parent is on another named branch
3451 # C: parent is a regular non head changeset
3453 # C: parent is a regular non head changeset
3452 # H: parent was a branch head of the current branch
3454 # H: parent was a branch head of the current branch
3453 # Msg column: whether we print "created new head" message
3455 # Msg column: whether we print "created new head" message
3456 # In the following, it is assumed that there already exist some
3458 # In the following, it is assumed that there already exist some
3455 # initial branch heads of the current branch, otherwise nothing is
3457 # initial branch heads of the current branch, otherwise nothing is
3456 # printed anyway.
3458 # printed anyway.
3457 #
3459 #
3458 # Par Msg Comment
3460 # Par Msg Comment
3459 # N N y additional topo root
3461 # N N y additional topo root
3460 #
3462 #
3461 # B N y additional branch root
3463 # B N y additional branch root
3462 # C N y additional topo head
3464 # C N y additional topo head
3463 # H N n usual case
3465 # H N n usual case
3464 #
3466 #
3465 # B B y weird additional branch root
3467 # B B y weird additional branch root
3466 # C B y branch merge
3468 # C B y branch merge
3467 # H B n merge with named branch
3469 # H B n merge with named branch
3468 #
3470 #
3469 # C C y additional head from merge
3471 # C C y additional head from merge
3470 # C H n merge with a head
3472 # C H n merge with a head
3471 #
3473 #
3472 # H H n head merge: head count decreases
3474 # H H n head merge: head count decreases
3473
3475
3474 if not opts.get(b'close_branch'):
3476 if not opts.get(b'close_branch'):
3475 for r in parents:
3477 for r in parents:
3476 if r.closesbranch() and r.branch() == branch:
3478 if r.closesbranch() and r.branch() == branch:
3477 repo.ui.status(
3479 repo.ui.status(
3478 _(b'reopening closed branch head %d\n') % r.rev()
3480 _(b'reopening closed branch head %d\n') % r.rev()
3479 )
3481 )
3480
3482
3481 if repo.ui.debugflag:
3483 if repo.ui.debugflag:
3482 repo.ui.write(
3484 repo.ui.write(
3483 _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
3485 _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
3484 )
3486 )
3485 elif repo.ui.verbose:
3487 elif repo.ui.verbose:
3486 repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3488 repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3487
3489
3488
3490
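# commitstatus() above prints "created new head" only when the committed
# node is not already a branch head and none of its parents was a head of
# the same branch. The predicate restated over plain data (--amend is
# ignored here for brevity):
def _demo_creates_new_head(node, branch, bheads, parents):
    # parents: iterable of (node, branch) pairs
    return bool(bheads) and node not in bheads and not any(
        p in bheads and b == branch for p, b in parents
    )

# _demo_creates_new_head('n2', 'default', {'h1'}, [('h1', 'default')])
# -> False  (a parent was the branch head: the usual case, no message)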
3489 def postcommitstatus(repo, pats, opts):
3491 def postcommitstatus(repo, pats, opts):
3490 return repo.status(match=scmutil.match(repo[None], pats, opts))
3492 return repo.status(match=scmutil.match(repo[None], pats, opts))
3491
3493
3492
3494
3493 def revert(ui, repo, ctx, parents, *pats, **opts):
3495 def revert(ui, repo, ctx, parents, *pats, **opts):
3494 opts = pycompat.byteskwargs(opts)
3496 opts = pycompat.byteskwargs(opts)
3495 parent, p2 = parents
3497 parent, p2 = parents
3496 node = ctx.node()
3498 node = ctx.node()
3497
3499
3498 mf = ctx.manifest()
3500 mf = ctx.manifest()
3499 if node == p2:
3501 if node == p2:
3500 parent = p2
3502 parent = p2
3501
3503
3502 # need all matching names in dirstate and manifest of target rev,
3504 # need all matching names in dirstate and manifest of target rev,
3503 # so have to walk both. do not print errors if files exist in one
3505 # so have to walk both. do not print errors if files exist in one
3504 # but not other. in both cases, filesets should be evaluated against
3506 # but not other. in both cases, filesets should be evaluated against
3505 # workingctx to get consistent result (issue4497). this means 'set:**'
3507 # workingctx to get consistent result (issue4497). this means 'set:**'
3506 # cannot be used to select missing files from target rev.
3508 # cannot be used to select missing files from target rev.
3507
3509
3508 # `names` is a mapping for all elements in working copy and target revision
3510 # `names` is a mapping for all elements in working copy and target revision
3509 # The mapping is in the form:
3511 # The mapping is in the form:
3510 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3512 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3511 names = {}
3513 names = {}
3512 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3514 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3513
3515
3514 with repo.wlock():
3516 with repo.wlock():
3515 ## filling of the `names` mapping
3517 ## filling of the `names` mapping
3516 # walk dirstate to fill `names`
3518 # walk dirstate to fill `names`
3517
3519
3518 interactive = opts.get(b'interactive', False)
3520 interactive = opts.get(b'interactive', False)
3519 wctx = repo[None]
3521 wctx = repo[None]
3520 m = scmutil.match(wctx, pats, opts)
3522 m = scmutil.match(wctx, pats, opts)
3521
3523
3522 # we'll need this later
3524 # we'll need this later
3523 targetsubs = sorted(s for s in wctx.substate if m(s))
3525 targetsubs = sorted(s for s in wctx.substate if m(s))
3524
3526
3525 if not m.always():
3527 if not m.always():
3526 matcher = matchmod.badmatch(m, lambda x, y: False)
3528 matcher = matchmod.badmatch(m, lambda x, y: False)
3527 for abs in wctx.walk(matcher):
3529 for abs in wctx.walk(matcher):
3528 names[abs] = m.exact(abs)
3530 names[abs] = m.exact(abs)
3529
3531
3530 # walk target manifest to fill `names`
3532 # walk target manifest to fill `names`
3531
3533
3532 def badfn(path, msg):
3534 def badfn(path, msg):
3533 if path in names:
3535 if path in names:
3534 return
3536 return
3535 if path in ctx.substate:
3537 if path in ctx.substate:
3536 return
3538 return
3537 path_ = path + b'/'
3539 path_ = path + b'/'
3538 for f in names:
3540 for f in names:
3539 if f.startswith(path_):
3541 if f.startswith(path_):
3540 return
3542 return
3541 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3543 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3542
3544
3543 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3545 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3544 if abs not in names:
3546 if abs not in names:
3545 names[abs] = m.exact(abs)
3547 names[abs] = m.exact(abs)
3546
3548
3547 # Find status of all files in `names`.
3549 # Find status of all files in `names`.
3548 m = scmutil.matchfiles(repo, names)
3550 m = scmutil.matchfiles(repo, names)
3549
3551
3550 changes = repo.status(
3552 changes = repo.status(
3551 node1=node, match=m, unknown=True, ignored=True, clean=True
3553 node1=node, match=m, unknown=True, ignored=True, clean=True
3552 )
3554 )
3553 else:
3555 else:
3554 changes = repo.status(node1=node, match=m)
3556 changes = repo.status(node1=node, match=m)
3555 for kind in changes:
3557 for kind in changes:
3556 for abs in kind:
3558 for abs in kind:
3557 names[abs] = m.exact(abs)
3559 names[abs] = m.exact(abs)
3558
3560
3559 m = scmutil.matchfiles(repo, names)
3561 m = scmutil.matchfiles(repo, names)
3560
3562
3561 modified = set(changes.modified)
3563 modified = set(changes.modified)
3562 added = set(changes.added)
3564 added = set(changes.added)
3563 removed = set(changes.removed)
3565 removed = set(changes.removed)
3564 _deleted = set(changes.deleted)
3566 _deleted = set(changes.deleted)
3565 unknown = set(changes.unknown)
3567 unknown = set(changes.unknown)
3566 unknown.update(changes.ignored)
3568 unknown.update(changes.ignored)
3567 clean = set(changes.clean)
3569 clean = set(changes.clean)
3568 modadded = set()
3570 modadded = set()
3569
3571
3570 # We need to account for the state of the file in the dirstate,
3572 # We need to account for the state of the file in the dirstate,
3571 # even when we revert against something other than the parent. This will
3573 # even when we revert against something other than the parent. This will
3572 # slightly alter the behavior of revert (doing a backup or not, deleting
3574 # slightly alter the behavior of revert (doing a backup or not, deleting
3573 # or just forgetting, etc.).
3575 # or just forgetting, etc.).
3574 if parent == node:
3576 if parent == node:
3575 dsmodified = modified
3577 dsmodified = modified
3576 dsadded = added
3578 dsadded = added
3577 dsremoved = removed
3579 dsremoved = removed
3578 # store all local modifications, useful later for rename detection
3580 # store all local modifications, useful later for rename detection
3579 localchanges = dsmodified | dsadded
3581 localchanges = dsmodified | dsadded
3580 modified, added, removed = set(), set(), set()
3582 modified, added, removed = set(), set(), set()
3581 else:
3583 else:
3582 changes = repo.status(node1=parent, match=m)
3584 changes = repo.status(node1=parent, match=m)
3583 dsmodified = set(changes.modified)
3585 dsmodified = set(changes.modified)
3584 dsadded = set(changes.added)
3586 dsadded = set(changes.added)
3585 dsremoved = set(changes.removed)
3587 dsremoved = set(changes.removed)
3586 # store all local modifications, useful later for rename detection
3588 # store all local modifications, useful later for rename detection
3587 localchanges = dsmodified | dsadded
3589 localchanges = dsmodified | dsadded
3588
3590
3589 # only take removals between wc and target into account
3591 # only take removals between wc and target into account
3590 clean |= dsremoved - removed
3592 clean |= dsremoved - removed
3591 dsremoved &= removed
3593 dsremoved &= removed
3592 # distinguish between dirstate removals and the others
3594 # distinguish between dirstate removals and the others
3593 removed -= dsremoved
3595 removed -= dsremoved
3594
3596
3595 modadded = added & dsmodified
3597 modadded = added & dsmodified
3596 added -= modadded
3598 added -= modadded
3597
3599
3598 # tell newly modified files apart.
3600 # tell newly modified files apart.
3599 dsmodified &= modified
3601 dsmodified &= modified
3600 dsmodified |= modified & dsadded # dirstate added may need backup
3602 dsmodified |= modified & dsadded # dirstate added may need backup
3601 modified -= dsmodified
3603 modified -= dsmodified
3602
3604
3603 # We need to wait for some post-processing to update this set
3605 # We need to wait for some post-processing to update this set
3604 # before making the distinction. The dirstate will be used for
3606 # before making the distinction. The dirstate will be used for
3605 # that purpose.
3607 # that purpose.
3606 dsadded = added
3608 dsadded = added
3607
3609
3608 # in case of merge, files that are actually added can be reported as
3610 # in case of merge, files that are actually added can be reported as
3609 # modified; we need to post-process the result
3611 # modified; we need to post-process the result
3610 if p2 != nullid:
3612 if p2 != nullid:
3611 mergeadd = set(dsmodified)
3613 mergeadd = set(dsmodified)
3612 for path in dsmodified:
3614 for path in dsmodified:
3613 if path in mf:
3615 if path in mf:
3614 mergeadd.remove(path)
3616 mergeadd.remove(path)
3615 dsadded |= mergeadd
3617 dsadded |= mergeadd
3616 dsmodified -= mergeadd
3618 dsmodified -= mergeadd
3617
3619
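# [editor's note] Two status runs feed the set algebra above: `changes`
# compares the working copy against the revert target, while the ds*
# ("dirstate") sets compare it against the working copy parent. Splitting
# each category by whether the working copy parent already records the
# change is what later picks e.g. 'revert' vs. 'forget' and decides whether
# a backup is needed.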
3618 # if f is a rename, update `names` to also revert the source
3620 # if f is a rename, update `names` to also revert the source
3619 for f in localchanges:
3621 for f in localchanges:
3620 src = repo.dirstate.copied(f)
3622 src = repo.dirstate.copied(f)
3621 # XXX should we check for rename down to target node?
3623 # XXX should we check for rename down to target node?
3622 if src and src not in names and repo.dirstate[src] == b'r':
3624 if src and src not in names and repo.dirstate[src] == b'r':
3623 dsremoved.add(src)
3625 dsremoved.add(src)
3624 names[src] = True
3626 names[src] = True
3625
3627
3626 # determine the exact nature of the deleted files
3628 # determine the exact nature of the deleted files
3627 deladded = set(_deleted)
3629 deladded = set(_deleted)
3628 for path in _deleted:
3630 for path in _deleted:
3629 if path in mf:
3631 if path in mf:
3630 deladded.remove(path)
3632 deladded.remove(path)
3631 deleted = _deleted - deladded
3633 deleted = _deleted - deladded
3632
3634
3633 # distinguish between files to forget and the others
3635 # distinguish between files to forget and the others
3634 added = set()
3636 added = set()
3635 for abs in dsadded:
3637 for abs in dsadded:
3636 if repo.dirstate[abs] != b'a':
3638 if repo.dirstate[abs] != b'a':
3637 added.add(abs)
3639 added.add(abs)
3638 dsadded -= added
3640 dsadded -= added
3639
3641
3640 for abs in deladded:
3642 for abs in deladded:
3641 if repo.dirstate[abs] == b'a':
3643 if repo.dirstate[abs] == b'a':
3642 dsadded.add(abs)
3644 dsadded.add(abs)
3643 deladded -= dsadded
3645 deladded -= dsadded
3644
3646
3645 # For files marked as removed, we check if an unknown file is present at
3647 # For files marked as removed, we check if an unknown file is present at
3646 # the same path. If such a file exists, it may need to be backed up.
3648 # the same path. If such a file exists, it may need to be backed up.
3647 # Making the distinction at this stage keeps the backup
3649 # Making the distinction at this stage keeps the backup
3648 # logic simpler.
3650 # logic simpler.
3649 removunk = set()
3651 removunk = set()
3650 for abs in removed:
3652 for abs in removed:
3651 target = repo.wjoin(abs)
3653 target = repo.wjoin(abs)
3652 if os.path.lexists(target):
3654 if os.path.lexists(target):
3653 removunk.add(abs)
3655 removunk.add(abs)
3654 removed -= removunk
3656 removed -= removunk
3655
3657
3656 dsremovunk = set()
3658 dsremovunk = set()
3657 for abs in dsremoved:
3659 for abs in dsremoved:
3658 target = repo.wjoin(abs)
3660 target = repo.wjoin(abs)
3659 if os.path.lexists(target):
3661 if os.path.lexists(target):
3660 dsremovunk.add(abs)
3662 dsremovunk.add(abs)
3661 dsremoved -= dsremovunk
3663 dsremoved -= dsremovunk
3662
3664
3663 # actions to be actually performed by revert
3665 # actions to be actually performed by revert
3664 # (<list of files>, <message>) tuple
3666 # (<list of files>, <message>) tuple
3665 actions = {
3667 actions = {
3666 b'revert': ([], _(b'reverting %s\n')),
3668 b'revert': ([], _(b'reverting %s\n')),
3667 b'add': ([], _(b'adding %s\n')),
3669 b'add': ([], _(b'adding %s\n')),
3668 b'remove': ([], _(b'removing %s\n')),
3670 b'remove': ([], _(b'removing %s\n')),
3669 b'drop': ([], _(b'removing %s\n')),
3671 b'drop': ([], _(b'removing %s\n')),
3670 b'forget': ([], _(b'forgetting %s\n')),
3672 b'forget': ([], _(b'forgetting %s\n')),
3671 b'undelete': ([], _(b'undeleting %s\n')),
3673 b'undelete': ([], _(b'undeleting %s\n')),
3672 b'noop': (None, _(b'no changes needed to %s\n')),
3674 b'noop': (None, _(b'no changes needed to %s\n')),
3673 b'unknown': (None, _(b'file not managed: %s\n')),
3675 b'unknown': (None, _(b'file not managed: %s\n')),
3674 }
3676 }
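# [editor's note] Entries whose first element is None ('noop', 'unknown')
# only report a message; every other entry accumulates file names in its
# list, which _performrevert() consumes below via actions[b'<name>'][0].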
3675
3677
3676 # "constant" that convey the backup strategy.
3678 # "constant" that convey the backup strategy.
3677 # All set to `discard` if `no-backup` is set do avoid checking
3679 # All set to `discard` if `no-backup` is set do avoid checking
3678 # no_backup lower in the code.
3680 # no_backup lower in the code.
3679 # These values are ordered for comparison purposes
3681 # These values are ordered for comparison purposes
3680 backupinteractive = 3 # do backup if interactively modified
3682 backupinteractive = 3 # do backup if interactively modified
3681 backup = 2 # unconditionally do backup
3683 backup = 2 # unconditionally do backup
3682 check = 1 # check if the existing file differs from target
3684 check = 1 # check if the existing file differs from target
3683 discard = 0 # never do backup
3685 discard = 0 # never do backup
3684 if opts.get(b'no_backup'):
3686 if opts.get(b'no_backup'):
3685 backupinteractive = backup = check = discard
3687 backupinteractive = backup = check = discard
3686 if interactive:
3688 if interactive:
3687 dsmodifiedbackup = backupinteractive
3689 dsmodifiedbackup = backupinteractive
3688 else:
3690 else:
3689 dsmodifiedbackup = backup
3691 dsmodifiedbackup = backup
3690 tobackup = set()
3692 tobackup = set()
3691
3693
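# [editor's note] Because the four constants above form an ordered scale, a
# single integer comparison decides whether to back a file up. A sketch of
# the test used further down (dobackup holds one of the constants):
#
#   if dobackup == backupinteractive:   # defer; the interactive prompt
#       tobackup.add(abs)               # decides later
#   elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
#       ...                             # unconditional backup, or `check`
#                                       # and the on-disk file differs
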
3692 backupanddel = actions[b'remove']
3694 backupanddel = actions[b'remove']
3693 if not opts.get(b'no_backup'):
3695 if not opts.get(b'no_backup'):
3694 backupanddel = actions[b'drop']
3696 backupanddel = actions[b'drop']
3695
3697
3696 disptable = (
3698 disptable = (
3697 # dispatch table:
3699 # dispatch table:
3698 # file state
3700 # file state
3699 # action
3701 # action
3700 # make backup
3702 # make backup
3701 ## Sets that result in changes to files on disk
3703 ## Sets that result in changes to files on disk
3702 # Modified compared to target, no local change
3704 # Modified compared to target, no local change
3703 (modified, actions[b'revert'], discard),
3705 (modified, actions[b'revert'], discard),
3704 # Modified compared to target, but local file is deleted
3706 # Modified compared to target, but local file is deleted
3705 (deleted, actions[b'revert'], discard),
3707 (deleted, actions[b'revert'], discard),
3706 # Modified compared to target, local change
3708 # Modified compared to target, local change
3707 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3709 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3708 # Added since target
3710 # Added since target
3709 (added, actions[b'remove'], discard),
3711 (added, actions[b'remove'], discard),
3710 # Added in working directory
3712 # Added in working directory
3711 (dsadded, actions[b'forget'], discard),
3713 (dsadded, actions[b'forget'], discard),
3712 # Added since target, have local modification
3714 # Added since target, have local modification
3713 (modadded, backupanddel, backup),
3715 (modadded, backupanddel, backup),
3714 # Added since target but file is missing in working directory
3716 # Added since target but file is missing in working directory
3715 (deladded, actions[b'drop'], discard),
3717 (deladded, actions[b'drop'], discard),
3716 # Removed since target, before working copy parent
3718 # Removed since target, before working copy parent
3717 (removed, actions[b'add'], discard),
3719 (removed, actions[b'add'], discard),
3718 # Same as `removed` but an unknown file exists at the same path
3720 # Same as `removed` but an unknown file exists at the same path
3719 (removunk, actions[b'add'], check),
3721 (removunk, actions[b'add'], check),
3720 # Removed since target, marked as such in working copy parent
3722 # Removed since target, marked as such in working copy parent
3721 (dsremoved, actions[b'undelete'], discard),
3723 (dsremoved, actions[b'undelete'], discard),
3722 # Same as `dsremoved` but an unknown file exists at the same path
3724 # Same as `dsremoved` but an unknown file exists at the same path
3723 (dsremovunk, actions[b'undelete'], check),
3725 (dsremovunk, actions[b'undelete'], check),
3724 ## the following sets do not result in any file changes
3726 ## the following sets do not result in any file changes
3725 # File with no modification
3727 # File with no modification
3726 (clean, actions[b'noop'], discard),
3728 (clean, actions[b'noop'], discard),
3727 # Existing file, not tracked anywhere
3729 # Existing file, not tracked anywhere
3728 (unknown, actions[b'unknown'], discard),
3730 (unknown, actions[b'unknown'], discard),
3729 )
3731 )
3730
3732
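# [editor's note] Each row of disptable is (file set, (action list, status
# message), backup mode). The loop below applies the first row whose set
# contains the file and then breaks, so row order encodes precedence.
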
3731 for abs, exact in sorted(names.items()):
3733 for abs, exact in sorted(names.items()):
3732 # target file to be touched on disk (relative to cwd)
3734 # target file to be touched on disk (relative to cwd)
3733 target = repo.wjoin(abs)
3735 target = repo.wjoin(abs)
3734 # search the entry in the dispatch table.
3736 # search the entry in the dispatch table.
3735 # if the file is in any of these sets, it was touched in the working
3737 # if the file is in any of these sets, it was touched in the working
3736 # directory parent and we are sure it needs to be reverted.
3738 # directory parent and we are sure it needs to be reverted.
3737 for table, (xlist, msg), dobackup in disptable:
3739 for table, (xlist, msg), dobackup in disptable:
3738 if abs not in table:
3740 if abs not in table:
3739 continue
3741 continue
3740 if xlist is not None:
3742 if xlist is not None:
3741 xlist.append(abs)
3743 xlist.append(abs)
3742 if dobackup:
3744 if dobackup:
3743 # If in interactive mode, don't automatically create
3745 # If in interactive mode, don't automatically create
3744 # .orig files (issue4793)
3746 # .orig files (issue4793)
3745 if dobackup == backupinteractive:
3747 if dobackup == backupinteractive:
3746 tobackup.add(abs)
3748 tobackup.add(abs)
3747 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3749 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3748 absbakname = scmutil.backuppath(ui, repo, abs)
3750 absbakname = scmutil.backuppath(ui, repo, abs)
3749 bakname = os.path.relpath(
3751 bakname = os.path.relpath(
3750 absbakname, start=repo.root
3752 absbakname, start=repo.root
3751 )
3753 )
3752 ui.note(
3754 ui.note(
3753 _(b'saving current version of %s as %s\n')
3755 _(b'saving current version of %s as %s\n')
3754 % (uipathfn(abs), uipathfn(bakname))
3756 % (uipathfn(abs), uipathfn(bakname))
3755 )
3757 )
3756 if not opts.get(b'dry_run'):
3758 if not opts.get(b'dry_run'):
3757 if interactive:
3759 if interactive:
3758 util.copyfile(target, absbakname)
3760 util.copyfile(target, absbakname)
3759 else:
3761 else:
3760 util.rename(target, absbakname)
3762 util.rename(target, absbakname)
3761 if opts.get(b'dry_run'):
3763 if opts.get(b'dry_run'):
3762 if ui.verbose or not exact:
3764 if ui.verbose or not exact:
3763 ui.status(msg % uipathfn(abs))
3765 ui.status(msg % uipathfn(abs))
3764 elif exact:
3766 elif exact:
3765 ui.warn(msg % uipathfn(abs))
3767 ui.warn(msg % uipathfn(abs))
3766 break
3768 break
3767
3769
3768 if not opts.get(b'dry_run'):
3770 if not opts.get(b'dry_run'):
3769 needdata = (b'revert', b'add', b'undelete')
3771 needdata = (b'revert', b'add', b'undelete')
3770 oplist = [actions[name][0] for name in needdata]
3772 oplist = [actions[name][0] for name in needdata]
3771 prefetch = scmutil.prefetchfiles
3773 prefetch = scmutil.prefetchfiles
3772 matchfiles = scmutil.matchfiles
3774 matchfiles = scmutil.matchfiles(
3775 repo, [f for sublist in oplist for f in sublist])
3773 prefetch(
3776 prefetch(
3774 repo,
3777 repo,
3775 [ctx.rev()],
3778 [(ctx.rev(), matchfiles)],
3776 matchfiles(repo, [f for sublist in oplist for f in sublist]),
3777 )
3779 )
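# [editor's note] The hunk above is the heart of this change: per the commit
# message, scmutil.prefetchfiles() previously took a list of revisions plus
# one matcher applied to all of them, and now takes (revision, matcher)
# pairs so a different set of files can be prefetched per revision. A
# minimal sketch of the two calling conventions (revs/matchers hypothetical):
#
#   # before: one matcher shared by every revision
#   scmutil.prefetchfiles(repo, [rev1, rev2], matcher)
#   # after: a matcher per revision
#   scmutil.prefetchfiles(repo, [(rev1, matcher1), (rev2, matcher2)])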
3778 match = scmutil.match(repo[None], pats)
3780 match = scmutil.match(repo[None], pats)
3779 _performrevert(
3781 _performrevert(
3780 repo,
3782 repo,
3781 parents,
3783 parents,
3782 ctx,
3784 ctx,
3783 names,
3785 names,
3784 uipathfn,
3786 uipathfn,
3785 actions,
3787 actions,
3786 match,
3788 match,
3787 interactive,
3789 interactive,
3788 tobackup,
3790 tobackup,
3789 )
3791 )
3790
3792
3791 if targetsubs:
3793 if targetsubs:
3792 # Revert the subrepos on the revert list
3794 # Revert the subrepos on the revert list
3793 for sub in targetsubs:
3795 for sub in targetsubs:
3794 try:
3796 try:
3795 wctx.sub(sub).revert(
3797 wctx.sub(sub).revert(
3796 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3798 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3797 )
3799 )
3798 except KeyError:
3800 except KeyError:
3799 raise error.Abort(
3801 raise error.Abort(
3800 b"subrepository '%s' does not exist in %s!"
3802 b"subrepository '%s' does not exist in %s!"
3801 % (sub, short(ctx.node()))
3803 % (sub, short(ctx.node()))
3802 )
3804 )
3803
3805
3804
3806
3805 def _performrevert(
3807 def _performrevert(
3806 repo,
3808 repo,
3807 parents,
3809 parents,
3808 ctx,
3810 ctx,
3809 names,
3811 names,
3810 uipathfn,
3812 uipathfn,
3811 actions,
3813 actions,
3812 match,
3814 match,
3813 interactive=False,
3815 interactive=False,
3814 tobackup=None,
3816 tobackup=None,
3815 ):
3817 ):
3816 """function that actually perform all the actions computed for revert
3818 """function that actually perform all the actions computed for revert
3817
3819
3818 This is an independent function to let extensions plug in and react to
3820 This is an independent function to let extensions plug in and react to
3819 the imminent revert.
3821 the imminent revert.
3820
3822
3821 Make sure you have the working directory locked when calling this function.
3823 Make sure you have the working directory locked when calling this function.
3822 """
3824 """
3823 parent, p2 = parents
3825 parent, p2 = parents
3824 node = ctx.node()
3826 node = ctx.node()
3825 excluded_files = []
3827 excluded_files = []
3826
3828
3827 def checkout(f):
3829 def checkout(f):
3828 fc = ctx[f]
3830 fc = ctx[f]
3829 repo.wwrite(f, fc.data(), fc.flags())
3831 repo.wwrite(f, fc.data(), fc.flags())
3830
3832
3831 def doremove(f):
3833 def doremove(f):
3832 try:
3834 try:
3833 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
3835 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
3834 repo.wvfs.unlinkpath(f, rmdir=rmdir)
3836 repo.wvfs.unlinkpath(f, rmdir=rmdir)
3835 except OSError:
3837 except OSError:
3836 pass
3838 pass
3837 repo.dirstate.remove(f)
3839 repo.dirstate.remove(f)
3838
3840
3839 def prntstatusmsg(action, f):
3841 def prntstatusmsg(action, f):
3840 exact = names[f]
3842 exact = names[f]
3841 if repo.ui.verbose or not exact:
3843 if repo.ui.verbose or not exact:
3842 repo.ui.status(actions[action][1] % uipathfn(f))
3844 repo.ui.status(actions[action][1] % uipathfn(f))
3843
3845
3844 audit_path = pathutil.pathauditor(repo.root, cached=True)
3846 audit_path = pathutil.pathauditor(repo.root, cached=True)
3845 for f in actions[b'forget'][0]:
3847 for f in actions[b'forget'][0]:
3846 if interactive:
3848 if interactive:
3847 choice = repo.ui.promptchoice(
3849 choice = repo.ui.promptchoice(
3848 _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3850 _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3849 )
3851 )
3850 if choice == 0:
3852 if choice == 0:
3851 prntstatusmsg(b'forget', f)
3853 prntstatusmsg(b'forget', f)
3852 repo.dirstate.drop(f)
3854 repo.dirstate.drop(f)
3853 else:
3855 else:
3854 excluded_files.append(f)
3856 excluded_files.append(f)
3855 else:
3857 else:
3856 prntstatusmsg(b'forget', f)
3858 prntstatusmsg(b'forget', f)
3857 repo.dirstate.drop(f)
3859 repo.dirstate.drop(f)
3858 for f in actions[b'remove'][0]:
3860 for f in actions[b'remove'][0]:
3859 audit_path(f)
3861 audit_path(f)
3860 if interactive:
3862 if interactive:
3861 choice = repo.ui.promptchoice(
3863 choice = repo.ui.promptchoice(
3862 _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3864 _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3863 )
3865 )
3864 if choice == 0:
3866 if choice == 0:
3865 prntstatusmsg(b'remove', f)
3867 prntstatusmsg(b'remove', f)
3866 doremove(f)
3868 doremove(f)
3867 else:
3869 else:
3868 excluded_files.append(f)
3870 excluded_files.append(f)
3869 else:
3871 else:
3870 prntstatusmsg(b'remove', f)
3872 prntstatusmsg(b'remove', f)
3871 doremove(f)
3873 doremove(f)
3872 for f in actions[b'drop'][0]:
3874 for f in actions[b'drop'][0]:
3873 audit_path(f)
3875 audit_path(f)
3874 prntstatusmsg(b'drop', f)
3876 prntstatusmsg(b'drop', f)
3875 repo.dirstate.remove(f)
3877 repo.dirstate.remove(f)
3876
3878
3877 normal = None
3879 normal = None
3878 if node == parent:
3880 if node == parent:
3879 # We're reverting to our parent. If possible, we'd like status
3881 # We're reverting to our parent. If possible, we'd like status
3880 # to report the file as clean. We have to use normallookup for
3882 # to report the file as clean. We have to use normallookup for
3881 # merges to avoid losing information about merged/dirty files.
3883 # merges to avoid losing information about merged/dirty files.
3882 if p2 != nullid:
3884 if p2 != nullid:
3883 normal = repo.dirstate.normallookup
3885 normal = repo.dirstate.normallookup
3884 else:
3886 else:
3885 normal = repo.dirstate.normal
3887 normal = repo.dirstate.normal
3886
3888
3887 newlyaddedandmodifiedfiles = set()
3889 newlyaddedandmodifiedfiles = set()
3888 if interactive:
3890 if interactive:
3889 # Prompt the user for changes to revert
3891 # Prompt the user for changes to revert
3890 torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
3892 torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
3891 m = scmutil.matchfiles(repo, torevert)
3893 m = scmutil.matchfiles(repo, torevert)
3892 diffopts = patch.difffeatureopts(
3894 diffopts = patch.difffeatureopts(
3893 repo.ui,
3895 repo.ui,
3894 whitespace=True,
3896 whitespace=True,
3895 section=b'commands',
3897 section=b'commands',
3896 configprefix=b'revert.interactive.',
3898 configprefix=b'revert.interactive.',
3897 )
3899 )
3898 diffopts.nodates = True
3900 diffopts.nodates = True
3899 diffopts.git = True
3901 diffopts.git = True
3900 operation = b'apply'
3902 operation = b'apply'
3901 if node == parent:
3903 if node == parent:
3902 if repo.ui.configbool(
3904 if repo.ui.configbool(
3903 b'experimental', b'revert.interactive.select-to-keep'
3905 b'experimental', b'revert.interactive.select-to-keep'
3904 ):
3906 ):
3905 operation = b'keep'
3907 operation = b'keep'
3906 else:
3908 else:
3907 operation = b'discard'
3909 operation = b'discard'
3908
3910
3909 if operation == b'apply':
3911 if operation == b'apply':
3910 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3912 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3911 else:
3913 else:
3912 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3914 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3913 originalchunks = patch.parsepatch(diff)
3915 originalchunks = patch.parsepatch(diff)
3914
3916
3915 try:
3917 try:
3916
3918
3917 chunks, opts = recordfilter(
3919 chunks, opts = recordfilter(
3918 repo.ui, originalchunks, match, operation=operation
3920 repo.ui, originalchunks, match, operation=operation
3919 )
3921 )
3920 if operation == b'discard':
3922 if operation == b'discard':
3921 chunks = patch.reversehunks(chunks)
3923 chunks = patch.reversehunks(chunks)
3922
3924
3923 except error.PatchError as err:
3925 except error.PatchError as err:
3924 raise error.Abort(_(b'error parsing patch: %s') % err)
3926 raise error.Abort(_(b'error parsing patch: %s') % err)
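# [editor's note] For 'discard', the diff above was computed from the target
# revision to the working copy, so the selected hunks are reversed before
# being applied: applying a reversed hunk undoes that change in the working
# copy.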
3925
3927
3926 # FIXME: when doing an interactive revert of a copy, there's no way of
3928 # FIXME: when doing an interactive revert of a copy, there's no way of
3927 # performing a partial revert of the added file, the only option is
3929 # performing a partial revert of the added file, the only option is
3928 # "remove added file <name> (Yn)?", so we don't need to worry about the
3930 # "remove added file <name> (Yn)?", so we don't need to worry about the
3929 # alsorestore value. Ideally we'd be able to partially revert
3931 # alsorestore value. Ideally we'd be able to partially revert
3930 # copied/renamed files.
3932 # copied/renamed files.
3931 newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
3933 newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
3932 chunks, originalchunks
3934 chunks, originalchunks
3933 )
3935 )
3934 if tobackup is None:
3936 if tobackup is None:
3935 tobackup = set()
3937 tobackup = set()
3936 # Apply changes
3938 # Apply changes
3937 fp = stringio()
3939 fp = stringio()
3938 # chunks are serialized per file, but files aren't sorted
3940 # chunks are serialized per file, but files aren't sorted
3939 for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
3941 for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
3940 prntstatusmsg(b'revert', f)
3942 prntstatusmsg(b'revert', f)
3941 files = set()
3943 files = set()
3942 for c in chunks:
3944 for c in chunks:
3943 if ishunk(c):
3945 if ishunk(c):
3944 abs = c.header.filename()
3946 abs = c.header.filename()
3945 # Create a backup file only if this hunk should be backed up
3947 # Create a backup file only if this hunk should be backed up
3946 if c.header.filename() in tobackup:
3948 if c.header.filename() in tobackup:
3947 target = repo.wjoin(abs)
3949 target = repo.wjoin(abs)
3948 bakname = scmutil.backuppath(repo.ui, repo, abs)
3950 bakname = scmutil.backuppath(repo.ui, repo, abs)
3949 util.copyfile(target, bakname)
3951 util.copyfile(target, bakname)
3950 tobackup.remove(abs)
3952 tobackup.remove(abs)
3951 if abs not in files:
3953 if abs not in files:
3952 files.add(abs)
3954 files.add(abs)
3953 if operation == b'keep':
3955 if operation == b'keep':
3954 checkout(abs)
3956 checkout(abs)
3955 c.write(fp)
3957 c.write(fp)
3956 dopatch = fp.tell()
3958 dopatch = fp.tell()
3957 fp.seek(0)
3959 fp.seek(0)
3958 if dopatch:
3960 if dopatch:
3959 try:
3961 try:
3960 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3962 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3961 except error.PatchError as err:
3963 except error.PatchError as err:
3962 raise error.Abort(pycompat.bytestr(err))
3964 raise error.Abort(pycompat.bytestr(err))
3963 del fp
3965 del fp
3964 else:
3966 else:
3965 for f in actions[b'revert'][0]:
3967 for f in actions[b'revert'][0]:
3966 prntstatusmsg(b'revert', f)
3968 prntstatusmsg(b'revert', f)
3967 checkout(f)
3969 checkout(f)
3968 if normal:
3970 if normal:
3969 normal(f)
3971 normal(f)
3970
3972
3971 for f in actions[b'add'][0]:
3973 for f in actions[b'add'][0]:
3972 # Don't check out modified files; they are already created by the diff
3974 # Don't check out modified files; they are already created by the diff
3973 if f not in newlyaddedandmodifiedfiles:
3975 if f not in newlyaddedandmodifiedfiles:
3974 prntstatusmsg(b'add', f)
3976 prntstatusmsg(b'add', f)
3975 checkout(f)
3977 checkout(f)
3976 repo.dirstate.add(f)
3978 repo.dirstate.add(f)
3977
3979
3978 normal = repo.dirstate.normallookup
3980 normal = repo.dirstate.normallookup
3979 if node == parent and p2 == nullid:
3981 if node == parent and p2 == nullid:
3980 normal = repo.dirstate.normal
3982 normal = repo.dirstate.normal
3981 for f in actions[b'undelete'][0]:
3983 for f in actions[b'undelete'][0]:
3982 if interactive:
3984 if interactive:
3983 choice = repo.ui.promptchoice(
3985 choice = repo.ui.promptchoice(
3984 _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
3986 _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
3985 )
3987 )
3986 if choice == 0:
3988 if choice == 0:
3987 prntstatusmsg(b'undelete', f)
3989 prntstatusmsg(b'undelete', f)
3988 checkout(f)
3990 checkout(f)
3989 normal(f)
3991 normal(f)
3990 else:
3992 else:
3991 excluded_files.append(f)
3993 excluded_files.append(f)
3992 else:
3994 else:
3993 prntstatusmsg(b'undelete', f)
3995 prntstatusmsg(b'undelete', f)
3994 checkout(f)
3996 checkout(f)
3995 normal(f)
3997 normal(f)
3996
3998
3997 copied = copies.pathcopies(repo[parent], ctx)
3999 copied = copies.pathcopies(repo[parent], ctx)
3998
4000
3999 for f in (
4001 for f in (
4000 actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
4002 actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
4001 ):
4003 ):
4002 if f in copied:
4004 if f in copied:
4003 repo.dirstate.copy(copied[f], f)
4005 repo.dirstate.copy(copied[f], f)
4004
4006
4005
4007
4006 # a list of (ui, repo, otherpeer, opts, missing) functions called by
4008 # a list of (ui, repo, otherpeer, opts, missing) functions called by
4007 # commands.outgoing. "missing" is "missing" of the result of
4009 # commands.outgoing. "missing" is "missing" of the result of
4008 # "findcommonoutgoing()"
4010 # "findcommonoutgoing()"
4009 outgoinghooks = util.hooks()
4011 outgoinghooks = util.hooks()
4010
4012
4011 # a list of (ui, repo) functions called by commands.summary
4013 # a list of (ui, repo) functions called by commands.summary
4012 summaryhooks = util.hooks()
4014 summaryhooks = util.hooks()
4013
4015
4014 # a list of (ui, repo, opts, changes) functions called by commands.summary.
4016 # a list of (ui, repo, opts, changes) functions called by commands.summary.
4015 #
4017 #
4016 # functions should return tuple of booleans below, if 'changes' is None:
4018 # functions should return tuple of booleans below, if 'changes' is None:
4017 # (whether-incomings-are-needed, whether-outgoings-are-needed)
4019 # (whether-incomings-are-needed, whether-outgoings-are-needed)
4018 #
4020 #
4019 # otherwise, 'changes' is a tuple of tuples below:
4021 # otherwise, 'changes' is a tuple of tuples below:
4020 # - (sourceurl, sourcebranch, sourcepeer, incoming)
4022 # - (sourceurl, sourcebranch, sourcepeer, incoming)
4021 # - (desturl, destbranch, destpeer, outgoing)
4023 # - (desturl, destbranch, destpeer, outgoing)
4022 summaryremotehooks = util.hooks()
4024 summaryremotehooks = util.hooks()
4023
4025
4024
4026
4025 def checkunfinished(repo, commit=False, skipmerge=False):
4027 def checkunfinished(repo, commit=False, skipmerge=False):
4026 '''Look for an unfinished multistep operation, like graft, and abort
4028 '''Look for an unfinished multistep operation, like graft, and abort
4027 if found. It's probably good to check this right before
4029 if found. It's probably good to check this right before
4028 bailifchanged().
4030 bailifchanged().
4029 '''
4031 '''
4030 # Check for non-clearable states first, so things like rebase will take
4032 # Check for non-clearable states first, so things like rebase will take
4031 # precedence over update.
4033 # precedence over update.
4032 for state in statemod._unfinishedstates:
4034 for state in statemod._unfinishedstates:
4033 if (
4035 if (
4034 state._clearable
4036 state._clearable
4035 or (commit and state._allowcommit)
4037 or (commit and state._allowcommit)
4036 or state._reportonly
4038 or state._reportonly
4037 ):
4039 ):
4038 continue
4040 continue
4039 if state.isunfinished(repo):
4041 if state.isunfinished(repo):
4040 raise error.Abort(state.msg(), hint=state.hint())
4042 raise error.Abort(state.msg(), hint=state.hint())
4041
4043
4042 for s in statemod._unfinishedstates:
4044 for s in statemod._unfinishedstates:
4043 if (
4045 if (
4044 not s._clearable
4046 not s._clearable
4045 or (commit and s._allowcommit)
4047 or (commit and s._allowcommit)
4046 or (s._opname == b'merge' and skipmerge)
4048 or (s._opname == b'merge' and skipmerge)
4047 or s._reportonly
4049 or s._reportonly
4048 ):
4050 ):
4049 continue
4051 continue
4050 if s.isunfinished(repo):
4052 if s.isunfinished(repo):
4051 raise error.Abort(s.msg(), hint=s.hint())
4053 raise error.Abort(s.msg(), hint=s.hint())
4052
4054
4053
4055
4054 def clearunfinished(repo):
4056 def clearunfinished(repo):
4055 '''Check for unfinished operations (as above), and clear the ones
4057 '''Check for unfinished operations (as above), and clear the ones
4056 that are clearable.
4058 that are clearable.
4057 '''
4059 '''
4058 for state in statemod._unfinishedstates:
4060 for state in statemod._unfinishedstates:
4059 if state._reportonly:
4061 if state._reportonly:
4060 continue
4062 continue
4061 if not state._clearable and state.isunfinished(repo):
4063 if not state._clearable and state.isunfinished(repo):
4062 raise error.Abort(state.msg(), hint=state.hint())
4064 raise error.Abort(state.msg(), hint=state.hint())
4063
4065
4064 for s in statemod._unfinishedstates:
4066 for s in statemod._unfinishedstates:
4065 if s._opname == b'merge' or s._reportonly:
4067 if s._opname == b'merge' or s._reportonly:
4066 continue
4068 continue
4067 if s._clearable and s.isunfinished(repo):
4069 if s._clearable and s.isunfinished(repo):
4068 util.unlink(repo.vfs.join(s._fname))
4070 util.unlink(repo.vfs.join(s._fname))
4069
4071
4070
4072
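# [editor's note] The statemod._unfinishedstates list consulted above is
# populated through statemod.addunfinished(). A hedged sketch of how an
# extension might register its own multistep operation; the operation name
# and state file are illustrative, and the keyword names follow
# mercurial/state.py at this revision:
#
#   from mercurial import state as statemod
#   statemod.addunfinished(
#       b'myop',            # opname used in error/continue messages
#       b'myopstate',       # state file kept under .hg/ while in progress
#       clearable=True,     # clearunfinished() may remove the state file
#       continueflag=True,  # the operation supports 'hg myop --continue'
#   )
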
4071 def getunfinishedstate(repo):
4073 def getunfinishedstate(repo):
4072 ''' Checks for unfinished operations and returns the statecheck object
4074 ''' Checks for unfinished operations and returns the statecheck object
4073 for it'''
4075 for it'''
4074 for state in statemod._unfinishedstates:
4076 for state in statemod._unfinishedstates:
4075 if state.isunfinished(repo):
4077 if state.isunfinished(repo):
4076 return state
4078 return state
4077 return None
4079 return None
4078
4080
4079
4081
4080 def howtocontinue(repo):
4082 def howtocontinue(repo):
4081 '''Check for an unfinished operation and return the command to finish
4083 '''Check for an unfinished operation and return the command to finish
4082 it.
4084 it.
4083
4085
4084 statemod._unfinishedstates list is checked for an unfinished operation
4086 statemod._unfinishedstates list is checked for an unfinished operation
4085 and the corresponding message to finish it is generated if a method to
4087 and the corresponding message to finish it is generated if a method to
4086 continue is supported by the operation.
4088 continue is supported by the operation.
4087
4089
4088 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
4090 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
4089 a boolean.
4091 a boolean.
4090 '''
4092 '''
4091 contmsg = _(b"continue: %s")
4093 contmsg = _(b"continue: %s")
4092 for state in statemod._unfinishedstates:
4094 for state in statemod._unfinishedstates:
4093 if not state._continueflag:
4095 if not state._continueflag:
4094 continue
4096 continue
4095 if state.isunfinished(repo):
4097 if state.isunfinished(repo):
4096 return contmsg % state.continuemsg(), True
4098 return contmsg % state.continuemsg(), True
4097 if repo[None].dirty(missing=True, merge=False, branch=False):
4099 if repo[None].dirty(missing=True, merge=False, branch=False):
4098 return contmsg % _(b"hg commit"), False
4100 return contmsg % _(b"hg commit"), False
4099 return None, None
4101 return None, None
4100
4102
4101
4103
4102 def checkafterresolved(repo):
4104 def checkafterresolved(repo):
4103 '''Inform the user about the next action after completing hg resolve
4105 '''Inform the user about the next action after completing hg resolve
4104
4106
4105 If there's an unfinished operation that supports a continue flag,
4107 If there's an unfinished operation that supports a continue flag,
4106 howtocontinue will yield repo.ui.warn as the reporter.
4108 howtocontinue will yield repo.ui.warn as the reporter.
4107
4109
4108 Otherwise, it will yield repo.ui.note.
4110 Otherwise, it will yield repo.ui.note.
4109 '''
4111 '''
4110 msg, warning = howtocontinue(repo)
4112 msg, warning = howtocontinue(repo)
4111 if msg is not None:
4113 if msg is not None:
4112 if warning:
4114 if warning:
4113 repo.ui.warn(b"%s\n" % msg)
4115 repo.ui.warn(b"%s\n" % msg)
4114 else:
4116 else:
4115 repo.ui.note(b"%s\n" % msg)
4117 repo.ui.note(b"%s\n" % msg)
4116
4118
4117
4119
4118 def wrongtooltocontinue(repo, task):
4120 def wrongtooltocontinue(repo, task):
4119 '''Raise an abort suggesting how to properly continue if there is an
4121 '''Raise an abort suggesting how to properly continue if there is an
4120 active task.
4122 active task.
4121
4123
4122 Uses howtocontinue() to find the active task.
4124 Uses howtocontinue() to find the active task.
4123
4125
4124 If there's no task (repo.ui.note for 'hg commit'), it does not offer
4126 If there's no task (repo.ui.note for 'hg commit'), it does not offer
4125 a hint.
4127 a hint.
4126 '''
4128 '''
4127 after = howtocontinue(repo)
4129 after = howtocontinue(repo)
4128 hint = None
4130 hint = None
4129 if after[1]:
4131 if after[1]:
4130 hint = after[0]
4132 hint = after[0]
4131 raise error.Abort(_(b'no %s in progress') % task, hint=hint)
4133 raise error.Abort(_(b'no %s in progress') % task, hint=hint)
4132
4134
4133
4135
4134 def abortgraft(ui, repo, graftstate):
4136 def abortgraft(ui, repo, graftstate):
4135 """abort the interrupted graft and rollbacks to the state before interrupted
4137 """abort the interrupted graft and rollbacks to the state before interrupted
4136 graft"""
4138 graft"""
4137 if not graftstate.exists():
4139 if not graftstate.exists():
4138 raise error.Abort(_(b"no interrupted graft to abort"))
4140 raise error.Abort(_(b"no interrupted graft to abort"))
4139 statedata = readgraftstate(repo, graftstate)
4141 statedata = readgraftstate(repo, graftstate)
4140 newnodes = statedata.get(b'newnodes')
4142 newnodes = statedata.get(b'newnodes')
4141 if newnodes is None:
4143 if newnodes is None:
4142 # an old graft state which does not have all the data required to abort
4144 # an old graft state which does not have all the data required to abort
4143 # the graft
4145 # the graft
4144 raise error.Abort(_(b"cannot abort using an old graftstate"))
4146 raise error.Abort(_(b"cannot abort using an old graftstate"))
4145
4147
4146 # changeset from which graft operation was started
4148 # changeset from which graft operation was started
4147 if len(newnodes) > 0:
4149 if len(newnodes) > 0:
4148 startctx = repo[newnodes[0]].p1()
4150 startctx = repo[newnodes[0]].p1()
4149 else:
4151 else:
4150 startctx = repo[b'.']
4152 startctx = repo[b'.']
4151 # whether to strip or not
4153 # whether to strip or not
4152 cleanup = False
4154 cleanup = False
4153 from . import hg
4155 from . import hg
4154
4156
4155 if newnodes:
4157 if newnodes:
4156 newnodes = [repo[r].rev() for r in newnodes]
4158 newnodes = [repo[r].rev() for r in newnodes]
4157 cleanup = True
4159 cleanup = True
4158 # checking that none of the newnodes turned public or is public
4160 # checking that none of the newnodes turned public or is public
4159 immutable = [c for c in newnodes if not repo[c].mutable()]
4161 immutable = [c for c in newnodes if not repo[c].mutable()]
4160 if immutable:
4162 if immutable:
4161 repo.ui.warn(
4163 repo.ui.warn(
4162 _(b"cannot clean up public changesets %s\n")
4164 _(b"cannot clean up public changesets %s\n")
4163 % b', '.join(bytes(repo[r]) for r in immutable),
4165 % b', '.join(bytes(repo[r]) for r in immutable),
4164 hint=_(b"see 'hg help phases' for details"),
4166 hint=_(b"see 'hg help phases' for details"),
4165 )
4167 )
4166 cleanup = False
4168 cleanup = False
4167
4169
4168 # checking that no new nodes are created on top of grafted revs
4170 # checking that no new nodes are created on top of grafted revs
4169 desc = set(repo.changelog.descendants(newnodes))
4171 desc = set(repo.changelog.descendants(newnodes))
4170 if desc - set(newnodes):
4172 if desc - set(newnodes):
4171 repo.ui.warn(
4173 repo.ui.warn(
4172 _(
4174 _(
4173 b"new changesets detected on destination "
4175 b"new changesets detected on destination "
4174 b"branch, can't strip\n"
4176 b"branch, can't strip\n"
4175 )
4177 )
4176 )
4178 )
4177 cleanup = False
4179 cleanup = False
4178
4180
4179 if cleanup:
4181 if cleanup:
4180 with repo.wlock(), repo.lock():
4182 with repo.wlock(), repo.lock():
4181 hg.updaterepo(repo, startctx.node(), overwrite=True)
4183 hg.updaterepo(repo, startctx.node(), overwrite=True)
4182 # stripping the new nodes created
4184 # stripping the new nodes created
4183 strippoints = [
4185 strippoints = [
4184 c.node() for c in repo.set(b"roots(%ld)", newnodes)
4186 c.node() for c in repo.set(b"roots(%ld)", newnodes)
4185 ]
4187 ]
4186 repair.strip(repo.ui, repo, strippoints, backup=False)
4188 repair.strip(repo.ui, repo, strippoints, backup=False)
4187
4189
4188 if not cleanup:
4190 if not cleanup:
4189 # we don't update to the startnode if we can't strip
4191 # we don't update to the startnode if we can't strip
4190 startctx = repo[b'.']
4192 startctx = repo[b'.']
4191 hg.updaterepo(repo, startctx.node(), overwrite=True)
4193 hg.updaterepo(repo, startctx.node(), overwrite=True)
4192
4194
4193 ui.status(_(b"graft aborted\n"))
4195 ui.status(_(b"graft aborted\n"))
4194 ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
4196 ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
4195 graftstate.delete()
4197 graftstate.delete()
4196 return 0
4198 return 0
4197
4199
4198
4200
4199 def readgraftstate(repo, graftstate):
4201 def readgraftstate(repo, graftstate):
4200 # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
4202 # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
4201 """read the graft state file and return a dict of the data stored in it"""
4203 """read the graft state file and return a dict of the data stored in it"""
4202 try:
4204 try:
4203 return graftstate.read()
4205 return graftstate.read()
4204 except error.CorruptedState:
4206 except error.CorruptedState:
4205 nodes = repo.vfs.read(b'graftstate').splitlines()
4207 nodes = repo.vfs.read(b'graftstate').splitlines()
4206 return {b'nodes': nodes}
4208 return {b'nodes': nodes}
4207
4209
4208
4210
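# [editor's note] The CorruptedState fallback above appears to handle the
# legacy graftstate format, where .hg/graftstate was a bare newline-separated
# list of node hashes rather than the versioned file cmdstate.read() parses.
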
4209 def hgabortgraft(ui, repo):
4211 def hgabortgraft(ui, repo):
4210 """ abort logic for aborting graft using 'hg abort'"""
4212 """ abort logic for aborting graft using 'hg abort'"""
4211 with repo.wlock():
4213 with repo.wlock():
4212 graftstate = statemod.cmdstate(repo, b'graftstate')
4214 graftstate = statemod.cmdstate(repo, b'graftstate')
4213 return abortgraft(ui, repo, graftstate)
4215 return abortgraft(ui, repo, graftstate)
@@ -1,3085 +1,3085 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 dagop,
31 dagop,
32 encoding,
32 encoding,
33 error,
33 error,
34 fileset,
34 fileset,
35 match as matchmod,
35 match as matchmod,
36 mergestate as mergestatemod,
36 mergestate as mergestatemod,
37 metadata,
37 metadata,
38 obsolete as obsmod,
38 obsolete as obsmod,
39 patch,
39 patch,
40 pathutil,
40 pathutil,
41 phases,
41 phases,
42 pycompat,
42 pycompat,
43 repoview,
43 repoview,
44 scmutil,
44 scmutil,
45 sparse,
45 sparse,
46 subrepo,
46 subrepo,
47 subrepoutil,
47 subrepoutil,
48 util,
48 util,
49 )
49 )
50 from .utils import (
50 from .utils import (
51 dateutil,
51 dateutil,
52 stringutil,
52 stringutil,
53 )
53 )
54
54
55 propertycache = util.propertycache
55 propertycache = util.propertycache
56
56
57
57
58 class basectx(object):
58 class basectx(object):
59 """A basectx object represents the common logic for its children:
59 """A basectx object represents the common logic for its children:
60 changectx: read-only context that is already present in the repo,
60 changectx: read-only context that is already present in the repo,
61 workingctx: a context that represents the working directory and can
61 workingctx: a context that represents the working directory and can
62 be committed,
62 be committed,
63 memctx: a context that represents changes in-memory and can also
63 memctx: a context that represents changes in-memory and can also
64 be committed."""
64 be committed."""
65
65
66 def __init__(self, repo):
66 def __init__(self, repo):
67 self._repo = repo
67 self._repo = repo
68
68
69 def __bytes__(self):
69 def __bytes__(self):
70 return short(self.node())
70 return short(self.node())
71
71
72 __str__ = encoding.strmethod(__bytes__)
72 __str__ = encoding.strmethod(__bytes__)
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _buildstatusmanifest(self, status):
95 def _buildstatusmanifest(self, status):
96 """Builds a manifest that includes the given status results, if this is
96 """Builds a manifest that includes the given status results, if this is
97 a working copy context. For non-working copy contexts, it just returns
97 a working copy context. For non-working copy contexts, it just returns
98 the normal manifest."""
98 the normal manifest."""
99 return self.manifest()
99 return self.manifest()
100
100
101 def _matchstatus(self, other, match):
101 def _matchstatus(self, other, match):
102 """This internal method provides a way for child objects to override the
102 """This internal method provides a way for child objects to override the
103 match operator.
103 match operator.
104 """
104 """
105 return match
105 return match
106
106
107 def _buildstatus(
107 def _buildstatus(
108 self, other, s, match, listignored, listclean, listunknown
108 self, other, s, match, listignored, listclean, listunknown
109 ):
109 ):
110 """build a status with respect to another context"""
110 """build a status with respect to another context"""
111 # Load earliest manifest first for caching reasons. More specifically,
111 # Load earliest manifest first for caching reasons. More specifically,
112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
114 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # 1000 and cache it so that when you read 1001, we just need to apply a
115 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta to what's in the cache. So that's one full reconstruction + one
116 # delta application.
116 # delta application.
117 mf2 = None
117 mf2 = None
118 if self.rev() is not None and self.rev() < other.rev():
118 if self.rev() is not None and self.rev() < other.rev():
119 mf2 = self._buildstatusmanifest(s)
119 mf2 = self._buildstatusmanifest(s)
120 mf1 = other._buildstatusmanifest(s)
120 mf1 = other._buildstatusmanifest(s)
121 if mf2 is None:
121 if mf2 is None:
122 mf2 = self._buildstatusmanifest(s)
122 mf2 = self._buildstatusmanifest(s)
123
123
124 modified, added = [], []
124 modified, added = [], []
125 removed = []
125 removed = []
126 clean = []
126 clean = []
127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
128 deletedset = set(deleted)
128 deletedset = set(deleted)
129 d = mf1.diff(mf2, match=match, clean=listclean)
129 d = mf1.diff(mf2, match=match, clean=listclean)
130 for fn, value in pycompat.iteritems(d):
130 for fn, value in pycompat.iteritems(d):
131 if fn in deletedset:
131 if fn in deletedset:
132 continue
132 continue
133 if value is None:
133 if value is None:
134 clean.append(fn)
134 clean.append(fn)
135 continue
135 continue
136 (node1, flag1), (node2, flag2) = value
136 (node1, flag1), (node2, flag2) = value
137 if node1 is None:
137 if node1 is None:
138 added.append(fn)
138 added.append(fn)
139 elif node2 is None:
139 elif node2 is None:
140 removed.append(fn)
140 removed.append(fn)
141 elif flag1 != flag2:
141 elif flag1 != flag2:
142 modified.append(fn)
142 modified.append(fn)
143 elif node2 not in wdirfilenodeids:
143 elif node2 not in wdirfilenodeids:
144 # When comparing files between two commits, we save time by
144 # When comparing files between two commits, we save time by
145 # not comparing the file contents when the nodeids differ.
145 # not comparing the file contents when the nodeids differ.
146 # Note that this means we incorrectly report a reverted change
146 # Note that this means we incorrectly report a reverted change
147 # to a file as a modification.
147 # to a file as a modification.
148 modified.append(fn)
148 modified.append(fn)
149 elif self[fn].cmp(other[fn]):
149 elif self[fn].cmp(other[fn]):
150 modified.append(fn)
150 modified.append(fn)
151 else:
151 else:
152 clean.append(fn)
152 clean.append(fn)
153
153
154 if removed:
154 if removed:
155 # need to filter files if they are already reported as removed
155 # need to filter files if they are already reported as removed
156 unknown = [
156 unknown = [
157 fn
157 fn
158 for fn in unknown
158 for fn in unknown
159 if fn not in mf1 and (not match or match(fn))
159 if fn not in mf1 and (not match or match(fn))
160 ]
160 ]
161 ignored = [
161 ignored = [
162 fn
162 fn
163 for fn in ignored
163 for fn in ignored
164 if fn not in mf1 and (not match or match(fn))
164 if fn not in mf1 and (not match or match(fn))
165 ]
165 ]
166 # if they're deleted, don't report them as removed
166 # if they're deleted, don't report them as removed
167 removed = [fn for fn in removed if fn not in deletedset]
167 removed = [fn for fn in removed if fn not in deletedset]
168
168
169 return scmutil.status(
169 return scmutil.status(
170 modified, added, removed, deleted, unknown, ignored, clean
170 modified, added, removed, deleted, unknown, ignored, clean
171 )
171 )
172
172
173 @propertycache
173 @propertycache
174 def substate(self):
174 def substate(self):
175 return subrepoutil.state(self, self._repo.ui)
175 return subrepoutil.state(self, self._repo.ui)
176
176
177 def subrev(self, subpath):
177 def subrev(self, subpath):
178 return self.substate[subpath][1]
178 return self.substate[subpath][1]
179
179
180 def rev(self):
180 def rev(self):
181 return self._rev
181 return self._rev
182
182
183 def node(self):
183 def node(self):
184 return self._node
184 return self._node
185
185
186 def hex(self):
186 def hex(self):
187 return hex(self.node())
187 return hex(self.node())
188
188
189 def manifest(self):
189 def manifest(self):
190 return self._manifest
190 return self._manifest
191
191
192 def manifestctx(self):
192 def manifestctx(self):
193 return self._manifestctx
193 return self._manifestctx
194
194
195 def repo(self):
195 def repo(self):
196 return self._repo
196 return self._repo
197
197
198 def phasestr(self):
198 def phasestr(self):
199 return phases.phasenames[self.phase()]
199 return phases.phasenames[self.phase()]
200
200
201 def mutable(self):
201 def mutable(self):
202 return self.phase() > phases.public
202 return self.phase() > phases.public
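# [editor's note] Phases are ordered integers (public=0, draft=1, secret=2),
# so "greater than public" means draft or secret, i.e. locally mutable.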
203
203
204 def matchfileset(self, cwd, expr, badfn=None):
204 def matchfileset(self, cwd, expr, badfn=None):
205 return fileset.match(self, cwd, expr, badfn=badfn)
205 return fileset.match(self, cwd, expr, badfn=badfn)
206
206
207 def obsolete(self):
207 def obsolete(self):
208 """True if the changeset is obsolete"""
208 """True if the changeset is obsolete"""
209 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
210
210
211 def extinct(self):
211 def extinct(self):
212 """True if the changeset is extinct"""
212 """True if the changeset is extinct"""
213 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
214
214
215 def orphan(self):
215 def orphan(self):
216 """True if the changeset is not obsolete, but its ancestor is"""
216 """True if the changeset is not obsolete, but its ancestor is"""
217 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
218
218
219 def phasedivergent(self):
219 def phasedivergent(self):
220 """True if the changeset tries to be a successor of a public changeset
220 """True if the changeset tries to be a successor of a public changeset
221
221
222 Only non-public and non-obsolete changesets may be phase-divergent.
222 Only non-public and non-obsolete changesets may be phase-divergent.
223 """
223 """
224 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
225
225
226 def contentdivergent(self):
226 def contentdivergent(self):
227 """Is a successor of a changeset with multiple possible successor sets
227 """Is a successor of a changeset with multiple possible successor sets
228
228
229 Only non-public and non-obsolete changesets may be content-divergent.
229 Only non-public and non-obsolete changesets may be content-divergent.
230 """
230 """
231 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
232
232
233 def isunstable(self):
233 def isunstable(self):
234 """True if the changeset is either orphan, phase-divergent or
234 """True if the changeset is either orphan, phase-divergent or
235 content-divergent"""
235 content-divergent"""
236 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236 return self.orphan() or self.phasedivergent() or self.contentdivergent()
237
237
238 def instabilities(self):
238 def instabilities(self):
239 """return the list of instabilities affecting this changeset.
239 """return the list of instabilities affecting this changeset.
240
240
241 Instabilities are returned as strings. possible values are:
241 Instabilities are returned as strings. possible values are:
242 - orphan,
242 - orphan,
243 - phase-divergent,
243 - phase-divergent,
244 - content-divergent.
244 - content-divergent.
245 """
245 """
246 instabilities = []
246 instabilities = []
247 if self.orphan():
247 if self.orphan():
248 instabilities.append(b'orphan')
248 instabilities.append(b'orphan')
249 if self.phasedivergent():
249 if self.phasedivergent():
250 instabilities.append(b'phase-divergent')
250 instabilities.append(b'phase-divergent')
251 if self.contentdivergent():
251 if self.contentdivergent():
252 instabilities.append(b'content-divergent')
252 instabilities.append(b'content-divergent')
253 return instabilities
253 return instabilities
254
254
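    # Illustrative sketch (editor's addition, not part of the upstream file):
    # callers typically consume instabilities() as a list of bytes labels,
    # e.g.:
    #
    #   for name in ctx.instabilities():
    #       ui.warn(b'changeset is %s\n' % name)
    #
    # where `ctx` and `ui` are assumed to be a context object and a ui object
    # obtained elsewhere; the possible labels are exactly the three listed in
    # the docstring above.
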
    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is to
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

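    # Hedged usage sketch (editor's addition, not upstream code): the common
    # fast path described above is comparing the working directory with its
    # first parent, e.g.:
    #
    #   st = repo[None].status(repo[b'.'])   # wdir vs. its first parent
    #
    # Because the contexts get swapped internally and then un-reversed,
    # st.added/st.removed are reported from the wdir point of view; `repo`
    # here is an assumed localrepository instance obtained elsewhere.
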
    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )


class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
        return [
            changectx(repo, p1, cl.node(p1), maybe_filtered=False),
            changectx(repo, p2, cl.node(p2), maybe_filtered=False),
        ]

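    # Illustrative note (editor's addition): the parent contexts above are
    # built with maybe_filtered=False because parentrevs() was just answered
    # by the (possibly filtered) changelog, and for the 'visible' filter a
    # non-filtered revision cannot have filtered parents; skipping the filter
    # check again is a small saving on a very common path. This reading is
    # the editor's inference, not an upstream guarantee.
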
    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

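    # Hedged configuration sketch (editor's addition): the branches above are
    # driven by a repo requirement and a config knob read via ui.config(),
    # roughly:
    #
    #   [experimental]
    #   copies.read-from = compatibility
    #
    # In `changeset-only` mode the changelog record is authoritative (missing
    # data means an empty result); in `compatibility` mode a missing record
    # falls back to recomputing the set from the filelogs. Any other value is
    # treated as filelog mode. The exact value names are as tested above.
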
    def filesremoved(self):
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = metadata.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when the config says to read only from the filelog,
            # we get the copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

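    # Hedged usage sketch (editor's addition): with criss-cross merges a pair
    # of changesets can have several common-ancestor heads; the loop above
    # honours e.g.:
    #
    #   [merge]
    #   preferancestor = 1f8a3c2b9d04
    #
    # picking that node when it is one of the candidates, and printing the
    # "note: using ... as ancestor" hint otherwise. The hash shown is a
    # made-up placeholder.
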
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)


class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        return self._changeid

    def linkrev(self):
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True

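    # Illustrative note (editor's addition): the "- 4" above accounts for the
    # b'\x01\n' metadata framing in filelog revisions. Per the comment in
    # cmp(), a file whose content itself starts with b'\x01\n' is stored with
    # an empty metadata block prepended (b'\x01\n\x01\n' + data), so the
    # filelog-reported size exceeds the working-copy size by exactly 4 bytes
    # even though the contents match, and a real content comparison is still
    # required.
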
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return None and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr

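    # Hedged worked example (editor's addition): if the same file revision is
    # introduced independently in two branches, its filelog entry stores only
    # one linkrev, say changeset 5, even though changeset 8 on the other
    # branch carries the identical content. Asking for the introduction from
    # srcrev=8 therefore cannot trust linkrev() == 5 (5 may not be an
    # ancestor of 8); the walk above scans ancestors of 8 for the first one
    # whose 'files' list touches the path with this exact node. The revision
    # numbers are made up for illustration.
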
    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision
        """
        if self.linkrev() >= changelogrev:
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

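    # Hedged note (editor's addition): callers that only need to know whether
    # the introduction happened at or after some floor revision use
    # isintroducedafter() above, which passes stoprev to bound the possibly
    # expensive ancestor walk instead of calling introrev() directly.
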
    def _introrev(self, stoprev=None):
        """
        Same as `introrev`, but with an extra argument to limit changelog
        iteration range in some internal use cases.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid, pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid parents have always been filtered out in the previous
            # list comprehension, inserting at 0 will always result in
            # replacing the first nullid parent with the rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

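    # Hedged sketch (editor's addition): for a revision created by
    # `hg copy a b`, fl.renamed() yields (b'a', <node of the source file
    # revision>), so the copied-from filectx becomes the first parent
    # returned here, which is what copy tracing and annotate rely on. The
    # command and file names are illustrative only.
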
    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )

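    # Hedged usage sketch (editor's addition): a minimal consumer, assuming
    # `fctx` is a filectx and `ui` a ui object obtained elsewhere:
    #
    #   for line in fctx.annotate(follow=True):
    #       ui.write(b'%d: %s' % (line.fctx.rev(), line.text))
    #
    # mirroring what `hg annotate` prints: the revision that last touched
    # each line, then the line itself.
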
1191 def ancestors(self, followfirst=False):
1191 def ancestors(self, followfirst=False):
1192 visit = {}
1192 visit = {}
1193 c = self
1193 c = self
1194 if followfirst:
1194 if followfirst:
1195 cut = 1
1195 cut = 1
1196 else:
1196 else:
1197 cut = None
1197 cut = None
1198
1198
1199 while True:
1199 while True:
1200 for parent in c.parents()[:cut]:
1200 for parent in c.parents()[:cut]:
1201 visit[(parent.linkrev(), parent.filenode())] = parent
1201 visit[(parent.linkrev(), parent.filenode())] = parent
1202 if not visit:
1202 if not visit:
1203 break
1203 break
1204 c = visit.pop(max(visit))
1204 c = visit.pop(max(visit))
1205 yield c
1205 yield c
1206
1206
    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())


class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered, this may lead to `filectx` trying to
            # build a `changectx` for a filtered revision. In that case we
            # fall back to creating the `changectx` on the unfiltered version
            # of the repository. This fallback should not be an issue because
            # `changectx` objects obtained from a `filectx` are not used in
            # complex operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However,
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious problems with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered once solving the linkrev issues is on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

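    # Note: filectx() above passes self._filelog through on purpose, so
    # walking many revisions of one file reuses a single filelog instance
    # instead of reopening the underlying revlog for every revision.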
    def rawdata(self):
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if the file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

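    # Note: the parent scan in _copied() returns None when either parent
    # already holds this exact file node: in that case the rename happened
    # in an earlier changeset and should not be reported again here.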
    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]


class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

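    # Note: a pending commit can never be in an earlier phase than its
    # parents, so phase() takes the maximum of the configured new-commit
    # phase and every parent's phase.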
    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False


class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """

    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        branch = None
        if not extra or b'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != b'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in b"?r"

    def hex(self):
        return wdirhex

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]

    def setparents(self, p1node, p2node=nullid):
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == nullid:
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)

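    # Note: after the parents change, setparents() keeps a copy record only
    # while it still makes sense against the new first parent: a record is
    # re-established when the destination is absent from p1 but its source
    # exists there, and cleared when neither side exists in p1.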
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts

        return func

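    # Note: the three-way flag comparison in the merge branch mimics merge
    # resolution for a single attribute: if only one side changed the flag
    # relative to the ancestor, that side's flag wins; if both sides changed
    # it differently, the conflict is punted by returning no flag at all.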
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    def flags(self, path):
        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )

    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified"""
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )

    def add(self, list, prefix=b""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked
                # from the keyword extension. That gets flagged as
                # non-portable on Windows, since it contains the drive
                # letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in b'amn':
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
                elif ds[f] == b'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=b""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != b'a':
                    ds.remove(f)
                else:
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    ds.add(dest)
                elif ds[dest] in b'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()

        # Only a case-insensitive filesystem needs magic to translate user
        # input to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == b'l':
                d = self[f].data()
                if (
                    d == b''
                    or len(d) >= 1024
                    or b'\n' in d
                    or stringutil.binary(d)
                ):
                    self._repo.ui.debug(
                        b'ignoring suspect symlink placeholder "%s"\n' % f
                    )
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

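    # Note: "fixup" collects files whose dirstate entry looked possibly
    # stale (for example an ambiguous mtime) but whose flags and content
    # turned out to match the parent; they are in fact clean, and
    # _poststatusfixup() below may refresh the dirstate for them.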
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

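    # Note: the identity check above guards against another process
    # rewriting .hg/dirstate between the status computation and this
    # opportunistic write-back; if the on-disk identity changed, writing
    # the cached state would clobber the other writer, so the fixup is
    # skipped instead.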
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s

    @propertycache
    def _copies(self):
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merging to see that files are different and by the update logic to
        avoid deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in (
            (addednodeid, status.added),
            (modifiednodeid, status.modified),
        ):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

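    # Note: addednodeid and modifiednodeid are sentinel hashes rather than
    # real filelog nodes, so any comparison of this synthetic manifest with
    # a stored manifest sees added/modified files as different -- exactly
    # what merge and update need for uncommitted changes.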
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(
            self._repo.dirstate.walk(
                self._repo.narrowmatch(match),
                subrepos=sorted(self.substate),
                unknown=True,
                ignored=False,
            )
        )

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != b'r')

    def markcommitted(self, node):
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

            # write changes out explicitly, because nesting wlock at
            # runtime may prevent 'wlock.release()' in 'repo.commit()'
            # from immediately doing so for subsequent changing files
            self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)

    def mergestate(self, clean=False):
        if clean:
            return mergestatemod.mergestate.clean(self._repo)
        return mergestatemod.mergestate.read(self._repo)


class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        return []


class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

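    # Note: inverting the comparison lets filectx.cmp() do the work; it can
    # often decide "different" from metadata such as sizes alone and only
    # reads the working-directory data when the stored revision could
    # plausibly match.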
2134 def remove(self, ignoremissing=False):
2134 def remove(self, ignoremissing=False):
2135 """wraps unlink for a repo's working directory"""
2135 """wraps unlink for a repo's working directory"""
2136 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2136 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2137 self._repo.wvfs.unlinkpath(
2137 self._repo.wvfs.unlinkpath(
2138 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2138 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2139 )
2139 )
2140
2140
2141 def write(self, data, flags, backgroundclose=False, **kwargs):
2141 def write(self, data, flags, backgroundclose=False, **kwargs):
2142 """wraps repo.wwrite"""
2142 """wraps repo.wwrite"""
2143 return self._repo.wwrite(
2143 return self._repo.wwrite(
2144 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2144 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2145 )
2145 )
2146
2146
2147 def markcopied(self, src):
2147 def markcopied(self, src):
2148 """marks this file a copy of `src`"""
2148 """marks this file a copy of `src`"""
2149 self._repo.dirstate.copy(src, self._path)
2149 self._repo.dirstate.copy(src, self._path)
2150
2150
2151 def clearunknown(self):
2151 def clearunknown(self):
2152 """Removes conflicting items in the working directory so that
2152 """Removes conflicting items in the working directory so that
2153 ``write()`` can be called successfully.
2153 ``write()`` can be called successfully.
2154 """
2154 """
2155 wvfs = self._repo.wvfs
2155 wvfs = self._repo.wvfs
2156 f = self._path
2156 f = self._path
2157 wvfs.audit(f)
2157 wvfs.audit(f)
2158 if self._repo.ui.configbool(
2158 if self._repo.ui.configbool(
2159 b'experimental', b'merge.checkpathconflicts'
2159 b'experimental', b'merge.checkpathconflicts'
2160 ):
2160 ):
2161 # remove files under the directory as they should already be
2161 # remove files under the directory as they should already be
2162 # warned and backed up
2162 # warned and backed up
2163 if wvfs.isdir(f) and not wvfs.islink(f):
2163 if wvfs.isdir(f) and not wvfs.islink(f):
2164 wvfs.rmtree(f, forcibly=True)
2164 wvfs.rmtree(f, forcibly=True)
2165 for p in reversed(list(pathutil.finddirs(f))):
2165 for p in reversed(list(pathutil.finddirs(f))):
2166 if wvfs.isfileorlink(p):
2166 if wvfs.isfileorlink(p):
2167 wvfs.unlink(p)
2167 wvfs.unlink(p)
2168 break
2168 break
2169 else:
2169 else:
2170 # don't remove files if path conflicts are not processed
2170 # don't remove files if path conflicts are not processed
2171 if wvfs.isdir(f) and not wvfs.islink(f):
2171 if wvfs.isdir(f) and not wvfs.islink(f):
2172 wvfs.removedirs(f)
2172 wvfs.removedirs(f)
2173
2173
2174 def setflags(self, l, x):
2174 def setflags(self, l, x):
2175 self._repo.wvfs.setflags(self._path, l, x)
2175 self._repo.wvfs.setflags(self._path, l, x)
2176
2176
2177
2177
2178 class overlayworkingctx(committablectx):
2178 class overlayworkingctx(committablectx):
2179 """Wraps another mutable context with a write-back cache that can be
2179 """Wraps another mutable context with a write-back cache that can be
2180 converted into a commit context.
2180 converted into a commit context.
2181
2181
2182 self._cache[path] maps to a dict with keys: {
2182 self._cache[path] maps to a dict with keys: {
2183 'exists': bool?
2183 'exists': bool?
2184 'date': date?
2184 'date': date?
2185 'data': str?
2185 'data': str?
2186 'flags': str?
2186 'flags': str?
2187 'copied': str? (path or None)
2187 'copied': str? (path or None)
2188 }
2188 }
2189 If `exists` is True, `flags` and `date` must be non-None. If `exists` is
2189 If `exists` is True, `flags` and `date` must be non-None. If `exists` is
2190 `False`, the file was deleted.
2190 `False`, the file was deleted.
2191 """
2191 """
2192
2192
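# As an illustrative sketch of the cache structure above (path and values
# hypothetical), a dirty entry for a modified executable file might look like:
#
#   self._cache[b'foo/bar.py'] = {
#       b'exists': True,
#       b'data': b'print("hello")\n',
#       b'date': (1590000000, 0),
#       b'flags': b'x',
#       b'copied': None,
#   }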
2193 def __init__(self, repo):
2193 def __init__(self, repo):
2194 super(overlayworkingctx, self).__init__(repo)
2194 super(overlayworkingctx, self).__init__(repo)
2195 self.clean()
2195 self.clean()
2196
2196
2197 def setbase(self, wrappedctx):
2197 def setbase(self, wrappedctx):
2198 self._wrappedctx = wrappedctx
2198 self._wrappedctx = wrappedctx
2199 self._parents = [wrappedctx]
2199 self._parents = [wrappedctx]
2200 # Drop old manifest cache as it is now out of date.
2200 # Drop old manifest cache as it is now out of date.
2201 # This is necessary when, e.g., rebasing several nodes with one
2201 # This is necessary when, e.g., rebasing several nodes with one
2202 # ``overlayworkingctx`` (e.g. with --collapse).
2202 # ``overlayworkingctx`` (e.g. with --collapse).
2203 util.clearcachedproperty(self, b'_manifest')
2203 util.clearcachedproperty(self, b'_manifest')
2204
2204
2205 def setparents(self, p1node, p2node=nullid):
2205 def setparents(self, p1node, p2node=nullid):
2206 assert p1node == self._wrappedctx.node()
2206 assert p1node == self._wrappedctx.node()
2207 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2207 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2208
2208
2209 def data(self, path):
2209 def data(self, path):
2210 if self.isdirty(path):
2210 if self.isdirty(path):
2211 if self._cache[path][b'exists']:
2211 if self._cache[path][b'exists']:
2212 if self._cache[path][b'data'] is not None:
2212 if self._cache[path][b'data'] is not None:
2213 return self._cache[path][b'data']
2213 return self._cache[path][b'data']
2214 else:
2214 else:
2215 # Must fallback here, too, because we only set flags.
2215 # Must fallback here, too, because we only set flags.
2216 return self._wrappedctx[path].data()
2216 return self._wrappedctx[path].data()
2217 else:
2217 else:
2218 raise error.ProgrammingError(
2218 raise error.ProgrammingError(
2219 b"No such file or directory: %s" % path
2219 b"No such file or directory: %s" % path
2220 )
2220 )
2221 else:
2221 else:
2222 return self._wrappedctx[path].data()
2222 return self._wrappedctx[path].data()
2223
2223
2224 @propertycache
2224 @propertycache
2225 def _manifest(self):
2225 def _manifest(self):
2226 parents = self.parents()
2226 parents = self.parents()
2227 man = parents[0].manifest().copy()
2227 man = parents[0].manifest().copy()
2228
2228
2229 flag = self._flagfunc
2229 flag = self._flagfunc
2230 for path in self.added():
2230 for path in self.added():
2231 man[path] = addednodeid
2231 man[path] = addednodeid
2232 man.setflag(path, flag(path))
2232 man.setflag(path, flag(path))
2233 for path in self.modified():
2233 for path in self.modified():
2234 man[path] = modifiednodeid
2234 man[path] = modifiednodeid
2235 man.setflag(path, flag(path))
2235 man.setflag(path, flag(path))
2236 for path in self.removed():
2236 for path in self.removed():
2237 del man[path]
2237 del man[path]
2238 return man
2238 return man
2239
2239
2240 @propertycache
2240 @propertycache
2241 def _flagfunc(self):
2241 def _flagfunc(self):
2242 def f(path):
2242 def f(path):
2243 return self._cache[path][b'flags']
2243 return self._cache[path][b'flags']
2244
2244
2245 return f
2245 return f
2246
2246
2247 def files(self):
2247 def files(self):
2248 return sorted(self.added() + self.modified() + self.removed())
2248 return sorted(self.added() + self.modified() + self.removed())
2249
2249
2250 def modified(self):
2250 def modified(self):
2251 return [
2251 return [
2252 f
2252 f
2253 for f in self._cache.keys()
2253 for f in self._cache.keys()
2254 if self._cache[f][b'exists'] and self._existsinparent(f)
2254 if self._cache[f][b'exists'] and self._existsinparent(f)
2255 ]
2255 ]
2256
2256
2257 def added(self):
2257 def added(self):
2258 return [
2258 return [
2259 f
2259 f
2260 for f in self._cache.keys()
2260 for f in self._cache.keys()
2261 if self._cache[f][b'exists'] and not self._existsinparent(f)
2261 if self._cache[f][b'exists'] and not self._existsinparent(f)
2262 ]
2262 ]
2263
2263
2264 def removed(self):
2264 def removed(self):
2265 return [
2265 return [
2266 f
2266 f
2267 for f in self._cache.keys()
2267 for f in self._cache.keys()
2268 if not self._cache[f][b'exists'] and self._existsinparent(f)
2268 if not self._cache[f][b'exists'] and self._existsinparent(f)
2269 ]
2269 ]
2270
2270
2271 def p1copies(self):
2271 def p1copies(self):
2272 copies = {}
2272 copies = {}
2273 narrowmatch = self._repo.narrowmatch()
2273 narrowmatch = self._repo.narrowmatch()
2274 for f in self._cache.keys():
2274 for f in self._cache.keys():
2275 if not narrowmatch(f):
2275 if not narrowmatch(f):
2276 continue
2276 continue
2277 copies.pop(f, None) # delete if it exists
2277 copies.pop(f, None) # delete if it exists
2278 source = self._cache[f][b'copied']
2278 source = self._cache[f][b'copied']
2279 if source:
2279 if source:
2280 copies[f] = source
2280 copies[f] = source
2281 return copies
2281 return copies
2282
2282
2283 def p2copies(self):
2283 def p2copies(self):
2284 copies = {}
2284 copies = {}
2285 narrowmatch = self._repo.narrowmatch()
2285 narrowmatch = self._repo.narrowmatch()
2286 for f in self._cache.keys():
2286 for f in self._cache.keys():
2287 if not narrowmatch(f):
2287 if not narrowmatch(f):
2288 continue
2288 continue
2289 copies.pop(f, None) # delete if it exists
2289 copies.pop(f, None) # delete if it exists
2290 source = self._cache[f][b'copied']
2290 source = self._cache[f][b'copied']
2291 if source:
2291 if source:
2292 copies[f] = source
2292 copies[f] = source
2293 return copies
2293 return copies
2294
2294
2295 def isinmemory(self):
2295 def isinmemory(self):
2296 return True
2296 return True
2297
2297
2298 def filedate(self, path):
2298 def filedate(self, path):
2299 if self.isdirty(path):
2299 if self.isdirty(path):
2300 return self._cache[path][b'date']
2300 return self._cache[path][b'date']
2301 else:
2301 else:
2302 return self._wrappedctx[path].date()
2302 return self._wrappedctx[path].date()
2303
2303
2304 def markcopied(self, path, origin):
2304 def markcopied(self, path, origin):
2305 self._markdirty(
2305 self._markdirty(
2306 path,
2306 path,
2307 exists=True,
2307 exists=True,
2308 date=self.filedate(path),
2308 date=self.filedate(path),
2309 flags=self.flags(path),
2309 flags=self.flags(path),
2310 copied=origin,
2310 copied=origin,
2311 )
2311 )
2312
2312
2313 def copydata(self, path):
2313 def copydata(self, path):
2314 if self.isdirty(path):
2314 if self.isdirty(path):
2315 return self._cache[path][b'copied']
2315 return self._cache[path][b'copied']
2316 else:
2316 else:
2317 return None
2317 return None
2318
2318
2319 def flags(self, path):
2319 def flags(self, path):
2320 if self.isdirty(path):
2320 if self.isdirty(path):
2321 if self._cache[path][b'exists']:
2321 if self._cache[path][b'exists']:
2322 return self._cache[path][b'flags']
2322 return self._cache[path][b'flags']
2323 else:
2323 else:
2324 raise error.ProgrammingError(
2324 raise error.ProgrammingError(
2325 b"No such file or directory: %s" % path
2325 b"No such file or directory: %s" % path
2326 )
2326 )
2327 else:
2327 else:
2328 return self._wrappedctx[path].flags()
2328 return self._wrappedctx[path].flags()
2329
2329
2330 def __contains__(self, key):
2330 def __contains__(self, key):
2331 if key in self._cache:
2331 if key in self._cache:
2332 return self._cache[key][b'exists']
2332 return self._cache[key][b'exists']
2333 return key in self.p1()
2333 return key in self.p1()
2334
2334
2335 def _existsinparent(self, path):
2335 def _existsinparent(self, path):
2336 try:
2336 try:
2337 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
2337 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
2338 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2338 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2339 # with an ``exists()`` function.
2339 # with an ``exists()`` function.
2340 self._wrappedctx[path]
2340 self._wrappedctx[path]
2341 return True
2341 return True
2342 except error.ManifestLookupError:
2342 except error.ManifestLookupError:
2343 return False
2343 return False
2344
2344
2345 def _auditconflicts(self, path):
2345 def _auditconflicts(self, path):
2346 """Replicates conflict checks done by wvfs.write().
2346 """Replicates conflict checks done by wvfs.write().
2347
2347
2348 Since we never write to the filesystem and never call `applyupdates` in
2348 Since we never write to the filesystem and never call `applyupdates` in
2349 IMM, we'll otherwise never check that a path is actually writable -- e.g.,
2349 IMM, we'll otherwise never check that a path is actually writable -- e.g.,
2350 when a merge adds `a/foo` but `a` is actually a file in the other commit.
2350 when a merge adds `a/foo` but `a` is actually a file in the other commit.
2351 """
2351 """
2352
2352
2353 def fail(path, component):
2353 def fail(path, component):
2354 # p1() is the base and we're receiving "writes" for p2()'s
2354 # p1() is the base and we're receiving "writes" for p2()'s
2355 # files.
2355 # files.
2356 if b'l' in self.p1()[component].flags():
2356 if b'l' in self.p1()[component].flags():
2357 raise error.Abort(
2357 raise error.Abort(
2358 b"error: %s conflicts with symlink %s "
2358 b"error: %s conflicts with symlink %s "
2359 b"in %d." % (path, component, self.p1().rev())
2359 b"in %d." % (path, component, self.p1().rev())
2360 )
2360 )
2361 else:
2361 else:
2362 raise error.Abort(
2362 raise error.Abort(
2363 b"error: '%s' conflicts with file '%s' in "
2363 b"error: '%s' conflicts with file '%s' in "
2364 b"%d." % (path, component, self.p1().rev())
2364 b"%d." % (path, component, self.p1().rev())
2365 )
2365 )
2366
2366
2367 # Test that each new directory to be created to write this path from p2
2367 # Test that each new directory to be created to write this path from p2
2368 # is not a file in p1.
2368 # is not a file in p1.
2369 components = path.split(b'/')
2369 components = path.split(b'/')
2370 for i in pycompat.xrange(len(components)):
2370 for i in pycompat.xrange(len(components)):
2371 component = b"/".join(components[0:i])
2371 component = b"/".join(components[0:i])
2372 if component in self:
2372 if component in self:
2373 fail(path, component)
2373 fail(path, component)
2374
2374
2375 # Test the other direction -- that this path from p2 isn't a directory
2375 # Test the other direction -- that this path from p2 isn't a directory
2376 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2376 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2377 match = self.match([path], default=b'path')
2377 match = self.match([path], default=b'path')
2378 mfiles = list(self.p1().manifest().walk(match))
2378 mfiles = list(self.p1().manifest().walk(match))
2379 if len(mfiles) > 0:
2379 if len(mfiles) > 0:
2380 if len(mfiles) == 1 and mfiles[0] == path:
2380 if len(mfiles) == 1 and mfiles[0] == path:
2381 return
2381 return
2382 # omit the files which are deleted in current IMM wctx
2382 # omit the files which are deleted in current IMM wctx
2383 mfiles = [m for m in mfiles if m in self]
2383 mfiles = [m for m in mfiles if m in self]
2384 if not mfiles:
2384 if not mfiles:
2385 return
2385 return
2386 raise error.Abort(
2386 raise error.Abort(
2387 b"error: file '%s' cannot be written because "
2387 b"error: file '%s' cannot be written because "
2388 b" '%s/' is a directory in %s (containing %d "
2388 b" '%s/' is a directory in %s (containing %d "
2389 b"entries: %s)"
2389 b"entries: %s)"
2390 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2390 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2391 )
2391 )
2392
2392
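# Illustrative sketch of the two directions checked above, assuming a
# hypothetical base commit whose manifest contains only the file b'a/foo':
#
#   wctx._auditconflicts(b'a/foo')    # ok: the exact path is already a file
#   wctx._auditconflicts(b'a/foo/x')  # aborts: component b'a/foo' is a file
#   wctx._auditconflicts(b'a')        # aborts: b'a/' is a directory in p1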
2393 def write(self, path, data, flags=b'', **kwargs):
2393 def write(self, path, data, flags=b'', **kwargs):
2394 if data is None:
2394 if data is None:
2395 raise error.ProgrammingError(b"data must be non-None")
2395 raise error.ProgrammingError(b"data must be non-None")
2396 self._auditconflicts(path)
2396 self._auditconflicts(path)
2397 self._markdirty(
2397 self._markdirty(
2398 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2398 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2399 )
2399 )
2400
2400
2401 def setflags(self, path, l, x):
2401 def setflags(self, path, l, x):
2402 flag = b''
2402 flag = b''
2403 if l:
2403 if l:
2404 flag = b'l'
2404 flag = b'l'
2405 elif x:
2405 elif x:
2406 flag = b'x'
2406 flag = b'x'
2407 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2407 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2408
2408
2409 def remove(self, path):
2409 def remove(self, path):
2410 self._markdirty(path, exists=False)
2410 self._markdirty(path, exists=False)
2411
2411
2412 def exists(self, path):
2412 def exists(self, path):
2413 """exists behaves like `lexists`, but needs to follow symlinks and
2413 """exists behaves like `lexists`, but needs to follow symlinks and
2414 return False if they are broken.
2414 return False if they are broken.
2415 """
2415 """
2416 if self.isdirty(path):
2416 if self.isdirty(path):
2417 # If this path exists and is a symlink, "follow" it by calling
2417 # If this path exists and is a symlink, "follow" it by calling
2418 # exists on the destination path.
2418 # exists on the destination path.
2419 if (
2419 if (
2420 self._cache[path][b'exists']
2420 self._cache[path][b'exists']
2421 and b'l' in self._cache[path][b'flags']
2421 and b'l' in self._cache[path][b'flags']
2422 ):
2422 ):
2423 return self.exists(self._cache[path][b'data'].strip())
2423 return self.exists(self._cache[path][b'data'].strip())
2424 else:
2424 else:
2425 return self._cache[path][b'exists']
2425 return self._cache[path][b'exists']
2426
2426
2427 return self._existsinparent(path)
2427 return self._existsinparent(path)
2428
2428
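# Illustrative sketch (paths hypothetical): for a dirty symlink entry,
# ``exists()`` follows the link target stored as the entry's data, while
# ``lexists()`` does not:
#
#   wctx.write(b'link', b'target', flags=b'l')
#   wctx.exists(b'link')   # True only if b'target' itself exists
#   wctx.lexists(b'link')  # True: the link entry itself exists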
2429 def lexists(self, path):
2429 def lexists(self, path):
2430 """lexists returns True if the path exists"""
2430 """lexists returns True if the path exists"""
2431 if self.isdirty(path):
2431 if self.isdirty(path):
2432 return self._cache[path][b'exists']
2432 return self._cache[path][b'exists']
2433
2433
2434 return self._existsinparent(path)
2434 return self._existsinparent(path)
2435
2435
2436 def size(self, path):
2436 def size(self, path):
2437 if self.isdirty(path):
2437 if self.isdirty(path):
2438 if self._cache[path][b'exists']:
2438 if self._cache[path][b'exists']:
2439 return len(self._cache[path][b'data'])
2439 return len(self._cache[path][b'data'])
2440 else:
2440 else:
2441 raise error.ProgrammingError(
2441 raise error.ProgrammingError(
2442 b"No such file or directory: %s" % path
2442 b"No such file or directory: %s" % path
2443 )
2443 )
2444 return self._wrappedctx[path].size()
2444 return self._wrappedctx[path].size()
2445
2445
2446 def tomemctx(
2446 def tomemctx(
2447 self,
2447 self,
2448 text,
2448 text,
2449 branch=None,
2449 branch=None,
2450 extra=None,
2450 extra=None,
2451 date=None,
2451 date=None,
2452 parents=None,
2452 parents=None,
2453 user=None,
2453 user=None,
2454 editor=None,
2454 editor=None,
2455 ):
2455 ):
2456 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2456 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2457 committed.
2457 committed.
2458
2458
2459 ``text`` is the commit message.
2459 ``text`` is the commit message.
2460 ``parents`` (optional) are rev numbers.
2460 ``parents`` (optional) are rev numbers.
2461 """
2461 """
2462 # Default parents to the wrapped context if not passed.
2462 # Default parents to the wrapped context if not passed.
2463 if parents is None:
2463 if parents is None:
2464 parents = self.parents()
2464 parents = self.parents()
2465 if len(parents) == 1:
2465 if len(parents) == 1:
2466 parents = (parents[0], None)
2466 parents = (parents[0], None)
2467
2467
2468 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2468 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2469 if parents[1] is None:
2469 if parents[1] is None:
2470 parents = (self._repo[parents[0]], None)
2470 parents = (self._repo[parents[0]], None)
2471 else:
2471 else:
2472 parents = (self._repo[parents[0]], self._repo[parents[1]])
2472 parents = (self._repo[parents[0]], self._repo[parents[1]])
2473
2473
2474 files = self.files()
2474 files = self.files()
2475
2475
2476 def getfile(repo, memctx, path):
2476 def getfile(repo, memctx, path):
2477 if self._cache[path][b'exists']:
2477 if self._cache[path][b'exists']:
2478 return memfilectx(
2478 return memfilectx(
2479 repo,
2479 repo,
2480 memctx,
2480 memctx,
2481 path,
2481 path,
2482 self._cache[path][b'data'],
2482 self._cache[path][b'data'],
2483 b'l' in self._cache[path][b'flags'],
2483 b'l' in self._cache[path][b'flags'],
2484 b'x' in self._cache[path][b'flags'],
2484 b'x' in self._cache[path][b'flags'],
2485 self._cache[path][b'copied'],
2485 self._cache[path][b'copied'],
2486 )
2486 )
2487 else:
2487 else:
2488 # Returning None, but including the path in `files`, is
2488 # Returning None, but including the path in `files`, is
2489 # necessary for memctx to register a deletion.
2489 # necessary for memctx to register a deletion.
2490 return None
2490 return None
2491
2491
2492 if branch is None:
2492 if branch is None:
2493 branch = self._wrappedctx.branch()
2493 branch = self._wrappedctx.branch()
2494
2494
2495 return memctx(
2495 return memctx(
2496 self._repo,
2496 self._repo,
2497 parents,
2497 parents,
2498 text,
2498 text,
2499 files,
2499 files,
2500 getfile,
2500 getfile,
2501 date=date,
2501 date=date,
2502 extra=extra,
2502 extra=extra,
2503 user=user,
2503 user=user,
2504 branch=branch,
2504 branch=branch,
2505 editor=editor,
2505 editor=editor,
2506 )
2506 )
2507
2507
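# Illustrative usage sketch (names hypothetical): an in-memory rebase might
# collapse this overlay into a commit like so, with ``destrev`` a rev number
# as the docstring above requires:
#
#   mctx = wctx.tomemctx(
#       b'collapsed changes',
#       parents=(destrev, None),
#       user=b'someone <someone@example.com>',
#   )
#   node = mctx.commit()  # memctx.commit() calls repo.commitctx(self)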
2508 def tomemctx_for_amend(self, precursor):
2508 def tomemctx_for_amend(self, precursor):
2509 extra = precursor.extra().copy()
2509 extra = precursor.extra().copy()
2510 extra[b'amend_source'] = precursor.hex()
2510 extra[b'amend_source'] = precursor.hex()
2511 return self.tomemctx(
2511 return self.tomemctx(
2512 text=precursor.description(),
2512 text=precursor.description(),
2513 branch=precursor.branch(),
2513 branch=precursor.branch(),
2514 extra=extra,
2514 extra=extra,
2515 date=precursor.date(),
2515 date=precursor.date(),
2516 user=precursor.user(),
2516 user=precursor.user(),
2517 )
2517 )
2518
2518
2519 def isdirty(self, path):
2519 def isdirty(self, path):
2520 return path in self._cache
2520 return path in self._cache
2521
2521
2522 def isempty(self):
2522 def isempty(self):
2523 # We need to discard any keys that are actually clean before the empty
2523 # We need to discard any keys that are actually clean before the empty
2524 # commit check.
2524 # commit check.
2525 self._compact()
2525 self._compact()
2526 return len(self._cache) == 0
2526 return len(self._cache) == 0
2527
2527
2528 def clean(self):
2528 def clean(self):
2529 self._cache = {}
2529 self._cache = {}
2530
2530
2531 def _compact(self):
2531 def _compact(self):
2532 """Removes keys from the cache that are actually clean, by comparing
2532 """Removes keys from the cache that are actually clean, by comparing
2533 them with the underlying context.
2533 them with the underlying context.
2534
2534
2535 This can occur during the merge process, e.g. by passing --tool :local
2535 This can occur during the merge process, e.g. by passing --tool :local
2536 to resolve a conflict.
2536 to resolve a conflict.
2537 """
2537 """
2538 keys = []
2538 keys = []
2539 # This won't be perfect, but can help performance significantly when
2539 # This won't be perfect, but can help performance significantly when
2540 # using things like remotefilelog.
2540 # using things like remotefilelog.
2541 scmutil.prefetchfiles(
2541 scmutil.prefetchfiles(
2542 self.repo(),
2542 self.repo(),
2543 [self.p1().rev()],
2543 [(self.p1().rev(),
2544 scmutil.matchfiles(self.repo(), self._cache.keys()),
2544 scmutil.matchfiles(self.repo(), self._cache.keys()))],
2545 )
2545 )
2546
2546
2547 for path in self._cache.keys():
2547 for path in self._cache.keys():
2548 cache = self._cache[path]
2548 cache = self._cache[path]
2549 try:
2549 try:
2550 underlying = self._wrappedctx[path]
2550 underlying = self._wrappedctx[path]
2551 if (
2551 if (
2552 underlying.data() == cache[b'data']
2552 underlying.data() == cache[b'data']
2553 and underlying.flags() == cache[b'flags']
2553 and underlying.flags() == cache[b'flags']
2554 ):
2554 ):
2555 keys.append(path)
2555 keys.append(path)
2556 except error.ManifestLookupError:
2556 except error.ManifestLookupError:
2557 # Path not in the underlying manifest (created).
2557 # Path not in the underlying manifest (created).
2558 continue
2558 continue
2559
2559
2560 for path in keys:
2560 for path in keys:
2561 del self._cache[path]
2561 del self._cache[path]
2562 return keys
2562 return keys
2563
2563
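# The prefetch call above reflects this change: scmutil.prefetchfiles() now
# takes a list of (revision, matcher) pairs, so different files can be
# prefetched per revision. An illustrative call (rev names hypothetical):
#
#   scmutil.prefetchfiles(
#       repo,
#       [
#           (baserev, scmutil.matchfiles(repo, [b'a', b'b'])),
#           (otherrev, scmutil.matchfiles(repo, [b'c'])),
#       ],
#   )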
2564 def _markdirty(
2564 def _markdirty(
2565 self, path, exists, data=None, date=None, flags=b'', copied=None
2565 self, path, exists, data=None, date=None, flags=b'', copied=None
2566 ):
2566 ):
2567 # data not provided, let's see if we already have some; if not, let's
2567 # data not provided, let's see if we already have some; if not, let's
2568 # grab it from our underlying context, so that we always have data if
2568 # grab it from our underlying context, so that we always have data if
2569 # the file is marked as existing.
2569 # the file is marked as existing.
2570 if exists and data is None:
2570 if exists and data is None:
2571 oldentry = self._cache.get(path) or {}
2571 oldentry = self._cache.get(path) or {}
2572 data = oldentry.get(b'data')
2572 data = oldentry.get(b'data')
2573 if data is None:
2573 if data is None:
2574 data = self._wrappedctx[path].data()
2574 data = self._wrappedctx[path].data()
2575
2575
2576 self._cache[path] = {
2576 self._cache[path] = {
2577 b'exists': exists,
2577 b'exists': exists,
2578 b'data': data,
2578 b'data': data,
2579 b'date': date,
2579 b'date': date,
2580 b'flags': flags,
2580 b'flags': flags,
2581 b'copied': copied,
2581 b'copied': copied,
2582 }
2582 }
2583
2583
2584 def filectx(self, path, filelog=None):
2584 def filectx(self, path, filelog=None):
2585 return overlayworkingfilectx(
2585 return overlayworkingfilectx(
2586 self._repo, path, parent=self, filelog=filelog
2586 self._repo, path, parent=self, filelog=filelog
2587 )
2587 )
2588
2588
2589
2589
2590 class overlayworkingfilectx(committablefilectx):
2590 class overlayworkingfilectx(committablefilectx):
2591 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2591 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2592 cache, which can be flushed through later by calling ``flush()``."""
2592 cache, which can be flushed through later by calling ``flush()``."""
2593
2593
2594 def __init__(self, repo, path, filelog=None, parent=None):
2594 def __init__(self, repo, path, filelog=None, parent=None):
2595 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2595 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2596 self._repo = repo
2596 self._repo = repo
2597 self._parent = parent
2597 self._parent = parent
2598 self._path = path
2598 self._path = path
2599
2599
2600 def cmp(self, fctx):
2600 def cmp(self, fctx):
2601 return self.data() != fctx.data()
2601 return self.data() != fctx.data()
2602
2602
2603 def changectx(self):
2603 def changectx(self):
2604 return self._parent
2604 return self._parent
2605
2605
2606 def data(self):
2606 def data(self):
2607 return self._parent.data(self._path)
2607 return self._parent.data(self._path)
2608
2608
2609 def date(self):
2609 def date(self):
2610 return self._parent.filedate(self._path)
2610 return self._parent.filedate(self._path)
2611
2611
2612 def exists(self):
2612 def exists(self):
2613 return self.lexists()
2613 return self.lexists()
2614
2614
2615 def lexists(self):
2615 def lexists(self):
2616 return self._parent.exists(self._path)
2616 return self._parent.exists(self._path)
2617
2617
2618 def copysource(self):
2618 def copysource(self):
2619 return self._parent.copydata(self._path)
2619 return self._parent.copydata(self._path)
2620
2620
2621 def size(self):
2621 def size(self):
2622 return self._parent.size(self._path)
2622 return self._parent.size(self._path)
2623
2623
2624 def markcopied(self, origin):
2624 def markcopied(self, origin):
2625 self._parent.markcopied(self._path, origin)
2625 self._parent.markcopied(self._path, origin)
2626
2626
2627 def audit(self):
2627 def audit(self):
2628 pass
2628 pass
2629
2629
2630 def flags(self):
2630 def flags(self):
2631 return self._parent.flags(self._path)
2631 return self._parent.flags(self._path)
2632
2632
2633 def setflags(self, islink, isexec):
2633 def setflags(self, islink, isexec):
2634 return self._parent.setflags(self._path, islink, isexec)
2634 return self._parent.setflags(self._path, islink, isexec)
2635
2635
2636 def write(self, data, flags, backgroundclose=False, **kwargs):
2636 def write(self, data, flags, backgroundclose=False, **kwargs):
2637 return self._parent.write(self._path, data, flags, **kwargs)
2637 return self._parent.write(self._path, data, flags, **kwargs)
2638
2638
2639 def remove(self, ignoremissing=False):
2639 def remove(self, ignoremissing=False):
2640 return self._parent.remove(self._path)
2640 return self._parent.remove(self._path)
2641
2641
2642 def clearunknown(self):
2642 def clearunknown(self):
2643 pass
2643 pass
2644
2644
2645
2645
2646 class workingcommitctx(workingctx):
2646 class workingcommitctx(workingctx):
2647 """A workingcommitctx object makes access to data related to
2647 """A workingcommitctx object makes access to data related to
2648 the revision being committed convenient.
2648 the revision being committed convenient.
2649
2649
2650 This hides changes in the working directory that aren't
2650 This hides changes in the working directory that aren't
2651 committed in this context.
2651 committed in this context.
2652 """
2652 """
2653
2653
2654 def __init__(
2654 def __init__(
2655 self, repo, changes, text=b"", user=None, date=None, extra=None
2655 self, repo, changes, text=b"", user=None, date=None, extra=None
2656 ):
2656 ):
2657 super(workingcommitctx, self).__init__(
2657 super(workingcommitctx, self).__init__(
2658 repo, text, user, date, extra, changes
2658 repo, text, user, date, extra, changes
2659 )
2659 )
2660
2660
2661 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2661 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2662 """Return matched files only in ``self._status``
2662 """Return matched files only in ``self._status``
2663
2663
2664 Uncommitted files appear "clean" via this context, even if
2664 Uncommitted files appear "clean" via this context, even if
2665 they aren't actually so in the working directory.
2665 they aren't actually so in the working directory.
2666 """
2666 """
2667 if clean:
2667 if clean:
2668 clean = [f for f in self._manifest if f not in self._changedset]
2668 clean = [f for f in self._manifest if f not in self._changedset]
2669 else:
2669 else:
2670 clean = []
2670 clean = []
2671 return scmutil.status(
2671 return scmutil.status(
2672 [f for f in self._status.modified if match(f)],
2672 [f for f in self._status.modified if match(f)],
2673 [f for f in self._status.added if match(f)],
2673 [f for f in self._status.added if match(f)],
2674 [f for f in self._status.removed if match(f)],
2674 [f for f in self._status.removed if match(f)],
2675 [],
2675 [],
2676 [],
2676 [],
2677 [],
2677 [],
2678 clean,
2678 clean,
2679 )
2679 )
2680
2680
2681 @propertycache
2681 @propertycache
2682 def _changedset(self):
2682 def _changedset(self):
2683 """Return the set of files changed in this context
2683 """Return the set of files changed in this context
2684 """
2684 """
2685 changed = set(self._status.modified)
2685 changed = set(self._status.modified)
2686 changed.update(self._status.added)
2686 changed.update(self._status.added)
2687 changed.update(self._status.removed)
2687 changed.update(self._status.removed)
2688 return changed
2688 return changed
2689
2689
2690
2690
2691 def makecachingfilectxfn(func):
2691 def makecachingfilectxfn(func):
2692 """Create a filectxfn that caches based on the path.
2692 """Create a filectxfn that caches based on the path.
2693
2693
2694 We can't use util.cachefunc because it uses all arguments as the cache
2694 We can't use util.cachefunc because it uses all arguments as the cache
2695 key and this creates a cycle since the arguments include the repo and
2695 key and this creates a cycle since the arguments include the repo and
2696 memctx.
2696 memctx.
2697 """
2697 """
2698 cache = {}
2698 cache = {}
2699
2699
2700 def getfilectx(repo, memctx, path):
2700 def getfilectx(repo, memctx, path):
2701 if path not in cache:
2701 if path not in cache:
2702 cache[path] = func(repo, memctx, path)
2702 cache[path] = func(repo, memctx, path)
2703 return cache[path]
2703 return cache[path]
2704
2704
2705 return getfilectx
2705 return getfilectx
2706
2706
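# Illustrative usage sketch (``expensivefn`` is a hypothetical callable with
# the (repo, memctx, path) signature):
#
#   cachedfn = makecachingfilectxfn(expensivefn)
#   fctx1 = cachedfn(repo, memctx, b'foo')  # computed and cached by path
#   fctx2 = cachedfn(repo, memctx, b'foo')  # served from the path-keyed cache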
2707
2707
2708 def memfilefromctx(ctx):
2708 def memfilefromctx(ctx):
2709 """Given a context return a memfilectx for ctx[path]
2709 """Given a context return a memfilectx for ctx[path]
2710
2710
2711 This is a convenience method for building a memctx based on another
2711 This is a convenience method for building a memctx based on another
2712 context.
2712 context.
2713 """
2713 """
2714
2714
2715 def getfilectx(repo, memctx, path):
2715 def getfilectx(repo, memctx, path):
2716 fctx = ctx[path]
2716 fctx = ctx[path]
2717 copysource = fctx.copysource()
2717 copysource = fctx.copysource()
2718 return memfilectx(
2718 return memfilectx(
2719 repo,
2719 repo,
2720 memctx,
2720 memctx,
2721 path,
2721 path,
2722 fctx.data(),
2722 fctx.data(),
2723 islink=fctx.islink(),
2723 islink=fctx.islink(),
2724 isexec=fctx.isexec(),
2724 isexec=fctx.isexec(),
2725 copysource=copysource,
2725 copysource=copysource,
2726 )
2726 )
2727
2727
2728 return getfilectx
2728 return getfilectx
2729
2729
2730
2730
2731 def memfilefrompatch(patchstore):
2731 def memfilefrompatch(patchstore):
2732 """Given a patch (e.g. patchstore object) return a memfilectx
2732 """Given a patch (e.g. patchstore object) return a memfilectx
2733
2733
2734 This is a convenience method for building a memctx based on a patchstore.
2734 This is a convenience method for building a memctx based on a patchstore.
2735 """
2735 """
2736
2736
2737 def getfilectx(repo, memctx, path):
2737 def getfilectx(repo, memctx, path):
2738 data, mode, copysource = patchstore.getfile(path)
2738 data, mode, copysource = patchstore.getfile(path)
2739 if data is None:
2739 if data is None:
2740 return None
2740 return None
2741 islink, isexec = mode
2741 islink, isexec = mode
2742 return memfilectx(
2742 return memfilectx(
2743 repo,
2743 repo,
2744 memctx,
2744 memctx,
2745 path,
2745 path,
2746 data,
2746 data,
2747 islink=islink,
2747 islink=islink,
2748 isexec=isexec,
2748 isexec=isexec,
2749 copysource=copysource,
2749 copysource=copysource,
2750 )
2750 )
2751
2751
2752 return getfilectx
2752 return getfilectx
2753
2753
2754
2754
2755 class memctx(committablectx):
2755 class memctx(committablectx):
2756 """Use memctx to perform in-memory commits via localrepo.commitctx().
2756 """Use memctx to perform in-memory commits via localrepo.commitctx().
2757
2757
2758 Revision information is supplied at initialization time, while the
2758 Revision information is supplied at initialization time, while the
2759 related files' data is made available through a callback
2759 related files' data is made available through a callback
2760 mechanism. 'repo' is the current localrepo, 'parents' is a
2760 mechanism. 'repo' is the current localrepo, 'parents' is a
2761 sequence of two parent revision identifiers (pass None for every
2761 sequence of two parent revision identifiers (pass None for every
2762 missing parent), 'text' is the commit message and 'files' lists
2762 missing parent), 'text' is the commit message and 'files' lists
2763 names of files touched by the revision (normalized and relative to
2763 names of files touched by the revision (normalized and relative to
2764 repository root).
2764 repository root).
2765
2765
2766 filectxfn(repo, memctx, path) is a callable receiving the
2766 filectxfn(repo, memctx, path) is a callable receiving the
2767 repository, the current memctx object and the normalized path of
2767 repository, the current memctx object and the normalized path of
2768 the requested file, relative to the repository root. It is called by the
2768 the requested file, relative to the repository root. It is called by the
2769 commit function for every file in 'files', but the order of calls is
2769 commit function for every file in 'files', but the order of calls is
2770 undefined. If the file is available in the revision being
2770 undefined. If the file is available in the revision being
2771 committed (updated or added), filectxfn returns a memfilectx
2771 committed (updated or added), filectxfn returns a memfilectx
2772 object. If the file was removed, filectxfn returns None for recent
2772 object. If the file was removed, filectxfn returns None for recent
2773 Mercurial. Moved files are represented by marking the source file
2773 Mercurial. Moved files are represented by marking the source file
2774 removed and the new file added with copy information (see
2774 removed and the new file added with copy information (see
2775 memfilectx).
2775 memfilectx).
2776
2776
2777 user receives the committer name and defaults to current
2777 user receives the committer name and defaults to current
2778 repository username, date is the commit date in any format
2778 repository username, date is the commit date in any format
2779 supported by dateutil.parsedate() and defaults to current date, extra
2779 supported by dateutil.parsedate() and defaults to current date, extra
2780 is a dictionary of metadata or is left empty.
2780 is a dictionary of metadata or is left empty.
2781 """
2781 """
2782
2782
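# Illustrative construction sketch (file name, contents, and message are
# hypothetical):
#
#   def filectxfn(repo, memctx, path):
#       return memfilectx(repo, memctx, path, b'new contents\n')
#
#   ctx = memctx(
#       repo,
#       parents=(repo[b'.'].node(), None),
#       text=b'example commit',
#       files=[b'foo'],
#       filectxfn=filectxfn,
#   )
#   node = ctx.commit()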
2783 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2783 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2784 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2784 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2785 # this field to determine what to do in filectxfn.
2785 # this field to determine what to do in filectxfn.
2786 _returnnoneformissingfiles = True
2786 _returnnoneformissingfiles = True
2787
2787
2788 def __init__(
2788 def __init__(
2789 self,
2789 self,
2790 repo,
2790 repo,
2791 parents,
2791 parents,
2792 text,
2792 text,
2793 files,
2793 files,
2794 filectxfn,
2794 filectxfn,
2795 user=None,
2795 user=None,
2796 date=None,
2796 date=None,
2797 extra=None,
2797 extra=None,
2798 branch=None,
2798 branch=None,
2799 editor=None,
2799 editor=None,
2800 ):
2800 ):
2801 super(memctx, self).__init__(
2801 super(memctx, self).__init__(
2802 repo, text, user, date, extra, branch=branch
2802 repo, text, user, date, extra, branch=branch
2803 )
2803 )
2804 self._rev = None
2804 self._rev = None
2805 self._node = None
2805 self._node = None
2806 parents = [(p or nullid) for p in parents]
2806 parents = [(p or nullid) for p in parents]
2807 p1, p2 = parents
2807 p1, p2 = parents
2808 self._parents = [self._repo[p] for p in (p1, p2)]
2808 self._parents = [self._repo[p] for p in (p1, p2)]
2809 files = sorted(set(files))
2809 files = sorted(set(files))
2810 self._files = files
2810 self._files = files
2811 self.substate = {}
2811 self.substate = {}
2812
2812
2813 if isinstance(filectxfn, patch.filestore):
2813 if isinstance(filectxfn, patch.filestore):
2814 filectxfn = memfilefrompatch(filectxfn)
2814 filectxfn = memfilefrompatch(filectxfn)
2815 elif not callable(filectxfn):
2815 elif not callable(filectxfn):
2816 # if store is not callable, wrap it in a function
2816 # if store is not callable, wrap it in a function
2817 filectxfn = memfilefromctx(filectxfn)
2817 filectxfn = memfilefromctx(filectxfn)
2818
2818
2819 # memoizing increases performance for e.g. vcs convert scenarios.
2819 # memoizing increases performance for e.g. vcs convert scenarios.
2820 self._filectxfn = makecachingfilectxfn(filectxfn)
2820 self._filectxfn = makecachingfilectxfn(filectxfn)
2821
2821
2822 if editor:
2822 if editor:
2823 self._text = editor(self._repo, self, [])
2823 self._text = editor(self._repo, self, [])
2824 self._repo.savecommitmessage(self._text)
2824 self._repo.savecommitmessage(self._text)
2825
2825
2826 def filectx(self, path, filelog=None):
2826 def filectx(self, path, filelog=None):
2827 """get a file context from the working directory
2827 """get a file context from the working directory
2828
2828
2829 Returns None if the file doesn't exist and should be removed."""
2829 Returns None if the file doesn't exist and should be removed."""
2830 return self._filectxfn(self._repo, self, path)
2830 return self._filectxfn(self._repo, self, path)
2831
2831
2832 def commit(self):
2832 def commit(self):
2833 """commit context to the repo"""
2833 """commit context to the repo"""
2834 return self._repo.commitctx(self)
2834 return self._repo.commitctx(self)
2835
2835
2836 @propertycache
2836 @propertycache
2837 def _manifest(self):
2837 def _manifest(self):
2838 """generate a manifest based on the return values of filectxfn"""
2838 """generate a manifest based on the return values of filectxfn"""
2839
2839
2840 # keep this simple for now; just worry about p1
2840 # keep this simple for now; just worry about p1
2841 pctx = self._parents[0]
2841 pctx = self._parents[0]
2842 man = pctx.manifest().copy()
2842 man = pctx.manifest().copy()
2843
2843
2844 for f in self._status.modified:
2844 for f in self._status.modified:
2845 man[f] = modifiednodeid
2845 man[f] = modifiednodeid
2846
2846
2847 for f in self._status.added:
2847 for f in self._status.added:
2848 man[f] = addednodeid
2848 man[f] = addednodeid
2849
2849
2850 for f in self._status.removed:
2850 for f in self._status.removed:
2851 if f in man:
2851 if f in man:
2852 del man[f]
2852 del man[f]
2853
2853
2854 return man
2854 return man
2855
2855
2856 @propertycache
2856 @propertycache
2857 def _status(self):
2857 def _status(self):
2858 """Calculate exact status from ``files`` specified at construction
2858 """Calculate exact status from ``files`` specified at construction
2859 """
2859 """
2860 man1 = self.p1().manifest()
2860 man1 = self.p1().manifest()
2861 p2 = self._parents[1]
2861 p2 = self._parents[1]
2862 # "1 < len(self._parents)" can't be used for checking
2862 # "1 < len(self._parents)" can't be used for checking
2863 # existence of the 2nd parent, because "memctx._parents" is
2863 # existence of the 2nd parent, because "memctx._parents" is
2864 # explicitly initialized with a list whose length is always 2.
2864 # explicitly initialized with a list whose length is always 2.
2865 if p2.node() != nullid:
2865 if p2.node() != nullid:
2866 man2 = p2.manifest()
2866 man2 = p2.manifest()
2867 managing = lambda f: f in man1 or f in man2
2867 managing = lambda f: f in man1 or f in man2
2868 else:
2868 else:
2869 managing = lambda f: f in man1
2869 managing = lambda f: f in man1
2870
2870
2871 modified, added, removed = [], [], []
2871 modified, added, removed = [], [], []
2872 for f in self._files:
2872 for f in self._files:
2873 if not managing(f):
2873 if not managing(f):
2874 added.append(f)
2874 added.append(f)
2875 elif self[f]:
2875 elif self[f]:
2876 modified.append(f)
2876 modified.append(f)
2877 else:
2877 else:
2878 removed.append(f)
2878 removed.append(f)
2879
2879
2880 return scmutil.status(modified, added, removed, [], [], [], [])
2880 return scmutil.status(modified, added, removed, [], [], [], [])
2881
2881
2882
2882
2883 class memfilectx(committablefilectx):
2883 class memfilectx(committablefilectx):
2884 """memfilectx represents an in-memory file to commit.
2884 """memfilectx represents an in-memory file to commit.
2885
2885
2886 See memctx and committablefilectx for more details.
2886 See memctx and committablefilectx for more details.
2887 """
2887 """
2888
2888
2889 def __init__(
2889 def __init__(
2890 self,
2890 self,
2891 repo,
2891 repo,
2892 changectx,
2892 changectx,
2893 path,
2893 path,
2894 data,
2894 data,
2895 islink=False,
2895 islink=False,
2896 isexec=False,
2896 isexec=False,
2897 copysource=None,
2897 copysource=None,
2898 ):
2898 ):
2899 """
2899 """
2900 path is the normalized file path relative to repository root.
2900 path is the normalized file path relative to repository root.
2901 data is the file content as a string.
2901 data is the file content as a string.
2902 islink is True if the file is a symbolic link.
2902 islink is True if the file is a symbolic link.
2903 isexec is True if the file is executable.
2903 isexec is True if the file is executable.
2904 copied is the source file path if current file was copied in the
2904 copied is the source file path if current file was copied in the
2905 revision being committed, or None."""
2905 revision being committed, or None."""
2906 super(memfilectx, self).__init__(repo, path, None, changectx)
2906 super(memfilectx, self).__init__(repo, path, None, changectx)
2907 self._data = data
2907 self._data = data
2908 if islink:
2908 if islink:
2909 self._flags = b'l'
2909 self._flags = b'l'
2910 elif isexec:
2910 elif isexec:
2911 self._flags = b'x'
2911 self._flags = b'x'
2912 else:
2912 else:
2913 self._flags = b''
2913 self._flags = b''
2914 self._copysource = copysource
2914 self._copysource = copysource
2915
2915
2916 def copysource(self):
2916 def copysource(self):
2917 return self._copysource
2917 return self._copysource
2918
2918
2919 def cmp(self, fctx):
2919 def cmp(self, fctx):
2920 return self.data() != fctx.data()
2920 return self.data() != fctx.data()
2921
2921
2922 def data(self):
2922 def data(self):
2923 return self._data
2923 return self._data
2924
2924
2925 def remove(self, ignoremissing=False):
2925 def remove(self, ignoremissing=False):
2926 """wraps unlink for a repo's working directory"""
2926 """wraps unlink for a repo's working directory"""
2927 # need to figure out what to do here
2927 # need to figure out what to do here
2928 del self._changectx[self._path]
2928 del self._changectx[self._path]
2929
2929
2930 def write(self, data, flags, **kwargs):
2930 def write(self, data, flags, **kwargs):
2931 """wraps repo.wwrite"""
2931 """wraps repo.wwrite"""
2932 self._data = data
2932 self._data = data
2933
2933
2934
2934
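# Illustrative sketch (paths hypothetical): a rename is represented in a
# memctx by marking the source removed and adding the destination with copy
# metadata, per the memctx docstring above:
#
#   memfilectx(repo, memctx, b'new.py', data, copysource=b'old.py')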
2935 class metadataonlyctx(committablectx):
2935 class metadataonlyctx(committablectx):
2936 """Like memctx but it's reusing the manifest of different commit.
2936 """Like memctx but it's reusing the manifest of different commit.
2937 Intended to be used by lightweight operations that are creating
2937 Intended to be used by lightweight operations that are creating
2938 metadata-only changes.
2938 metadata-only changes.
2939
2939
2940 Revision information is supplied at initialization time. 'repo' is the
2940 Revision information is supplied at initialization time. 'repo' is the
2941 current localrepo, 'ctx' is the original revision whose manifest we're
2941 current localrepo, 'ctx' is the original revision whose manifest we're
2942 reusing, 'parents' is a sequence of two parent revision identifiers (pass
2942 reusing, 'parents' is a sequence of two parent revision identifiers (pass
2943 None for every missing parent), 'text' is the commit message.
2943 None for every missing parent), 'text' is the commit message.
2944
2944
2945 user receives the committer name and defaults to current repository
2945 user receives the committer name and defaults to current repository
2946 username, date is the commit date in any format supported by
2946 username, date is the commit date in any format supported by
2947 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2947 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2948 metadata or is left empty.
2948 metadata or is left empty.
2949 """
2949 """
2950
2950
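# Illustrative usage sketch (message and user hypothetical): rewriting only
# the metadata of the working directory parent while reusing its manifest:
#
#   newctx = metadataonlyctx(
#       repo,
#       repo[b'.'],
#       text=b'same changes, new author',
#       user=b'someone <someone@example.com>',
#   )
#   node = newctx.commit()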
2951 def __init__(
2951 def __init__(
2952 self,
2952 self,
2953 repo,
2953 repo,
2954 originalctx,
2954 originalctx,
2955 parents=None,
2955 parents=None,
2956 text=None,
2956 text=None,
2957 user=None,
2957 user=None,
2958 date=None,
2958 date=None,
2959 extra=None,
2959 extra=None,
2960 editor=None,
2960 editor=None,
2961 ):
2961 ):
2962 if text is None:
2962 if text is None:
2963 text = originalctx.description()
2963 text = originalctx.description()
2964 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2964 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2965 self._rev = None
2965 self._rev = None
2966 self._node = None
2966 self._node = None
2967 self._originalctx = originalctx
2967 self._originalctx = originalctx
2968 self._manifestnode = originalctx.manifestnode()
2968 self._manifestnode = originalctx.manifestnode()
2969 if parents is None:
2969 if parents is None:
2970 parents = originalctx.parents()
2970 parents = originalctx.parents()
2971 else:
2971 else:
2972 parents = [repo[p] for p in parents if p is not None]
2972 parents = [repo[p] for p in parents if p is not None]
2973 parents = parents[:]
2973 parents = parents[:]
2974 while len(parents) < 2:
2974 while len(parents) < 2:
2975 parents.append(repo[nullid])
2975 parents.append(repo[nullid])
2976 p1, p2 = self._parents = parents
2976 p1, p2 = self._parents = parents
2977
2977
2978 # sanity check to ensure that the reused manifest parents are
2978 # sanity check to ensure that the reused manifest parents are
2979 # manifests of our commit parents
2979 # manifests of our commit parents
2980 mp1, mp2 = self.manifestctx().parents
2980 mp1, mp2 = self.manifestctx().parents
2981 if p1 != nullid and p1.manifestnode() != mp1:
2981 if p1 != nullid and p1.manifestnode() != mp1:
2982 raise RuntimeError(
2982 raise RuntimeError(
2983 r"can't reuse the manifest: its p1 "
2983 r"can't reuse the manifest: its p1 "
2984 r"doesn't match the new ctx p1"
2984 r"doesn't match the new ctx p1"
2985 )
2985 )
2986 if p2 != nullid and p2.manifestnode() != mp2:
2986 if p2 != nullid and p2.manifestnode() != mp2:
2987 raise RuntimeError(
2987 raise RuntimeError(
2988 r"can't reuse the manifest: "
2988 r"can't reuse the manifest: "
2989 r"its p2 doesn't match the new ctx p2"
2989 r"its p2 doesn't match the new ctx p2"
2990 )
2990 )
2991
2991
2992 self._files = originalctx.files()
2992 self._files = originalctx.files()
2993 self.substate = {}
2993 self.substate = {}
2994
2994
2995 if editor:
2995 if editor:
2996 self._text = editor(self._repo, self, [])
2996 self._text = editor(self._repo, self, [])
2997 self._repo.savecommitmessage(self._text)
2997 self._repo.savecommitmessage(self._text)
2998
2998
2999 def manifestnode(self):
2999 def manifestnode(self):
3000 return self._manifestnode
3000 return self._manifestnode
3001
3001
3002 @property
3002 @property
3003 def _manifestctx(self):
3003 def _manifestctx(self):
3004 return self._repo.manifestlog[self._manifestnode]
3004 return self._repo.manifestlog[self._manifestnode]
3005
3005
3006 def filectx(self, path, filelog=None):
3006 def filectx(self, path, filelog=None):
3007 return self._originalctx.filectx(path, filelog=filelog)
3007 return self._originalctx.filectx(path, filelog=filelog)
3008
3008
3009 def commit(self):
3009 def commit(self):
3010 """commit context to the repo"""
3010 """commit context to the repo"""
3011 return self._repo.commitctx(self)
3011 return self._repo.commitctx(self)
3012
3012
3013 @property
3013 @property
3014 def _manifest(self):
3014 def _manifest(self):
3015 return self._originalctx.manifest()
3015 return self._originalctx.manifest()
3016
3016
3017 @propertycache
3017 @propertycache
3018 def _status(self):
3018 def _status(self):
3019 """Calculate exact status from ``files`` specified in the ``origctx``
3019 """Calculate exact status from ``files`` specified in the ``origctx``
3020 and the parents' manifests.
3020 and the parents' manifests.
3021 """
3021 """
3022 man1 = self.p1().manifest()
3022 man1 = self.p1().manifest()
3023 p2 = self._parents[1]
3023 p2 = self._parents[1]
3024 # "1 < len(self._parents)" can't be used for checking
3024 # "1 < len(self._parents)" can't be used for checking
3025 # existence of the 2nd parent, because "metadataonlyctx._parents" is
3025 # existence of the 2nd parent, because "metadataonlyctx._parents" is
3026 # explicitly initialized with a list whose length is always 2.
3026 # explicitly initialized with a list whose length is always 2.
3027 if p2.node() != nullid:
3027 if p2.node() != nullid:
3028 man2 = p2.manifest()
3028 man2 = p2.manifest()
3029 managing = lambda f: f in man1 or f in man2
3029 managing = lambda f: f in man1 or f in man2
3030 else:
3030 else:
3031 managing = lambda f: f in man1
3031 managing = lambda f: f in man1
3032
3032
3033 modified, added, removed = [], [], []
3033 modified, added, removed = [], [], []
3034 for f in self._files:
3034 for f in self._files:
3035 if not managing(f):
3035 if not managing(f):
3036 added.append(f)
3036 added.append(f)
3037 elif f in self:
3037 elif f in self:
3038 modified.append(f)
3038 modified.append(f)
3039 else:
3039 else:
3040 removed.append(f)
3040 removed.append(f)
3041
3041
3042 return scmutil.status(modified, added, removed, [], [], [], [])
3042 return scmutil.status(modified, added, removed, [], [], [], [])
3043
3043
3044
3044
3045 class arbitraryfilectx(object):
3045 class arbitraryfilectx(object):
3046 """Allows you to use filectx-like functions on a file in an arbitrary
3046 """Allows you to use filectx-like functions on a file in an arbitrary
3047 location on disk, possibly not in the working directory.
3047 location on disk, possibly not in the working directory.
3048 """
3048 """
3049
3049
3050 def __init__(self, path, repo=None):
3050 def __init__(self, path, repo=None):
3051 # Repo is optional because contrib/simplemerge uses this class.
3051 # Repo is optional because contrib/simplemerge uses this class.
3052 self._repo = repo
3052 self._repo = repo
3053 self._path = path
3053 self._path = path
3054
3054
3055 def cmp(self, fctx):
3055 def cmp(self, fctx):
3056 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3056 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3057 # path if either side is a symlink.
3057 # path if either side is a symlink.
3058 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3058 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3059 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3059 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3060 # Add a fast-path for merge if both sides are disk-backed.
3060 # Add a fast-path for merge if both sides are disk-backed.
3061 # Note that filecmp uses the opposite return values (True if same)
3061 # Note that filecmp uses the opposite return values (True if same)
3062 # from our cmp functions (True if different).
3062 # from our cmp functions (True if different).
3063 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3063 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3064 return self.data() != fctx.data()
3064 return self.data() != fctx.data()
3065
3065
3066 def path(self):
3066 def path(self):
3067 return self._path
3067 return self._path
3068
3068
3069 def flags(self):
3069 def flags(self):
3070 return b''
3070 return b''
3071
3071
3072 def data(self):
3072 def data(self):
3073 return util.readfile(self._path)
3073 return util.readfile(self._path)
3074
3074
3075 def decodeddata(self):
3075 def decodeddata(self):
3076 with open(self._path, b"rb") as f:
3076 with open(self._path, b"rb") as f:
3077 return f.read()
3077 return f.read()
3078
3078
3079 def remove(self):
3079 def remove(self):
3080 util.unlink(self._path)
3080 util.unlink(self._path)
3081
3081
3082 def write(self, data, flags, **kwargs):
3082 def write(self, data, flags, **kwargs):
3083 assert not flags
3083 assert not flags
3084 with open(self._path, b"wb") as f:
3084 with open(self._path, b"wb") as f:
3085 f.write(data)
3085 f.write(data)
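# Illustrative usage sketch (path hypothetical): comparing an arbitrary
# on-disk file against a file in the working directory:
#
#   fctx = arbitraryfilectx(b'/tmp/base', repo=repo)
#   if fctx.cmp(repo[None][b'foo']):
#       pass  # contents differ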
@@ -1,2077 +1,2078 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import stat
11 import stat
12 import struct
12 import struct
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 addednodeid,
16 addednodeid,
17 modifiednodeid,
17 modifiednodeid,
18 nullid,
18 nullid,
19 nullrev,
19 nullrev,
20 )
20 )
21 from .thirdparty import attr
21 from .thirdparty import attr
22 from . import (
22 from . import (
23 copies,
23 copies,
24 encoding,
24 encoding,
25 error,
25 error,
26 filemerge,
26 filemerge,
27 match as matchmod,
27 match as matchmod,
28 mergestate as mergestatemod,
28 mergestate as mergestatemod,
29 obsutil,
29 obsutil,
30 pathutil,
30 pathutil,
31 pycompat,
31 pycompat,
32 scmutil,
32 scmutil,
33 subrepoutil,
33 subrepoutil,
34 util,
34 util,
35 worker,
35 worker,
36 )
36 )
37
37
38 _pack = struct.pack
38 _pack = struct.pack
39 _unpack = struct.unpack
39 _unpack = struct.unpack
40
40
41
41
42 def _getcheckunknownconfig(repo, section, name):
42 def _getcheckunknownconfig(repo, section, name):
43 config = repo.ui.config(section, name)
43 config = repo.ui.config(section, name)
44 valid = [b'abort', b'ignore', b'warn']
44 valid = [b'abort', b'ignore', b'warn']
45 if config not in valid:
45 if config not in valid:
46 validstr = b', '.join([b"'" + v + b"'" for v in valid])
46 validstr = b', '.join([b"'" + v + b"'" for v in valid])
47 raise error.ConfigError(
47 raise error.ConfigError(
48 _(b"%s.%s not valid ('%s' is none of %s)")
48 _(b"%s.%s not valid ('%s' is none of %s)")
49 % (section, name, config, validstr)
49 % (section, name, config, validstr)
50 )
50 )
51 return config
51 return config
52
52
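# Illustrative configuration sketch: the values validated above come from
# hgrc settings such as (one possible choice):
#
#   [merge]
#   checkunknown = warn
#   checkignored = abort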
53
53
54 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
54 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
55 if wctx.isinmemory():
55 if wctx.isinmemory():
56 # Nothing to do in IMM because nothing in the "working copy" can be an
56 # Nothing to do in IMM because nothing in the "working copy" can be an
57 # unknown file.
57 # unknown file.
58 #
58 #
59 # Note that we should bail out here, not in ``_checkunknownfiles()``,
59 # Note that we should bail out here, not in ``_checkunknownfiles()``,
60 # because that function does other useful work.
60 # because that function does other useful work.
61 return False
61 return False
62
62
63 if f2 is None:
63 if f2 is None:
64 f2 = f
64 f2 = f
65 return (
65 return (
66 repo.wvfs.audit.check(f)
66 repo.wvfs.audit.check(f)
67 and repo.wvfs.isfileorlink(f)
67 and repo.wvfs.isfileorlink(f)
68 and repo.dirstate.normalize(f) not in repo.dirstate
68 and repo.dirstate.normalize(f) not in repo.dirstate
69 and mctx[f2].cmp(wctx[f])
69 and mctx[f2].cmp(wctx[f])
70 )
70 )
71
71
72
72
73 class _unknowndirschecker(object):
73 class _unknowndirschecker(object):
74 """
74 """
75 Look for any unknown files or directories that may have a path conflict
75 Look for any unknown files or directories that may have a path conflict
76 with a file. If any path prefix of the file exists as a file or link,
76 with a file. If any path prefix of the file exists as a file or link,
77 then it conflicts. If the file itself is a directory that contains any
77 then it conflicts. If the file itself is a directory that contains any
78 file that is not tracked, then it conflicts.
78 file that is not tracked, then it conflicts.
79
79
80 Returns the shortest path at which a conflict occurs, or None if there is
80 Returns the shortest path at which a conflict occurs, or None if there is
81 no conflict.
81 no conflict.
82 """
82 """
83
83
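# Illustrative sketch (path hypothetical): checking b'a/b/c' first walks the
# prefixes b'a' and b'a/b' looking for unknown files or links, then checks
# whether b'a/b/c' itself is a directory holding untracked files:
#
#   checker = _unknowndirschecker()
#   conflict = checker(repo, wctx, b'a/b/c')  # shortest conflict path or None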
84 def __init__(self):
84 def __init__(self):
85 # A set of paths known to be good. This prevents repeated checking of
85 # A set of paths known to be good. This prevents repeated checking of
86 # dirs. It will be updated with any new dirs that are checked and found
86 # dirs. It will be updated with any new dirs that are checked and found
87 # to be safe.
87 # to be safe.
88 self._unknowndircache = set()
88 self._unknowndircache = set()
89
89
90 # A set of paths that are known to be absent. This prevents repeated
90 # A set of paths that are known to be absent. This prevents repeated
91 # checking of subdirectories that are known not to exist. It will be
91 # checking of subdirectories that are known not to exist. It will be
92 # updated with any new dirs that are checked and found to be absent.
92 # updated with any new dirs that are checked and found to be absent.
93 self._missingdircache = set()
93 self._missingdircache = set()
94
94
95 def __call__(self, repo, wctx, f):
95 def __call__(self, repo, wctx, f):
96 if wctx.isinmemory():
96 if wctx.isinmemory():
97 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
97 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
98 return False
98 return False
99
99
100 # Check for path prefixes that exist as unknown files.
100 # Check for path prefixes that exist as unknown files.
101 for p in reversed(list(pathutil.finddirs(f))):
101 for p in reversed(list(pathutil.finddirs(f))):
102 if p in self._missingdircache:
102 if p in self._missingdircache:
103 return
103 return
104 if p in self._unknowndircache:
104 if p in self._unknowndircache:
105 continue
105 continue
106 if repo.wvfs.audit.check(p):
106 if repo.wvfs.audit.check(p):
107 if (
107 if (
108 repo.wvfs.isfileorlink(p)
108 repo.wvfs.isfileorlink(p)
109 and repo.dirstate.normalize(p) not in repo.dirstate
109 and repo.dirstate.normalize(p) not in repo.dirstate
110 ):
110 ):
111 return p
111 return p
112 if not repo.wvfs.lexists(p):
112 if not repo.wvfs.lexists(p):
113 self._missingdircache.add(p)
113 self._missingdircache.add(p)
114 return
114 return
115 self._unknowndircache.add(p)
115 self._unknowndircache.add(p)
116
116
117 # Check if the file conflicts with a directory containing unknown files.
117 # Check if the file conflicts with a directory containing unknown files.
118 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
118 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
119 # Does the directory contain any files that are not in the dirstate?
119 # Does the directory contain any files that are not in the dirstate?
120 for p, dirs, files in repo.wvfs.walk(f):
120 for p, dirs, files in repo.wvfs.walk(f):
121 for fn in files:
121 for fn in files:
122 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
122 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
123 relf = repo.dirstate.normalize(relf, isknown=True)
123 relf = repo.dirstate.normalize(relf, isknown=True)
124 if relf not in repo.dirstate:
124 if relf not in repo.dirstate:
125 return f
125 return f
126 return None
126 return None
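
# A minimal usage sketch (hypothetical, not from the original source): the
# checker is instantiated once per merge so its two caches persist across
# files, then called once per destination path; it returns the shortest
# conflicting path, or a falsy value when there is no conflict:
#
#   checker = _unknowndirschecker()
#   conflict = checker(repo, wctx, b'some/new/file')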


def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
    pathconfig = repo.ui.configbool(
        b'experimental', b'merge.checkpathconflicts'
    )
    if not force:

        def collectconflicts(conflicts, config):
            if config == b'abort':
                abortconflicts.update(conflicts)
            elif config == b'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m in (
                mergestatemod.ACTION_CREATED,
                mergestatemod.ACTION_DELETED_CHANGED,
            ):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, wctx, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m == mergestatemod.ACTION_CREATED_MERGE:
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = (
                        mergestatemod.ACTION_GET,
                        (fl2, False),
                        b'remote created',
                    )
                elif mergeforce or config == b'abort':
                    actions[f] = (
                        mergestatemod.ACTION_MERGE,
                        (f, f, None, False, anc),
                        b'remote differs from untracked local',
                    )
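                # Note: the following branch is unreachable in practice --
                # when config is b'abort', the preceding
                # `elif mergeforce or config == b'abort'` has already matched.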
                elif config == b'abort':
                    abortconflicts.add(f)
                else:
                    if config == b'warn':
                        warnconflicts.add(f)
                    actions[f] = (
                        mergestatemod.ACTION_GET,
                        (fl2, True),
                        b'remote created',
                    )

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_(b"%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_(b"%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_(b"%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(
            _(
                b"untracked files in working directory "
                b"differ from files in requested revision"
            )
        )

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in pycompat.iteritems(actions):
        if m == mergestatemod.ACTION_CREATED:
            backup = (
                f in fileconflicts
                or f in pathconflicts
                or any(p in pathconflicts for p in pathutil.finddirs(f))
            )
            (flags,) = args
            actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg)


def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    m = mergestatemod.ACTION_FORGET
    if branchmerge:
        m = mergestatemod.ACTION_REMOVE
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, b"forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = (
                    mergestatemod.ACTION_FORGET,
                    None,
                    b"forget removed",
                )

    return actions


def _checkcollision(repo, wmf, actions):
    """
    Check for case-folding collisions.
    """
    # If the repo is narrowed, filter out files outside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        pmmf = set(wmf.walk(narrowmatch))
        if actions:
            narrowactions = {}
            for m, actionsfortype in pycompat.iteritems(actions):
                narrowactions[m] = []
                for (f, args, msg) in actionsfortype:
                    if narrowmatch(f):
                        narrowactions[m].append((f, args, msg))
            actions = narrowactions
    else:
        # build provisional merged manifest up
        pmmf = set(wmf)

    if actions:
        # KEEP and EXEC are no-op
        for m in (
            mergestatemod.ACTION_ADD,
            mergestatemod.ACTION_ADD_MODIFIED,
            mergestatemod.ACTION_FORGET,
            mergestatemod.ACTION_GET,
            mergestatemod.ACTION_CHANGED_DELETED,
            mergestatemod.ACTION_DELETED_CHANGED,
        ):
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions[mergestatemod.ACTION_REMOVE]:
            pmmf.discard(f)
        for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
            pmmf.add(f)
        for f, args, msg in actions[mergestatemod.ACTION_MERGE]:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(
                _(b"case-folding collision between %s and %s")
                % (f, foldmap[fold])
            )
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = b''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(
                _(b"case-folding collision between %s and directory of %s")
                % (lastfull, f)
            )
        foldprefix = fold + b'/'
        unfoldprefix = f + b'/'
        lastfull = f
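
# For example (illustrative): b'README' and b'readme' normalize to the same
# foldmap key under util.normcase, so on a case-insensitive filesystem the
# merge aborts rather than letting one file silently overwrite the other.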


def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True


def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True


def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for f in manifest:
        for p in pathutil.finddirs(f):
            if p in dirs:
                yield f, p
                break
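
# Worked example (hypothetical values): with a manifest containing b'a/b' and
# b'c', _filesindirs(repo, manifest, {b'a'}) yields only (b'a/b', b'a'), since
# b'c' has no parent directory in the requested set.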


def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest.  These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in (
            mergestatemod.ACTION_CREATED,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_MERGE,
            mergestatemod.ACTION_CREATED_MERGE,
        ):
            # This action may create a new local file.
            createdfiledirs.update(pathutil.finddirs(f))
            if mf.hasdir(f):
                # The file aliases a local directory.  This might be ok if all
                # the files in the local directory are being deleted.  This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
        # Track the names of all deleted files.
        if m == mergestatemod.ACTION_REMOVE:
            deletedfiles.add(f)
        if m == mergestatemod.ACTION_MERGE:
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL:
            f2, flags = args
            deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file.  This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        if p in actions and actions[p][0] in (
            mergestatemod.ACTION_CREATED,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_MERGE,
            mergestatemod.ACTION_CREATED_MERGE,
        ):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip(b'+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            porig = wctx[p].copysource() or p
            actions[pnew] = (
                mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                (p, porig),
                b'local path conflict',
            )
            actions[p] = (
                mergestatemod.ACTION_PATH_CONFLICT,
                (pnew, b'l'),
                b'path conflict',
            )

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip(b'+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in (
                    mergestatemod.ACTION_DELETED_CHANGED,
                    mergestatemod.ACTION_MERGE,
                ):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = (
                        mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
                        (p, fl),
                        b'remote path conflict',
                    )
                actions[p] = (
                    mergestatemod.ACTION_PATH_CONFLICT,
                    (pnew, mergestatemod.ACTION_REMOVE),
                    b'path conflict',
                )
                remoteconflicts.remove(p)
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
        raise error.Abort(_(b"destination manifest contains path conflicts"))


def _filternarrowactions(narrowmatch, branchmerge, actions):
    """
    Filters out actions that can be ignored because the repo is narrowed.

    Raise an exception if the merge cannot be completed because the repo is
    narrowed.
    """
    nooptypes = {b'k'}  # TODO: handle with nonconflicttypes
    nonconflicttypes = set(b'a am c cm f g gs r e'.split())
    # We mutate the items in the dict during iteration, so iterate
    # over a copy.
    for f, action in list(actions.items()):
        if narrowmatch(f):
            pass
        elif not branchmerge:
            del actions[f]  # just updating, ignore changes outside clone
        elif action[0] in nooptypes:
            del actions[f]  # merge does not affect file
        elif action[0] in nonconflicttypes:
            raise error.Abort(
                _(
                    b'merge affects file \'%s\' outside narrow, '
                    b'which is not yet supported'
                )
                % f,
                hint=_(b'merging in the other direction may work'),
            )
        else:
            raise error.Abort(
                _(b'conflict in file \'%s\' is outside narrow clone') % f
            )
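
# Note (an observation based on the mergestate module, not spelled out in the
# original source): the single-letter codes in nooptypes/nonconflicttypes
# above mirror the values of the mergestatemod.ACTION_* constants, e.g.
# b'k' == ACTION_KEEP and b'g' == ACTION_GET.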


def manifestmerge(
    repo,
    wctx,
    p2,
    pa,
    branchmerge,
    force,
    matcher,
    acceptremote,
    followcopies,
    forcefulldiff=False,
):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    """
    if matcher is not None and matcher.always():
        matcher = None

    # manifests fetched in order are going to be faster, so prime the caches
    [
        x.manifest()
        for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
    ]

    branch_copies1 = copies.branch_copies()
    branch_copies2 = copies.branch_copies()
    diverge = {}
    if followcopies:
        branch_copies1, branch_copies2, diverge = copies.mergecopies(
            repo, wctx, p2, pa
        )

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_(b"resolving manifests\n"))
    repo.ui.debug(
        b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
    )
    repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied1 = set(branch_copies1.copy.values())
    copied1.update(branch_copies1.movewithdir.values())
    copied2 = set(branch_copies2.copy.values())
    copied2.update(branch_copies2.movewithdir.values())

    if b'.hgsubstate' in m1 and wctx.rev() is None:
        # Check whether sub state is modified, and overwrite the manifest
        # to flag the change. If wctx is a committed revision, we shouldn't
        # care for the dirty state of the working directory.
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1[b'.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during
    #   bid merge.
    if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in branch_copies1.movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
        if n1 and n2:  # file exists on both local and remote side
            if f not in ma:
                # TODO: what if they're renamed from different sources?
                fa = branch_copies1.copy.get(
                    f, None
                ) or branch_copies2.copy.get(f, None)
                if fa is not None:
                    actions[f] = (
                        mergestatemod.ACTION_MERGE,
                        (f, f, fa, False, pa.node()),
                        b'both renamed from %s' % fa,
                    )
                else:
                    actions[f] = (
                        mergestatemod.ACTION_MERGE,
                        (f, f, None, False, pa.node()),
                        b'both created',
                    )
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = b'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = (
                        mergestatemod.ACTION_KEEP,
                        (),
                        b'remote unchanged',
                    )
                elif n1 == a and fl1 == fla:  # local unchanged - use remote
                    if n1 == n2:  # optimization: keep local content
                        actions[f] = (
                            mergestatemod.ACTION_EXEC,
                            (fl2,),
                            b'update permissions',
                        )
                    else:
                        actions[f] = (
                            mergestatemod.ACTION_GET_OTHER_AND_STORE
                            if branchmerge
                            else mergestatemod.ACTION_GET,
                            (fl2, False),
                            b'remote is newer',
                        )
                elif nol and n2 == a:  # remote only changed 'x'
                    actions[f] = (
                        mergestatemod.ACTION_EXEC,
                        (fl2,),
                        b'update permissions',
                    )
                elif nol and n1 == a:  # local only changed 'x'
                    actions[f] = (
                        mergestatemod.ACTION_GET_OTHER_AND_STORE
                        if branchmerge
                        else mergestatemod.ACTION_GET,
                        (fl1, False),
                        b'remote is newer',
                    )
                else:  # both changed something
                    actions[f] = (
                        mergestatemod.ACTION_MERGE,
                        (f, f, f, False, pa.node()),
                        b'versions differ',
                    )
        elif n1:  # file exists only on local side
            if f in copied2:
                pass  # we'll deal with it on m2 side
            elif (
                f in branch_copies1.movewithdir
            ):  # directory rename, move local
                f2 = branch_copies1.movewithdir[f]
                if f2 in m2:
                    actions[f2] = (
                        mergestatemod.ACTION_MERGE,
                        (f, f2, None, True, pa.node()),
                        b'remote directory rename, both created',
                    )
                else:
                    actions[f2] = (
                        mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
                        (f, fl1),
                        b'remote directory rename - move from %s' % f,
                    )
            elif f in branch_copies1.copy:
                f2 = branch_copies1.copy[f]
                actions[f] = (
                    mergestatemod.ACTION_MERGE,
                    (f, f2, f2, False, pa.node()),
                    b'local copied/moved from %s' % f2,
                )
            elif f in ma:  # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = (
                            mergestatemod.ACTION_REMOVE,
                            None,
                            b'remote delete',
                        )
                    else:
                        actions[f] = (
                            mergestatemod.ACTION_CHANGED_DELETED,
                            (f, None, f, False, pa.node()),
                            b'prompt changed/deleted',
                        )
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = (
                        mergestatemod.ACTION_FORGET,
                        None,
                        b'remote deleted',
                    )
                else:
                    actions[f] = (
                        mergestatemod.ACTION_REMOVE,
                        None,
                        b'other deleted',
                    )
        elif n2:  # file exists only on remote side
            if f in copied1:
                pass  # we'll deal with it on m1 side
            elif f in branch_copies2.movewithdir:
                f2 = branch_copies2.movewithdir[f]
                if f2 in m1:
                    actions[f2] = (
                        mergestatemod.ACTION_MERGE,
                        (f2, f, None, False, pa.node()),
                        b'local directory rename, both created',
                    )
                else:
                    actions[f2] = (
                        mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
                        (f, fl2),
                        b'local directory rename - get from %s' % f,
                    )
            elif f in branch_copies2.copy:
                f2 = branch_copies2.copy[f]
                if f2 in m2:
                    actions[f] = (
                        mergestatemod.ACTION_MERGE,
                        (f2, f, f2, False, pa.node()),
                        b'remote copied from %s' % f2,
                    )
                else:
                    actions[f] = (
                        mergestatemod.ACTION_MERGE,
                        (f2, f, f2, True, pa.node()),
                        b'remote moved from %s' % f2,
                    )
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                #  force  branchmerge  different  |  action
                #    n         *           *      |  create
                #    y         n           *      |  create
                #    y         y           n      |  create
                #    y         y           y      |  merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = (
                        mergestatemod.ACTION_CREATED,
                        (fl2,),
                        b'remote created',
                    )
                elif not branchmerge:
                    actions[f] = (
                        mergestatemod.ACTION_CREATED,
                        (fl2,),
                        b'remote created',
                    )
                else:
                    actions[f] = (
                        mergestatemod.ACTION_CREATED_MERGE,
                        (fl2, pa.node()),
                        b'remote created, get or merge',
                    )
            elif n2 != ma[f]:
                df = None
                for d in branch_copies1.dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = branch_copies1.dirmove[d] + f[len(d) :]
                        break
                if df is not None and df in m1:
                    actions[df] = (
                        mergestatemod.ACTION_MERGE,
                        (df, f, f, False, pa.node()),
                        b'local directory rename - respect move '
                        b'from %s' % f,
                    )
                elif acceptremote:
                    actions[f] = (
                        mergestatemod.ACTION_CREATED,
                        (fl2,),
                        b'remote recreating',
                    )
                else:
                    actions[f] = (
                        mergestatemod.ACTION_DELETED_CHANGED,
                        (None, f, f, False, pa.node()),
                        b'prompt deleted/changed',
                    )

    if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
        # If we are merging, look for path conflicts.
        checkpathconflicts(repo, wctx, p2, actions)

    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        # Updates "actions" in place
        _filternarrowactions(narrowmatch, branchmerge, actions)

    renamedelete = branch_copies1.renamedelete
    renamedelete.update(branch_copies2.renamedelete)

    return actions, diverge, renamedelete


def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""
    # We force a copy of actions.items() because we're going to mutate
    # actions as we resolve trivial conflicts.
    for f, (m, args, msg) in list(actions.items()):
        if (
            m == mergestatemod.ACTION_CHANGED_DELETED
            and f in ancestor
            and not wctx[f].cmp(ancestor[f])
        ):
            # local did change but ended up with same content
            actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same'
        elif (
            m == mergestatemod.ACTION_DELETED_CHANGED
            and f in ancestor
            and not mctx[f].cmp(ancestor[f])
        ):
            # remote did change but ended up with same content
            del actions[f]  # don't get = keep local deleted


def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """Calculate the actions needed to merge mctx into wctx using ancestors"""
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1:  # default
        actions, diverge, renamedelete = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # Call for bids
        fbids = (
            {}
        )  # mapping filename to bids (action method to list of actions)
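        # Illustrative shape (hypothetical values, not from the original
        # source): after two ancestors both bid b'g' (get) for b'foo',
        # fbids could look like:
        #   {b'foo': {b'g': [(b'g', (b'', False), b'remote created'),
        #                    (b'g', (b'', False), b'remote created')]}}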
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warnings on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(pycompat.iteritems(actions)):
                m, args, msg = a
                if m == mergestatemod.ACTION_GET_OTHER_AND_STORE:
                    m = mergestatemod.ACTION_GET
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_(b'\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if mergestatemod.ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                actions[f] = bids[mergestatemod.ACTION_KEEP][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if mergestatemod.ACTION_GET in bids:
                ga0 = bids[mergestatemod.ACTION_GET][0]
                if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b'  %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
            )
            actions[f] = l[0]
            continue
        repo.ui.note(_(b'end of auction\n\n'))

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(
        repo, wctx, mctx, branchmerge, actions
    )
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    return prunedactions, diverge, renamedelete


def _getcwd():
    try:
        return encoding.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None
        raise


def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_(b"removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(
                _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
            )
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(
            _(
                b"current directory was removed\n"
                b"(consider changing to repo root: %s)\n"
            )
            % repo.root
        )
1045
1045
1046
1046
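# Editor's sketch (not part of the original module): batchremove() above is a
# generator normally driven through worker.worker() in applyupdates(); run
# standalone it would be consumed like this (assuming `repo`, `wctx` and a
# list of ACTION_REMOVE triples named `removeactions`):
#
#     progress = repo.ui.makeprogress(b'updating', unit=b'files')
#     for i, item in batchremove(repo, wctx, removeactions):
#         progress.increment(step=i, item=item)
#     progress.complete()

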
def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_(b"getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
                    for p in pathutil.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            conflicting = p
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            wfctx.clearunknown()
            atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
            size = wfctx.write(
                fctx(f).data(),
                flags,
                backgroundclose=True,
                atomictemp=atomictemp,
            )
            if wantfiledata:
                s = wfctx.lstat()
                mode = s.st_mode
                mtime = s[stat.ST_MTIME]
                filedata[f] = (mode, size, mtime)  # for dirstate.normal
            if i == 100:
                yield False, (i, f)
                i = 0
            i += 1
    if i > 0:
        yield False, (i, f)
    yield True, filedata


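# Editor's sketch (not part of the original module): consuming batchget()'s
# two-phase protocol by hand -- progress tuples first, then exactly one final
# filedata dict (assuming `repo`, `mctx`, `wctx` and `getactions` exist):
#
#     progress = repo.ui.makeprogress(b'updating', unit=b'files')
#     filedata = {}
#     for final, res in batchget(repo, mctx, wctx, True, getactions):
#         if final:
#             filedata = res  # {f: (mode, size, mtime)}
#         else:
#             i, item = res  # progress update
#             progress.increment(step=i, item=item)

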
def _prefetchfiles(repo, ctx, actions):
    """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
    of merge actions.  ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    oplist = [
        actions[a]
        for a in (
            mergestatemod.ACTION_GET,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
            mergestatemod.ACTION_MERGE,
        )
    ]
    prefetch = scmutil.prefetchfiles
    matchfiles = scmutil.matchfiles
    prefetch(
        repo,
        [
            (
                ctx.rev(),
                matchfiles(
                    repo, [f for sublist in oplist for f, args, msg in sublist]
                ),
            )
        ],
    )


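# Editor's note (illustrative): scmutil.prefetchfiles() takes a list of
# (revision, match) pairs, which is what allows different files to be
# prefetched per revision. A hypothetical two-revision prefetch:
#
#     scmutil.prefetchfiles(repo, [
#         (rev1, scmutil.matchfiles(repo, [b'a.txt'])),
#         (rev2, scmutil.matchfiles(repo, [b'b.txt'])),
#     ])

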
@attr.s(frozen=True)
class updateresult(object):
    updatedcount = attr.ib()
    mergedcount = attr.ib()
    removedcount = attr.ib()
    unresolvedcount = attr.ib()

    def isempty(self):
        return not (
            self.updatedcount
            or self.mergedcount
            or self.removedcount
            or self.unresolvedcount
        )


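# Editor's sketch (not part of the original module): updateresult is a simple
# frozen counter record; e.g.:
#
#     stats = updateresult(1, 0, 2, 0)
#     assert not stats.isempty()
#     assert stats.unresolvedcount == 0

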
1145 def emptyactions():
1146 def emptyactions():
1146 """create an actions dict, to be populated and passed to applyupdates()"""
1147 """create an actions dict, to be populated and passed to applyupdates()"""
1147 return {
1148 return {
1148 m: []
1149 m: []
1149 for m in (
1150 for m in (
1150 mergestatemod.ACTION_ADD,
1151 mergestatemod.ACTION_ADD,
1151 mergestatemod.ACTION_ADD_MODIFIED,
1152 mergestatemod.ACTION_ADD_MODIFIED,
1152 mergestatemod.ACTION_FORGET,
1153 mergestatemod.ACTION_FORGET,
1153 mergestatemod.ACTION_GET,
1154 mergestatemod.ACTION_GET,
1154 mergestatemod.ACTION_CHANGED_DELETED,
1155 mergestatemod.ACTION_CHANGED_DELETED,
1155 mergestatemod.ACTION_DELETED_CHANGED,
1156 mergestatemod.ACTION_DELETED_CHANGED,
1156 mergestatemod.ACTION_REMOVE,
1157 mergestatemod.ACTION_REMOVE,
1157 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1158 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1158 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1159 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1159 mergestatemod.ACTION_MERGE,
1160 mergestatemod.ACTION_MERGE,
1160 mergestatemod.ACTION_EXEC,
1161 mergestatemod.ACTION_EXEC,
1161 mergestatemod.ACTION_KEEP,
1162 mergestatemod.ACTION_KEEP,
1162 mergestatemod.ACTION_PATH_CONFLICT,
1163 mergestatemod.ACTION_PATH_CONFLICT,
1163 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1164 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1164 mergestatemod.ACTION_GET_OTHER_AND_STORE,
1165 mergestatemod.ACTION_GET_OTHER_AND_STORE,
1165 )
1166 )
1166 }
1167 }
1167
1168
1168
1169
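# Editor's sketch (not part of the original module): populating the dict from
# emptyactions() before handing it to applyupdates(); the file name, flags
# and message here are hypothetical:
#
#     actions = emptyactions()
#     actions[mergestatemod.ACTION_GET].append(
#         (b'foo.txt', (b'', False), b'remote created')
#     )

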
def applyupdates(
    repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of (counts, filedata), where counts is a tuple
    (updated, merged, removed, unresolved) that describes how many
    files were affected by the update, and filedata is as described in
    batchget.
    """

    _prefetchfiles(repo, mctx, actions)

    updated, merged, removed = 0, 0, 0
    ms = mergestatemod.mergestate.clean(
        repo, wctx.p1().node(), mctx.node(), labels
    )

    # add ACTION_GET_OTHER_AND_STORE to mergestate
    for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
        ms.addmergedother(e[0])

    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED])
    mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED]))
    mergeactions.extend(actions[mergestatemod.ACTION_MERGE])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == b'.hgsubstate':  # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug(b"removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(
        len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP
    )
    progress = repo.ui.makeprogress(
        _(b'updating'), unit=_(b'files'), total=numupdates
    )

    if [
        a
        for a in actions[mergestatemod.ACTION_REMOVE]
        if a[0] == b'.hgsubstate'
    ]:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]:
        f1, fo = args
        s = repo.ui.status
        s(
            _(
                b"%s: path conflict - a file or link has the same name as a "
                b"directory\n"
            )
            % f
        )
        if fo == b'l':
            s(_(b"the local file has been renamed to %s\n") % f1)
        else:
            s(_(b"the remote file has been renamed to %s\n") % f1)
        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpath(f, f1, fo)
        progress.increment(item=f)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(
        repo.ui,
        cost,
        batchremove,
        (repo, wctx),
        actions[mergestatemod.ACTION_REMOVE],
    )
    for i, item in prog:
        progress.increment(step=i, item=item)
    removed = len(actions[mergestatemod.ACTION_REMOVE])

    # resolve path conflicts (must come before getting)
    for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]:
        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
        (f0, origf0) = args
        if wctx[f0].lexists():
            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        progress.increment(item=f)

    # get in parallel.
    threadsafe = repo.ui.configbool(
        b'experimental', b'worker.wdir-get-thread-safe'
    )
    prog = worker.worker(
        repo.ui,
        cost,
        batchget,
        (repo, mctx, wctx, wantfiledata),
        actions[mergestatemod.ACTION_GET],
        threadsafe=threadsafe,
        hasretval=True,
    )
    getfiledata = {}
    for final, res in prog:
        if final:
            getfiledata = res
        else:
            i, item = res
            progress.increment(step=i, item=item)
    updated = len(actions[mergestatemod.ACTION_GET])

    if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions[mergestatemod.ACTION_FORGET]:
        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
        progress.increment(item=f)

    # re-add (manifest only, just log it)
    for f, args, msg in actions[mergestatemod.ACTION_ADD]:
        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
        progress.increment(item=f)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]:
        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
        progress.increment(item=f)

    # keep (noop, just log it)
    for f, args, msg in actions[mergestatemod.ACTION_KEEP]:
        repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions[mergestatemod.ACTION_EXEC]:
        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
        progress.increment(item=f)
        (flags,) = args
        wctx[f].audit()
        wctx[f].setflags(b'l' in flags, b'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError(
                b"in-memory merge does not support mergedriver"
            )
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updateresult(
                updated, merged, removed, max(len(unresolvedf), 1)
            )
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
            progress.increment(item=f)
            if f == b'.hgsubstate':  # subrepo states need updating
                subrepoutil.submerge(
                    repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
                )
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
            progress.increment(item=f, total=numupdates)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if (
        usemergedriver
        and not unresolved
        and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
    ):
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]}
        for k, acts in pycompat.iteritems(extraactions):
            actions[k].extend(acts)
            if k == mergestatemod.ACTION_GET and wantfiledata:
                # no filedata until mergestate is updated to provide it
                for a in acts:
                    getfiledata[a[0]] = None
            # Remove these files from actions[ACTION_MERGE] as well. This is
            # important because in recordupdates, files in actions[ACTION_MERGE]
            # are processed after files in other actions, and the merge driver
            # might add files to those actions via extraactions above. This can
            # lead to a file being recorded twice, with poor results. This is
            # especially problematic for actions[ACTION_REMOVE] (currently only
            # possible with the merge driver in the initial merge process;
            # interrupted merges don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions[mergestatemod.ACTION_MERGE] = [
            a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles
        ]

    progress.complete()
    assert len(getfiledata) == (
        len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0
    )
    return updateresult(updated, merged, removed, unresolved), getfiledata


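# Editor's sketch (not part of the original module): a minimal applyupdates()
# call, shaped like the one update() below performs (assuming `repo`,
# `actions`, `wc` and `p2` from the calculate phase; the labels are the ones
# `hg update` conventionally passes):
#
#     stats, getfiledata = applyupdates(
#         repo, actions, wc, p2, overwrite=False, wantfiledata=True,
#         labels=[b'working copy', b'destination'],
#     )
#     if stats.unresolvedcount:
#         repo.ui.warn(b'there were unresolved conflicts\n')

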
UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'


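# Editor's note (illustrative): these constants mirror the values of the
# experimental.updatecheck config option; e.g. a caller requesting that an
# update only proceed when it cannot conflict would pass:
#
#     update(repo, node, branchmerge=False, force=False,
#            updatecheck=UPDATECHECK_NO_CONFLICT)

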
def update(
    repo,
    node,
    branchmerge,
    force,
    ancestor=None,
    mergeancestor=False,
    labels=None,
    matcher=None,
    mergeforce=False,
    updatedirstate=True,
    updatecheck=None,
    wc=None,
):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *    |    (1)
     y   *   y   *    *     *     *    |    (1)
     y   *   *   y    *     *     *    |    (1)
     *   y   y   *    *     *     *    |    (1)
     *   y   *   y    *     *     *    |    (1)
     *   *   y   y    *     *     *    |    (1)
     *   *   *   *    *     n     n    |     x
     *   *   *   *    n     *     *    |    ok
     n   n   n   n    y     *     y    |   merge
     n   n   n   n    y     y     n    |    (2)
     n   n   n   y    y     *     *    |   merge
     n   n   y   n    y     *     *    |   merge if no conflict
     n   y   n   n    y     *     *    |   discard
     y   n   n   n    y     *     *    |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = UPDATECHECK_LINEAR
        if updatecheck not in (
            UPDATECHECK_NONE,
            UPDATECHECK_LINEAR,
            UPDATECHECK_NO_CONFLICT,
        ):
            raise ValueError(
                r'Invalid updatecheck %r (can accept %r)'
                % (
                    updatecheck,
                    (
                        UPDATECHECK_NONE,
                        UPDATECHECK_LINEAR,
                        UPDATECHECK_NO_CONFLICT,
                    ),
                )
            )
    if wc is not None and wc.isinmemory():
        maybe_wlock = util.nullcontextmanager()
    else:
        maybe_wlock = repo.wlock()
    with maybe_wlock:
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        p2 = repo[node]
        if ancestor is not None:
            pas = [repo[ancestor]]
        else:
            if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        overwrite = force and not branchmerge
        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_(b"outstanding uncommitted merge"))
            ms = mergestatemod.mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(
                    _(b"outstanding merge conflicts"),
                    hint=_(b"use 'hg resolve' to resolve"),
                )
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(
                    _(
                        b"merging with a working directory ancestor"
                        b" has no effect"
                    )
                )
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(
                        _(b"nothing to merge"),
                        hint=_(b"use 'hg update' or check 'hg heads'"),
                    )
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(
                    _(b"uncommitted changes"),
                    hint=_(b"use 'hg status' to list changes"),
                )
            if not wc.isinmemory():
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2:  # no-op update
                # call the hooks and exit early
                repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
                repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
                return updateresult(0, 0, 0, 0)

            if updatecheck == UPDATECHECK_LINEAR and pas not in (
                [p1],
                [p2],
            ):  # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass  # allow updating to successors
                    else:
                        msg = _(b"uncommitted changes")
                        hint = _(b"commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool(b'merge', b'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo,
            wc,
            p2,
            pas,
            branchmerge,
            force,
            mergeancestor,
            followcopies,
            matcher=matcher,
            mergeforce=mergeforce,
        )

        if updatecheck == UPDATECHECK_NO_CONFLICT:
            for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
                if m not in (
                    mergestatemod.ACTION_GET,
                    mergestatemod.ACTION_KEEP,
                    mergestatemod.ACTION_EXEC,
                    mergestatemod.ACTION_REMOVE,
                    mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                    mergestatemod.ACTION_GET_OTHER_AND_STORE,
                ):
                    msg = _(b"conflicting changes")
                    hint = _(b"commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        if b'.hgsubstate' in actionbyfile:
            f = b'.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts[b'f'] = f
            if m == mergestatemod.ACTION_CHANGED_DELETED:
                if repo.ui.promptchoice(
                    _(
                        b"local%(l)s changed %(f)s which other%(o)s deleted\n"
                        b"use (c)hanged version or (d)elete?"
                        b"$$ &Changed $$ &Delete"
                    )
                    % prompts,
                    0,
                ):
                    actionbyfile[f] = (
                        mergestatemod.ACTION_REMOVE,
                        None,
                        b'prompt delete',
                    )
                elif f in p1:
                    actionbyfile[f] = (
                        mergestatemod.ACTION_ADD_MODIFIED,
                        None,
                        b'prompt keep',
                    )
                else:
                    actionbyfile[f] = (
                        mergestatemod.ACTION_ADD,
                        None,
                        b'prompt keep',
                    )
            elif m == mergestatemod.ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if (
                    repo.ui.promptchoice(
                        _(
                            b"other%(o)s changed %(f)s which local%(l)s deleted\n"
                            b"use (c)hanged version or leave (d)eleted?"
                            b"$$ &Changed $$ &Deleted"
                        )
                        % prompts,
                        0,
                    )
                    == 0
                ):
                    actionbyfile[f] = (
                        mergestatemod.ACTION_GET,
                        (flags, False),
                        b'prompt recreating',
                    )
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = emptyactions()
        for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        # ACTION_GET_OTHER_AND_STORE is a mergestatemod.ACTION_GET + store in mergestate
        for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
            actions[mergestatemod.ACTION_GET].append(e)

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if not branchmerge and (
                force or not wc.dirty(missing=True, branch=False)
            ):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(pycompat.iteritems(diverge)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was renamed "
                    b"multiple times to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(pycompat.iteritems(renamedelete)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was deleted "
                    b"and renamed to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        ### apply phase
        if not branchmerge:  # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
        # If we're doing a partial update, we need to skip updating
        # the dirstate.
        always = matcher is None or matcher.always()
        updatedirstate = updatedirstate and always and not wc.isinmemory()
        if updatedirstate:
            repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write(b'updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only allow on Linux and MacOS because that's where fsmonitor is
        # considered stable.
        fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint(
            b'fsmonitor', b'warn_update_file_count'
        )
        try:
            # avoid cycle: extensions -> cmdutil -> merge
            from . import extensions

            extensions.find(b'fsmonitor')
            fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (
            fsmonitorwarning
            and not fsmonitorenabled
            and p1.node() == nullid
            and len(actions[mergestatemod.ACTION_GET]) >= fsmonitorthreshold
            and pycompat.sysplatform.startswith((b'linux', b'darwin'))
        ):
            repo.ui.warn(
                _(
                    b'(warning: large working directory being used without '
                    b'fsmonitor enabled; enable fsmonitor to improve performance; '
                    b'see "hg help -e fsmonitor")\n'
                )
            )

        wantfiledata = updatedirstate and not branchmerge
        stats, getfiledata = applyupdates(
            repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
        )

        if updatedirstate:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                mergestatemod.recordupdates(
                    repo, actions, branchmerge, getfiledata
                )
            # update completed, clear state
            util.unlink(repo.vfs.join(b'updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if updatedirstate:
        repo.hook(
            b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
        )
    return stats


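# Editor's sketch (not part of the original module): the two classic shapes
# of the call above -- a plain checkout and a branch merge (`node` and
# `othernode` are hypothetical changeset ids):
#
#     update(repo, node, branchmerge=False, force=False)  # like `hg update`
#     update(repo, othernode, branchmerge=True, force=False,
#            mergeforce=False)  # like `hg merge`

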
def merge(ctx, labels=None, force=False, wc=None):
    """Merge another topological branch into the working copy.

    force = whether the merge was run with 'merge --force' (deprecated)
    """

    return update(
        ctx.repo(),
        ctx.rev(),
        labels=labels,
        branchmerge=True,
        force=force,
        mergeforce=force,
        wc=wc,
    )


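# Editor's sketch (not part of the original module): merge() takes a context
# rather than (repo, node); a caller merging in another head might do:
#
#     stats = merge(repo[b'other-head'], labels=[b'working copy', b'merge rev'])
#     if stats.unresolvedcount:
#         pass  # leave the conflicts for `hg resolve`
#
# (`other-head` is a hypothetical bookmark name; the labels shown are
# assumptions modeled on the ones `hg merge` conventionally uses.)

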
def clean_update(ctx, wc=None):
    """Do a clean update to the given commit.

    This involves updating to the commit and discarding any changes in the
    working copy.
    """
    return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)


def revert_to(ctx, matcher=None, wc=None):
    """Revert the working copy to the given commit.

    The working copy will keep its current parent(s) but its content will
    be the same as in the given commit.
    """

    return update(
        ctx.repo(),
        ctx.rev(),
        branchmerge=False,
        force=True,
        updatedirstate=False,
        matcher=matcher,
        wc=wc,
    )


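# Editor's note (illustrative): clean_update() and revert_to() above differ
# only in whether the dirstate parent moves. Assuming a `repo`:
#
#     clean_update(repo[b'tip'])  # working copy parent becomes tip
#     revert_to(repo[b'tip'])     # parent unchanged; file contents match tip,
#                                 # similar in spirit to `hg revert --all -r tip`

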
def graft(
    repo,
    ctx,
    base=None,
    labels=None,
    keepparent=False,
    keepconflictparent=False,
    wctx=None,
):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    base - merge base, or ctx.p1() if not specified
    labels - merge labels, e.g. ['local', 'graft']
    keepparent - keep second parent if any
    keepconflictparent - if unresolved, keep parent used for the merge

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    # We also pass mergeancestor=True when base is the same revision as p1. 2)
    # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
    wctx = wctx or repo[None]
    pctx = wctx.p1()
    base = base or ctx.p1()
    mergeancestor = (
        repo.changelog.isancestor(pctx.node(), ctx.node())
        or pctx.rev() == base.rev()
    )

    stats = update(
        repo,
        ctx.node(),
        True,
        True,
        base.node(),
        mergeancestor=mergeancestor,
        labels=labels,
        wc=wctx,
    )

    if keepconflictparent and stats.unresolvedcount:
        pother = ctx.node()
    else:
        pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and base in parents:
        parents.remove(base)
        pother = parents[0].node()
    # Never set both parents equal to each other
    if pother == pctx.node():
        pother = nullid

    if wctx.isinmemory():
        wctx.setparents(pctx.node(), pother)
        # fix up dirstate for copies and renames
        copies.graftcopies(wctx, ctx, base)
    else:
        with repo.dirstate.parentchange():
            repo.setparents(pctx.node(), pother)
            repo.dirstate.write(repo.currenttransaction())
            # fix up dirstate for copies and renames
            copies.graftcopies(wctx, ctx, base)
    return stats


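# NOTE (editor sketch, not part of upstream merge.py): graft() is what
# 'hg graft' uses internally -- a merge against a chosen ancestor, after
# which the dirstate is collapsed back to a single parent; 'repo' and 'rev'
# are assumed to exist.
def _example_graft(repo, rev):
    stats = graft(
        repo, repo[rev], base=repo[rev].p1(), labels=[b'local', b'graft']
    )
    return stats.unresolvedcount

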
def purge(
    repo,
    matcher,
    unknown=True,
    ignored=False,
    removeemptydirs=True,
    removefiles=True,
    abortonerror=False,
    noop=False,
):
    """Purge the working directory of untracked files.

    ``matcher`` is a matcher configured to scan the working directory -
    potentially a subset.

    ``unknown`` controls whether unknown files should be purged.

    ``ignored`` controls whether ignored files should be purged.

    ``removeemptydirs`` controls whether empty directories should be removed.

    ``removefiles`` controls whether files are removed.

    ``abortonerror`` causes an exception to be raised if an error occurs
    deleting a file or directory.

    ``noop`` controls whether files are actually removed; when True, nothing
    is deleted and only the would-be removals are reported.

    Returns an iterable of relative paths in the working directory that were
    or would be removed.
    """

    def remove(removefn, path):
        try:
            removefn(path)
        except OSError:
            m = _(b'%s cannot be removed') % path
            if abortonerror:
                raise error.Abort(m)
            else:
                repo.ui.warn(_(b'warning: %s\n') % m)

    # There's no API to copy a matcher. So mutate the passed matcher and
    # restore it when we're done.
    oldtraversedir = matcher.traversedir

    res = []

    try:
        if removeemptydirs:
            directories = []
            matcher.traversedir = directories.append

        status = repo.status(match=matcher, ignored=ignored, unknown=unknown)

        if removefiles:
            for f in sorted(status.unknown + status.ignored):
                if not noop:
                    repo.ui.note(_(b'removing file %s\n') % f)
                    remove(repo.wvfs.unlink, f)
                res.append(f)

        if removeemptydirs:
            for f in sorted(directories, reverse=True):
                if matcher(f) and not repo.wvfs.listdir(f):
                    if not noop:
                        repo.ui.note(_(b'removing directory %s\n') % f)
                        remove(repo.wvfs.rmdir, f)
                    res.append(f)

        return res

    finally:
        matcher.traversedir = oldtraversedir
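

# NOTE (editor sketch, not part of upstream merge.py): with noop=True
# purge() only reports what it would delete, which makes a safe dry run;
# 'repo' is assumed to be a loaded localrepo.
def _example_purge_dry_run(repo):
    from mercurial import match as matchmod

    return purge(repo, matchmod.always(), ignored=False, noop=True)
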
@@ -1,3227 +1,3231 @@
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import collections
import contextlib
import copy
import errno
import os
import re
import shutil
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from .pycompat import open
from . import (
    copies,
    diffhelper,
    diffutil,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)
from .utils import (
    dateutil,
    hashutil,
    procutil,
    stringutil,
)

stringio = util.stringio

gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')
wordsplitter = re.compile(
    br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|[^ \ta-zA-Z0-9_\x80-\xff])'
)

PatchError = error.PatchError

# public functions


def split(stream):
    '''return an iterator of individual patches from a stream'''

    def isheader(line, inheader):
        if inheader and line.startswith((b' ', b'\t')):
            # continuation
            return True
        if line.startswith((b' ', b'-', b'+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(b': ', 1)
        return len(l) == 2 and b' ' not in l[0]

    def chunk(lines):
        return stringio(b''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith(b'# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith(b'From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = stringio()
            g = mail.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = [b'content-type']

    if not util.safehasattr(stream, b'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith(b'# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith(b'From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(b':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith(b'--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)


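# NOTE (editor sketch, not part of upstream patch.py): feeding split() a
# stream that contains two exported changesets yields one file-like chunk
# per patch.
def _example_split():
    data = (
        b'# HG changeset patch\n# User alice\n\ndiff -r 0 -r 1 a\n'
        b'# HG changeset patch\n# User bob\n\ndiff -r 1 -r 2 a\n'
    )
    return [c.read() for c in split(stringio(data))]  # two chunks

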
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [
    (b'Date', b'date'),
    (b'Branch', b'branch'),
    (b'Node ID', b'nodeid'),
]


@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    fd, tmpname = pycompat.mkstemp(prefix=b'hg-patch-')
    tmpfp = os.fdopen(fd, 'wb')
    try:
        yield _extract(ui, fileobj, tmpname, tmpfp)
    finally:
        tmpfp.close()
        os.unlink(tmpname)


def _extract(ui, fileobj, tmpname, tmpfp):

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(
        br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
        br'---[ \t].*?^\+\+\+[ \t]|'
        br'\*\*\*[ \t].*?^---[ \t])',
        re.MULTILINE | re.DOTALL,
    )

    data = {}

    msg = mail.parse(fileobj)

    subject = msg['Subject'] and mail.headdecode(msg['Subject'])
    data[b'user'] = msg['From'] and mail.headdecode(msg['From'])
    if not subject and not data[b'user']:
        # Not an email, restore parsed headers if any
        subject = (
            b'\n'.join(
                b': '.join(map(encoding.strtolocal, h)) for h in msg.items()
            )
            + b'\n'
        )

    # should try to parse msg['Date']
    parents = []

    nodeid = msg['X-Mercurial-Node']
    if nodeid:
        data[b'nodeid'] = nodeid = mail.headdecode(nodeid)
        ui.debug(b'Node ID: %s\n' % nodeid)

    if subject:
        if subject.startswith(b'[PATCH'):
            pend = subject.find(b']')
            if pend >= 0:
                subject = subject[pend + 1 :].lstrip()
        subject = re.sub(br'\n[ \t]+', b' ', subject)
        ui.debug(b'Subject: %s\n' % subject)
    if data[b'user']:
        ui.debug(b'From: %s\n' % data[b'user'])
    diffs_seen = 0
    ok_types = (b'text/plain', b'text/x-diff', b'text/x-patch')
    message = b''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug(b'Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug(b'found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            for line in payload[: m.start(0)].splitlines():
                if line.startswith(b'# HG changeset patch') and not hgpatch:
                    ui.debug(b'patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    if line.startswith(b'# User '):
                        data[b'user'] = line[7:]
                        ui.debug(b'From: %s\n' % data[b'user'])
                    elif line.startswith(b"# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith(b"# "):
                        for header, key in patchheadermap:
                            prefix = b'# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix) :]
                                ui.debug(b'%s: %s\n' % (header, data[key]))
                    else:
                        hgpatchheader = False
                elif line == b'---':
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write(b'\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith(b'\n'):
                    tmpfp.write(b'\n')
        elif not diffs_seen and message and content_type == b'text/plain':
            message += b'\n' + payload

    if subject and not message.startswith(subject):
        message = b'%s\n%s' % (subject, message)
    data[b'message'] = message
    tmpfp.close()
    if parents:
        data[b'p1'] = parents.pop(0)
    if parents:
        data[b'p2'] = parents.pop(0)

    if diffs_seen:
        data[b'filename'] = tmpname

    return data


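# NOTE (editor sketch, not part of upstream patch.py): extract() is a
# context manager, so the temporary patch file it writes is always cleaned
# up on exit; 'ui' and a binary file-like 'fileobj' are assumed to exist.
def _example_extract(ui, fileobj):
    with extract(ui, fileobj) as data:
        if b'filename' not in data:
            return None  # fileobj did not contain a patch
        return data.get(b'user'), data.get(b'message')

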
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """

    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = b'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == b'/dev/null':
            return self.op == b'ADD'
        return afile == b'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == b'/dev/null':
            return self.op == b'DELETE'
        return bfile == b'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)


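# NOTE (editor sketch, not part of upstream patch.py): setmode() keeps the
# raw permission bits, so the tuple members are truthy/falsy integers rather
# than strict booleans.
def _example_patchmeta_mode():
    gp = patchmeta(b'bin/tool')
    gp.setmode(0o100755)  # regular executable file
    islink, isexec = gp.mode
    return bool(islink), bool(isexec)  # (False, True)

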
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(b' \r\n')
        if line.startswith(b'diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith(b'--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith(b'rename from '):
                gp.op = b'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith(b'rename to '):
                gp.path = line[10:]
            elif line.startswith(b'copy from '):
                gp.op = b'COPY'
                gp.oldpath = line[10:]
            elif line.startswith(b'copy to '):
                gp.path = line[8:]
            elif line.startswith(b'deleted file'):
                gp.op = b'DELETE'
            elif line.startswith(b'new file mode '):
                gp.op = b'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith(b'new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith(b'GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches


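# NOTE (editor sketch, not part of upstream patch.py): readgitpatch() only
# needs an iterable of bytes lines, so a rename can be parsed like this.
def _example_readgitpatch():
    lines = [
        b'diff --git a/old.txt b/new.txt\n',
        b'rename from old.txt\n',
        b'rename to new.txt\n',
    ]
    [gp] = readgitpatch(lines)
    return gp.op, gp.oldpath, gp.path  # (b'RENAME', b'old.txt', b'new.txt')

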
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        return iter(self.readline, b'')


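# NOTE (editor sketch, not part of upstream patch.py): lines pushed back
# into a linereader are served before anything read from the wrapped file.
def _example_linereader():
    lr = linereader(stringio(b'second\n'))
    lr.push(b'first\n')
    return [lr.readline(), lr.readline()]  # [b'first\n', b'second\n']

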
class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError


class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        rmdir = self.ui.configbool(b'experimental', b'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        fname = fname + b".rej"
        self.ui.warn(
            _(b"%d out of %d hunks FAILED -- saving rejects to file %s\n")
            % (failed, total, fname)
        )
        fp = self.opener(fname, b'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)


class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == b'?' and self.exists(fname):
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)


class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4 * (2 ** 20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = pycompat.mkdtemp(prefix=b'hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = b'%d' % self.created
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)


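# NOTE (editor sketch, not part of upstream patch.py): once maxsize is
# exceeded, filestore transparently spills additional files to a temporary
# on-disk vfs; getfile() reads back from either place.
def _example_filestore_spill():
    store = filestore(maxsize=4)
    store.setfile(b'a', b'1234', (False, False))   # kept in memory
    store.setfile(b'b', b'56789', (False, False))  # spilled to disk
    try:
        return store.getfile(b'b')  # (b'56789', (False, False), None)
    finally:
        store.close()

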
class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), (b'l' in flags, b'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed


# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = [b'strict', b'crlf', b'lf', b'auto']


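# NOTE (editor sketch, not part of upstream patch.py): how the 'unidesc'
# regex above decomposes a unified hunk header into start/length pairs.
def _example_unidesc():
    m = unidesc.match(b'@@ -12,3 +14,4 @@')
    return m.groups()  # (b'12', b'3', b'14', b'4')

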
674 class patchfile(object):
674 class patchfile(object):
675 def __init__(self, ui, gp, backend, store, eolmode=b'strict'):
675 def __init__(self, ui, gp, backend, store, eolmode=b'strict'):
676 self.fname = gp.path
676 self.fname = gp.path
677 self.eolmode = eolmode
677 self.eolmode = eolmode
678 self.eol = None
678 self.eol = None
679 self.backend = backend
679 self.backend = backend
680 self.ui = ui
680 self.ui = ui
681 self.lines = []
681 self.lines = []
682 self.exists = False
682 self.exists = False
683 self.missing = True
683 self.missing = True
684 self.mode = gp.mode
684 self.mode = gp.mode
685 self.copysource = gp.oldpath
685 self.copysource = gp.oldpath
686 self.create = gp.op in (b'ADD', b'COPY', b'RENAME')
686 self.create = gp.op in (b'ADD', b'COPY', b'RENAME')
687 self.remove = gp.op == b'DELETE'
687 self.remove = gp.op == b'DELETE'
688 if self.copysource is None:
688 if self.copysource is None:
689 data, mode = backend.getfile(self.fname)
689 data, mode = backend.getfile(self.fname)
690 else:
690 else:
691 data, mode = store.getfile(self.copysource)[:2]
691 data, mode = store.getfile(self.copysource)[:2]
692 if data is not None:
692 if data is not None:
693 self.exists = self.copysource is None or backend.exists(self.fname)
693 self.exists = self.copysource is None or backend.exists(self.fname)
694 self.missing = False
694 self.missing = False
695 if data:
695 if data:
696 self.lines = mdiff.splitnewlines(data)
696 self.lines = mdiff.splitnewlines(data)
697 if self.mode is None:
697 if self.mode is None:
698 self.mode = mode
698 self.mode = mode
699 if self.lines:
699 if self.lines:
700 # Normalize line endings
700 # Normalize line endings
701 if self.lines[0].endswith(b'\r\n'):
701 if self.lines[0].endswith(b'\r\n'):
702 self.eol = b'\r\n'
702 self.eol = b'\r\n'
703 elif self.lines[0].endswith(b'\n'):
703 elif self.lines[0].endswith(b'\n'):
704 self.eol = b'\n'
704 self.eol = b'\n'
705 if eolmode != b'strict':
705 if eolmode != b'strict':
706 nlines = []
706 nlines = []
707 for l in self.lines:
707 for l in self.lines:
708 if l.endswith(b'\r\n'):
708 if l.endswith(b'\r\n'):
709 l = l[:-2] + b'\n'
709 l = l[:-2] + b'\n'
710 nlines.append(l)
710 nlines.append(l)
711 self.lines = nlines
711 self.lines = nlines
712 else:
712 else:
713 if self.create:
713 if self.create:
714 self.missing = False
714 self.missing = False
715 if self.mode is None:
715 if self.mode is None:
716 self.mode = (False, False)
716 self.mode = (False, False)
717 if self.missing:
717 if self.missing:
718 self.ui.warn(_(b"unable to find '%s' for patching\n") % self.fname)
718 self.ui.warn(_(b"unable to find '%s' for patching\n") % self.fname)
719 self.ui.warn(
719 self.ui.warn(
720 _(
720 _(
721 b"(use '--prefix' to apply patch relative to the "
721 b"(use '--prefix' to apply patch relative to the "
722 b"current directory)\n"
722 b"current directory)\n"
723 )
723 )
724 )
724 )
725
725
726 self.hash = {}
726 self.hash = {}
727 self.dirty = 0
727 self.dirty = 0
728 self.offset = 0
728 self.offset = 0
729 self.skew = 0
729 self.skew = 0
730 self.rej = []
730 self.rej = []
731 self.fileprinted = False
731 self.fileprinted = False
732 self.printfile(False)
732 self.printfile(False)
733 self.hunks = 0
733 self.hunks = 0
734
734
735 def writelines(self, fname, lines, mode):
735 def writelines(self, fname, lines, mode):
736 if self.eolmode == b'auto':
736 if self.eolmode == b'auto':
737 eol = self.eol
737 eol = self.eol
738 elif self.eolmode == b'crlf':
738 elif self.eolmode == b'crlf':
739 eol = b'\r\n'
739 eol = b'\r\n'
740 else:
740 else:
741 eol = b'\n'
741 eol = b'\n'
742
742
743 if self.eolmode != b'strict' and eol and eol != b'\n':
743 if self.eolmode != b'strict' and eol and eol != b'\n':
744 rawlines = []
744 rawlines = []
745 for l in lines:
745 for l in lines:
746 if l and l.endswith(b'\n'):
746 if l and l.endswith(b'\n'):
747 l = l[:-1] + eol
747 l = l[:-1] + eol
748 rawlines.append(l)
748 rawlines.append(l)
749 lines = rawlines
749 lines = rawlines
750
750
751 self.backend.setfile(fname, b''.join(lines), mode, self.copysource)
751 self.backend.setfile(fname, b''.join(lines), mode, self.copysource)
752
752
753 def printfile(self, warn):
753 def printfile(self, warn):
754 if self.fileprinted:
754 if self.fileprinted:
755 return
755 return
756 if warn or self.ui.verbose:
756 if warn or self.ui.verbose:
757 self.fileprinted = True
757 self.fileprinted = True
758 s = _(b"patching file %s\n") % self.fname
758 s = _(b"patching file %s\n") % self.fname
759 if warn:
759 if warn:
760 self.ui.warn(s)
760 self.ui.warn(s)
761 else:
761 else:
762 self.ui.note(s)
762 self.ui.note(s)
763
763
764 def findlines(self, l, linenum):
764 def findlines(self, l, linenum):
765 # looks through the hash and finds candidate lines. The
765 # looks through the hash and finds candidate lines. The
766 # result is a list of line numbers sorted based on distance
766 # result is a list of line numbers sorted based on distance
767 # from linenum
767 # from linenum
768
768
769 cand = self.hash.get(l, [])
769 cand = self.hash.get(l, [])
770 if len(cand) > 1:
770 if len(cand) > 1:
771 # resort our list of potentials forward then back.
771 # resort our list of potentials forward then back.
772 cand.sort(key=lambda x: abs(x - linenum))
772 cand.sort(key=lambda x: abs(x - linenum))
773 return cand
773 return cand
774
774
775 def write_rej(self):
775 def write_rej(self):
776 # our rejects are a little different from patch(1). This always
776 # our rejects are a little different from patch(1). This always
777 # creates rejects in the same form as the original patch. A file
777 # creates rejects in the same form as the original patch. A file
778 # header is inserted so that you can run the reject through patch again
778 # header is inserted so that you can run the reject through patch again
779 # without having to type the filename.
779 # without having to type the filename.
780 if not self.rej:
780 if not self.rej:
781 return
781 return
782 base = os.path.basename(self.fname)
782 base = os.path.basename(self.fname)
783 lines = [b"--- %s\n+++ %s\n" % (base, base)]
783 lines = [b"--- %s\n+++ %s\n" % (base, base)]
784 for x in self.rej:
784 for x in self.rej:
785 for l in x.hunk:
785 for l in x.hunk:
786 lines.append(l)
786 lines.append(l)
787 if l[-1:] != b'\n':
787 if l[-1:] != b'\n':
788 lines.append(b"\n\\ No newline at end of file\n")
788 lines.append(b"\n\\ No newline at end of file\n")
789 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
789 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
790
790
791 def apply(self, h):
791 def apply(self, h):
792 if not h.complete():
792 if not h.complete():
793 raise PatchError(
793 raise PatchError(
794 _(b"bad hunk #%d %s (%d %d %d %d)")
794 _(b"bad hunk #%d %s (%d %d %d %d)")
795 % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
795 % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
796 )
796 )
797
797
798 self.hunks += 1
798 self.hunks += 1
799
799
800 if self.missing:
800 if self.missing:
801 self.rej.append(h)
801 self.rej.append(h)
802 return -1
802 return -1
803
803
804 if self.exists and self.create:
804 if self.exists and self.create:
805 if self.copysource:
805 if self.copysource:
806 self.ui.warn(
806 self.ui.warn(
807 _(b"cannot create %s: destination already exists\n")
807 _(b"cannot create %s: destination already exists\n")
808 % self.fname
808 % self.fname
809 )
809 )
810 else:
810 else:
811 self.ui.warn(_(b"file %s already exists\n") % self.fname)
811 self.ui.warn(_(b"file %s already exists\n") % self.fname)
812 self.rej.append(h)
812 self.rej.append(h)
813 return -1
813 return -1
814
814
815 if isinstance(h, binhunk):
815 if isinstance(h, binhunk):
816 if self.remove:
816 if self.remove:
817 self.backend.unlink(self.fname)
817 self.backend.unlink(self.fname)
818 else:
818 else:
819 l = h.new(self.lines)
819 l = h.new(self.lines)
820 self.lines[:] = l
820 self.lines[:] = l
821 self.offset += len(l)
821 self.offset += len(l)
822 self.dirty = True
822 self.dirty = True
823 return 0
823 return 0
824
824
825 horig = h
825 horig = h
826 if (
826 if (
827 self.eolmode in (b'crlf', b'lf')
827 self.eolmode in (b'crlf', b'lf')
828 or self.eolmode == b'auto'
828 or self.eolmode == b'auto'
829 and self.eol
829 and self.eol
830 ):
830 ):
831 # If new eols are going to be normalized, then normalize
831 # If new eols are going to be normalized, then normalize
832 # hunk data before patching. Otherwise, preserve input
        # hunk data before patching. Otherwise, preserve input
        # line-endings.
        h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart : oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in pycompat.xrange(
            self.ui.configint(b"patch", b"fuzz") + 1
        ):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew into account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"with fuzz %d "
                                b"(offset %d lines).\n"
                            )
                            self.printfile(True)
                            self.ui.warn(
                                msg % (h.number, l + 1, fuzzlen, offset)
                            )
                        else:
                            msg = _(
                                b"Hunk #%d succeeded at %d "
                                b"(offset %d lines).\n"
                            )
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_(b"Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

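    # close() flushes any in-memory modifications back through the backend
    # and records rejected hunks via write_rej(); the return value is the
    # number of hunks that could not be applied.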
    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)


class header(object):
    """patch header"""

    diffgit_re = re.compile(b'diff --git a/(.*) b/(.*)$')
    diff_re = re.compile(b'diff -r .* (.*)$')
    allhunks_re = re.compile(b'(?:index|deleted file) ')
    pretty_re = re.compile(b'(?:new file|deleted file) ')
    special_re = re.compile(b'(?:index|deleted|copy|rename|new mode) ')
    newfile_re = re.compile(b'(?:new file|copy to|rename to)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return any(h.startswith(b'index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith(b'index '):
                fp.write(_(b'this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_(b'this is a binary file\n'))
                break
            if h.startswith(b'---'):
                fp.write(
                    _(b'%d hunks, %d lines changed\n')
                    % (
                        len(self.hunks),
                        sum([max(h.added, h.removed) for h in self.hunks]),
                    )
                )
                break
            fp.write(h)

    def write(self, fp):
        fp.write(b''.join(self.header))

    def allhunks(self):
        return any(self.allhunks_re.match(h) for h in self.header)

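    # files() extracts the path(s) named by the first header line: a git
    # header can name two paths (for copies and renames), while a plain
    # b'diff -r' header names one.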
    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (
            ' '.join(pycompat.rapply(pycompat.fsdecode, self.files()))
        )

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the
        # hunk level; for example, a file that has been deleted is a
        # special file. The user cannot change the content of the
        # operation: in the case of a deleted file they must take the
        # deletion or leave it, they cannot take only some of it.
        # Newly added files are special if they are empty; they are not
        # special if they have some content, as we want to be able to
        # change it.
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or any(
            self.special_re.match(h) for h in self.header
        )


class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(
        self,
        header,
        fromline,
        toline,
        proc,
        before,
        hunk,
        after,
        maxcontext=None,
    ):
        def trimcontext(lines, reverse=False):
            if maxcontext is not None:
                delta = len(lines) - maxcontext
                if delta > 0:
                    if reverse:
                        return delta, lines[delta:]
                    else:
                        return delta, lines[:maxcontext]
            return 0, lines

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return (
            (v.hunk == self.hunk)
            and (v.proc == self.proc)
            and (self.fromline == v.fromline)
            and (self.header.files() == v.header.files())
        )

    def __hash__(self):
        return hash(
            (
                tuple(self.hunk),
                tuple(self.header.files()),
                self.fromline,
                self.proc,
            )
        )

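    # Example: countchanges([b'+new', b'-old', b' context']) -> (1, 1);
    # context lines contribute to neither count.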
    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h.startswith(b'+')])
        rem = len([h for h in hunk if h.startswith(b'-')])
        return add, rem

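    # Reversal simply swaps the roles of the two sides: b'+' lines become
    # b'-' lines and vice versa, while b'\\' (no-newline) markers pass
    # through unchanged.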
    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keeping other things
        unchanged.
        """
        m = {b'+': b'-', b'-': b'+', b'\\': b'\\'}
        hunk = [b'%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(
            self.header,
            self.toline,
            self.fromline,
            self.proc,
            self.before,
            hunk,
            self.after,
        )

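    # write() re-emits the hunk in unified diff form: the @@ lengths are
    # the surviving context plus the removed/added counts, excluding a
    # trailing b'\\ No newline at end of file' marker, which is not a real
    # context line.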
    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == b'\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write(
            b'@@ -%d,%d +%d,%d @@%s\n'
            % (
                self.fromline,
                fromlen,
                self.toline,
                tolen,
                self.proc and (b' ' + self.proc),
            )
        )
        fp.write(b''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    @encoding.strmethod
    def __repr__(self):
        return b'<hunk %r@%d>' % (self.filename(), self.fromline)


def getmessages():
    return {
        b'multiple': {
            b'apply': _(b"apply change %d/%d to '%s'?"),
            b'discard': _(b"discard change %d/%d to '%s'?"),
            b'keep': _(b"keep change %d/%d to '%s'?"),
            b'record': _(b"record change %d/%d to '%s'?"),
        },
        b'single': {
            b'apply': _(b"apply this change to '%s'?"),
            b'discard': _(b"discard this change to '%s'?"),
            b'keep': _(b"keep this change to '%s'?"),
            b'record': _(b"record this change to '%s'?"),
        },
        b'help': {
            b'apply': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, apply this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Apply remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Apply &all changes to all remaining files'
                b'$$ &Quit, applying no changes'
                b'$$ &? (display help)'
            ),
            b'discard': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, discard this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Discard remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Discard &all changes to all remaining files'
                b'$$ &Quit, discarding no changes'
                b'$$ &? (display help)'
            ),
            b'keep': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, keep this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Keep remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Keep &all changes to all remaining files'
                b'$$ &Quit, keeping all changes'
                b'$$ &? (display help)'
            ),
            b'record': _(
                b'[Ynesfdaq?]'
                b'$$ &Yes, record this change'
                b'$$ &No, skip this change'
                b'$$ &Edit this change manually'
                b'$$ &Skip remaining changes to this file'
                b'$$ Record remaining changes to this &file'
                b'$$ &Done, skip remaining changes and files'
                b'$$ Record &all changes to all remaining files'
                b'$$ &Quit, recording no changes'
                b'$$ &? (display help)'
            ),
        },
    }

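# The prompt strings above follow ui.promptchoice()'s convention: the
# choices are separated by b'$$' and the character after b'&' is the
# keyboard shortcut; ui.extractchoices() pulls these apart for the help
# listing shown on b'?'.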
def filterpatch(ui, headers, match, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    messages = getmessages()

    if operation is None:
        operation = b'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages[b'help'][operation]
            # IMPORTANT: keep the last line of this prompt short (<40 english
            # chars is a good target) because of issue6158.
            r = ui.promptchoice(b"%s\n(enter ? for help) %s" % (query, resps))
            ui.write(b"\n")
            if r == 8:  # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0:  # yes
                ret = True
            elif r == 1:  # no
                ret = False
            elif r == 2:  # Edit patch
                if chunk is None:
                    ui.write(_(b'cannot edit patch for whole file'))
                    ui.write(b"\n")
                    continue
                if chunk.header.binary():
                    ui.write(_(b'cannot edit patch for binary file'))
                    ui.write(b"\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = b'---' + _(
                    """
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
"""
                )
                (patchfd, patchfn) = pycompat.mkstemp(
                    prefix=b"hg-editor-", suffix=b".diff"
                )
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, 'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write(
                        b''.join(
                            [b'# ' + i + b'\n' for i in phelp.splitlines()]
                        )
                    )
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system(
                        b"%s \"%s\"" % (editor, patchfn),
                        environ={b'HGUSER': ui.username()},
                        blockedtag=b'filterpatch',
                    )
                    if ret != 0:
                        ui.warn(_(b"editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, 'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith(b'#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3:  # Skip
                ret = skipfile = False
            elif r == 4:  # file (Record remaining)
                ret = skipfile = True
            elif r == 5:  # done, skip remaining
                ret = skipall = False
            elif r == 6:  # all
                ret = skipall = True
            elif r == 7:  # quit
                raise error.Abort(_(b'user quit'))
            return ret, skipfile, skipall, newpatches

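    # Walk every header and its hunks, prompting as needed. 'applied' maps
    # a filename to its header followed by the hunks the user accepted;
    # 'seen' suppresses duplicate headers.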
    seen = set()
    applied = {}  # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = b''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        files = h.files()
        msg = _(b'examine changes to %s?') % _(b' and ').join(
            b"'%s'" % f for f in files
        )
        if all(match.exact(f) for f in files):
            r, skipall, np = True, None, None
        else:
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages[b'single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages[b'multiple'][operation] % (
                    idx,
                    total,
                    chunk.filename(),
                )
            r, skipfile, skipall, newpatches = prompt(
                skipfile, skipall, msg, chunk
            )
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return (
        sum(
            [
                h
                for h in pycompat.itervalues(applied)
                if h[0].special() or len(h) > 1
            ],
            [],
        ),
        {},
    )


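# filterpatch() returns a (chunks, {}) pair: the flattened chunk list
# interleaves each header with the hunks the user accepted, and a header
# with no accepted hunks survives only when it describes a "special"
# operation (deletion, rename, empty new file, ...).
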
class hunk(object):
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

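    # getnormalized() returns a copy whose a/b line lists have b'\r\n'
    # endings rewritten to b'\n' (e.g. b'+foo\r\n' -> b'+foo\n'); the raw
    # hunk text itself is shared with the original.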
    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith(b'\r\n'):
                    line = line[:-2] + b'\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(
                lr, self.hunk, self.lena, self.lenb, self.a, self.b
            )
        except error.ParseError as e:
            raise PatchError(_(b"bad hunk #%d: %s") % (self.number, e))
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Let's try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith(b'---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith(b'- ') or l.startswith(b'! '):
                u = b'-' + s
            elif l.startswith(b'  '):
                u = b' ' + s
            else:
                raise PatchError(
                    _(b"bad hunk #%d old text line %d") % (self.number, x)
                )
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith(br'\ '):
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_(b"bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith(br'\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) implementations out there that
                # behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith(b'+ ') or l.startswith(b'! '):
                u = b'+' + s
            elif l.startswith(b'  '):
                u = b' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(
                    _(b"bad hunk #%d new text line %d") % (self.number, x)
                )
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = b""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith(b'-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith(b'-') or x.startswith(b' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith(b'+') or x.startswith(b' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = b"@@ -%d,%d +%d,%d @@\n" % (
            self.starta,
            self.lena,
            self.startb,
            self.lenb,
        )
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        l = lr.readline()
        if l.startswith(br'\ '):
            diffhelper.fixnewline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of the hunk.
        # It checks the hunk to make sure only context lines are removed,
        # and then returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(b' '):
                    top += 1
                else:
                    break
            if not toponly:
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(b' '):
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top : len(old) - bot], new[top : len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart


class binhunk(object):
    """A binary patch file."""

    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = [b'GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        if self.delta:
            return [applybindelta(self.text, b''.join(lines))]
        return [self.text]

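    # Git binary patches encode the payload length of each line in its
    # first character (b'A'..b'Z' -> 1..26 bytes, b'a'..b'z' -> 27..52);
    # the remainder of the line is base85 data, and the concatenated
    # payload is zlib-compressed.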
    def _read(self, lr):
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip(b'\r\n')

        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(
                    _(b'could not extract "%s" binary data') % self._fname
                )
            if line.startswith(b'literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith(b'delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0:1]
            if l <= b'Z' and l >= b'A':
                l = ord(l) - ord(b'A') + 1
            else:
                l = ord(l) - ord(b'a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(
                    _(b'could not decode "%s" binary patch: %s')
                    % (self._fname, stringutil.forcebytestr(e))
                )
            line = getline(lr, self.hunk)
        text = zlib.decompress(b''.join(dec))
        if len(text) != size:
            raise PatchError(
                _(b'"%s" length is %d bytes, should be %d')
                % (self._fname, len(text), size)
            )
        self.text = text


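# parsefilename(b'--- a/foo.c\t2020-01-01') -> b'a/foo.c': the b'--- ' or
# b'+++ ' prefix is dropped and everything from the first tab (or, failing
# that, the first space) onwards is discarded.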
def parsefilename(str):
    # --- filename \t|space stuff
    s = str[4:].rstrip(b'\r\n')
    i = s.find(b'\t')
    if i < 0:
        i = s.find(b' ')
        if i < 0:
            return s
    return s[:i]


def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    newhunks = []
    for c in hunks:
        if util.safehasattr(c, b'reversehunk'):
            c = c.reversehunk()
        newhunks.append(c)
    return newhunks


def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """

    class parser(object):
        """patch parsing state machine"""

        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = b''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            self.addcontext([])
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            if self.hunk:
                h = recordhunk(
                    self.header,
                    self.fromline,
                    self.toline,
                    self.proc,
                    self.before,
                    self.hunk,
                    context,
                    maxcontext,
                )
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            if self.context:
                self.before = self.context
                self.context = []
            if self.hunk:
                self.addcontext([])
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass  # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        transitions = {
            b'file': {
                b'context': addcontext,
                b'file': newfile,
                b'hunk': addhunk,
                b'range': addrange,
            },
            b'context': {
                b'file': newfile,
                b'hunk': addhunk,
                b'range': addrange,
                b'other': addother,
            },
            b'hunk': {
                b'context': addcontext,
                b'file': newfile,
                b'range': addrange,
            },
            b'range': {b'context': addcontext, b'hunk': addhunk},
            b'other': {b'other': addother},
        }
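        # Each entry maps (current state, incoming event) to the handler
        # above; a transition missing from this table means the patch is
        # malformed, and the resulting KeyError is converted to a
        # PatchError below.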

    p = parser()
    fp = stringio()
    fp.write(b''.join(originalchunks))
    fp.seek(0)

    state = b'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError(
                b'unhandled transition: %s -> %s' % (state, newstate)
            )
        state = newstate
    del fp
    return p.finished()


def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        return b'', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find(b'/', i)
        if i == -1:
            raise PatchError(
                _(b"unable to strip away %d of %d dirs from %s")
                % (count, strip, path)
            )
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i : i + 1] == b'/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()


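# makepatchmeta() infers the git-style operation from plain hunk data: an
# a-side of /dev/null with a zero-length old range means the file is being
# created (ADD); a b-side of /dev/null with a zero-length new range means
# it is being deleted (DELETE).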
1897 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1898     nulla = afile_orig == b"/dev/null"
1899     nullb = bfile_orig == b"/dev/null"
1900     create = nulla and hunk.starta == 0 and hunk.lena == 0
1901     remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1902     abase, afile = pathtransform(afile_orig, strip, prefix)
1903     gooda = not nulla and backend.exists(afile)
1904     bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1905     if afile == bfile:
1906         goodb = gooda
1907     else:
1908         goodb = not nullb and backend.exists(bfile)
1909     missing = not goodb and not gooda and not create
1910 
1911     # some diff programs apparently produce patches where the afile is
1912     # not /dev/null, but afile starts with bfile
1913     abasedir = afile[: afile.rfind(b'/') + 1]
1914     bbasedir = bfile[: bfile.rfind(b'/') + 1]
1915     if (
1916         missing
1917         and abasedir == bbasedir
1918         and afile.startswith(bfile)
1919         and hunk.starta == 0
1920         and hunk.lena == 0
1921     ):
1922         create = True
1923         missing = False
1924 
1925     # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1926     # diff is between a file and its backup. In this case, the original
1927     # file should be patched (see original mpatch code).
1928     isbackup = abase == bbase and bfile.startswith(afile)
1929     fname = None
1930     if not missing:
1931         if gooda and goodb:
1932             if isbackup:
1933                 fname = afile
1934             else:
1935                 fname = bfile
1936         elif gooda:
1937             fname = afile
1938 
1939     if not fname:
1940         if not nullb:
1941             if isbackup:
1942                 fname = afile
1943             else:
1944                 fname = bfile
1945         elif not nulla:
1946             fname = afile
1947         else:
1948             raise PatchError(_(b"undefined source and destination files"))
1949 
1950     gp = patchmeta(fname)
1951     if create:
1952         gp.op = b'ADD'
1953     elif remove:
1954         gp.op = b'DELETE'
1955     return gp
1956 
1957 
1958 def scanpatch(fp):
1959     """like patch.iterhunks, but yield different events
1960 
1961     - ('file', [header_lines + fromfile + tofile])
1962     - ('context', [context_lines])
1963     - ('hunk', [hunk_lines])
1964     - ('range', (-start,len, +start,len, proc))
1965     """
1966     lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1967     lr = linereader(fp)
1968 
1969     def scanwhile(first, p):
1970         """scan lr while predicate holds"""
1971         lines = [first]
1972         for line in iter(lr.readline, b''):
1973             if p(line):
1974                 lines.append(line)
1975             else:
1976                 lr.push(line)
1977                 break
1978         return lines
1979 
1980     for line in iter(lr.readline, b''):
1981         if line.startswith(b'diff --git a/') or line.startswith(b'diff -r '):
1982 
1983             def notheader(line):
1984                 s = line.split(None, 1)
1985                 return not s or s[0] not in (b'---', b'diff')
1986 
1987             header = scanwhile(line, notheader)
1988             fromfile = lr.readline()
1989             if fromfile.startswith(b'---'):
1990                 tofile = lr.readline()
1991                 header += [fromfile, tofile]
1992             else:
1993                 lr.push(fromfile)
1994             yield b'file', header
1995         elif line.startswith(b' '):
1996             cs = (b' ', b'\\')
1997             yield b'context', scanwhile(line, lambda l: l.startswith(cs))
1998         elif line.startswith((b'-', b'+')):
1999             cs = (b'-', b'+', b'\\')
2000             yield b'hunk', scanwhile(line, lambda l: l.startswith(cs))
2001         else:
2002             m = lines_re.match(line)
2003             if m:
2004                 yield b'range', m.groups()
2005             else:
2006                 yield b'other', line
2007 
2008 
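# Quick illustration of the 'range' event: the lines_re pattern above
# captures the four numbers of a unified-diff hunk header plus any
# trailing context hint.
import re

_lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
_m = _lines_re.match(b'@@ -1,2 +1,3 @@ def f():')
assert _m.groups() == (b'1', b'2', b'1', b'3', b'def f():')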
2009 def scangitpatch(lr, firstline):
2010     """
2011     Git patches can emit:
2012     - rename a to b
2013     - change b
2014     - copy a to c
2015     - change c
2016 
2017     We cannot apply this sequence as-is: the renamed 'a' could not be
2018     found, since it would have been renamed already. And we cannot copy
2019     from 'b' instead because 'b' would have been changed already. So
2020     we scan the git patch for copy and rename commands so we can
2021     perform the copies ahead of time.
2022     """
2023     pos = 0
2024     try:
2025         pos = lr.fp.tell()
2026         fp = lr.fp
2027     except IOError:
2028         fp = stringio(lr.fp.read())
2029     gitlr = linereader(fp)
2030     gitlr.push(firstline)
2031     gitpatches = readgitpatch(gitlr)
2032     fp.seek(pos)
2033     return gitpatches
2034 
2035 
2036 def iterhunks(fp):
2037     """Read a patch and yield the following events:
2038     - ("file", afile, bfile, firsthunk): select a new target file.
2039     - ("hunk", hunk): a new hunk is ready to be applied, follows a
2040       "file" event.
2041     - ("git", gitchanges): current diff is in git format, gitchanges
2042       maps filenames to gitpatch records. Unique event.
2043     """
2044     afile = b""
2045     bfile = b""
2046     state = None
2047     hunknum = 0
2048     emitfile = newfile = False
2049     gitpatches = None
2050 
2051     # our states
2052     BFILE = 1
2053     context = None
2054     lr = linereader(fp)
2055 
2056     for x in iter(lr.readline, b''):
2057         if state == BFILE and (
2058             (not context and x.startswith(b'@'))
2059             or (context is not False and x.startswith(b'***************'))
2060             or x.startswith(b'GIT binary patch')
2061         ):
2062             gp = None
2063             if gitpatches and gitpatches[-1].ispatching(afile, bfile):
2064                 gp = gitpatches.pop()
2065             if x.startswith(b'GIT binary patch'):
2066                 h = binhunk(lr, gp.path)
2067             else:
2068                 if context is None and x.startswith(b'***************'):
2069                     context = True
2070                 h = hunk(x, hunknum + 1, lr, context)
2071             hunknum += 1
2072             if emitfile:
2073                 emitfile = False
2074                 yield b'file', (afile, bfile, h, gp and gp.copy() or None)
2075             yield b'hunk', h
2076         elif x.startswith(b'diff --git a/'):
2077             m = gitre.match(x.rstrip(b' \r\n'))
2078             if not m:
2079                 continue
2080             if gitpatches is None:
2081                 # scan whole input for git metadata
2082                 gitpatches = scangitpatch(lr, x)
2083                 yield b'git', [
2084                     g.copy() for g in gitpatches if g.op in (b'COPY', b'RENAME')
2085                 ]
2086                 gitpatches.reverse()
2087             afile = b'a/' + m.group(1)
2088             bfile = b'b/' + m.group(2)
2089             while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
2090                 gp = gitpatches.pop()
2091                 yield b'file', (
2092                     b'a/' + gp.path,
2093                     b'b/' + gp.path,
2094                     None,
2095                     gp.copy(),
2096                 )
2097             if not gitpatches:
2098                 raise PatchError(
2099                     _(b'failed to synchronize metadata for "%s"') % afile[2:]
2100                 )
2101             newfile = True
2102         elif x.startswith(b'---'):
2103             # check for a unified diff
2104             l2 = lr.readline()
2105             if not l2.startswith(b'+++'):
2106                 lr.push(l2)
2107                 continue
2108             newfile = True
2109             context = False
2110             afile = parsefilename(x)
2111             bfile = parsefilename(l2)
2112         elif x.startswith(b'***'):
2113             # check for a context diff
2114             l2 = lr.readline()
2115             if not l2.startswith(b'---'):
2116                 lr.push(l2)
2117                 continue
2118             l3 = lr.readline()
2119             lr.push(l3)
2120             if not l3.startswith(b"***************"):
2121                 lr.push(l2)
2122                 continue
2123             newfile = True
2124             context = True
2125             afile = parsefilename(x)
2126             bfile = parsefilename(l2)
2127 
2128         if newfile:
2129             newfile = False
2130             emitfile = True
2131             state = BFILE
2132             hunknum = 0
2133 
2134     while gitpatches:
2135         gp = gitpatches.pop()
2136         yield b'file', (b'a/' + gp.path, b'b/' + gp.path, None, gp.copy())
2137 
2138 
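# Illustrative event stream (not executed) for a minimal unified diff
#     --- a/f
#     +++ b/f
#     @@ -1,1 +1,1 @@
#     -old
#     +new
# iterhunks() yields, conceptually:
#     (b'file', (b'a/f', b'b/f', <hunk>, None))
#     (b'hunk', <hunk>)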
2139 def applybindelta(binchunk, data):
2140     """Apply a binary delta hunk
2141     The algorithm used is the algorithm from git's patch-delta.c
2142     """
2143 
2144     def deltahead(binchunk):
2145         i = 0
2146         for c in pycompat.bytestr(binchunk):
2147             i += 1
2148             if not (ord(c) & 0x80):
2149                 return i
2150         return i
2151 
2152     out = b""
2153     s = deltahead(binchunk)
2154     binchunk = binchunk[s:]
2155     s = deltahead(binchunk)
2156     binchunk = binchunk[s:]
2157     i = 0
2158     while i < len(binchunk):
2159         cmd = ord(binchunk[i : i + 1])
2160         i += 1
2161         if cmd & 0x80:
2162             offset = 0
2163             size = 0
2164             if cmd & 0x01:
2165                 offset = ord(binchunk[i : i + 1])
2166                 i += 1
2167             if cmd & 0x02:
2168                 offset |= ord(binchunk[i : i + 1]) << 8
2169                 i += 1
2170             if cmd & 0x04:
2171                 offset |= ord(binchunk[i : i + 1]) << 16
2172                 i += 1
2173             if cmd & 0x08:
2174                 offset |= ord(binchunk[i : i + 1]) << 24
2175                 i += 1
2176             if cmd & 0x10:
2177                 size = ord(binchunk[i : i + 1])
2178                 i += 1
2179             if cmd & 0x20:
2180                 size |= ord(binchunk[i : i + 1]) << 8
2181                 i += 1
2182             if cmd & 0x40:
2183                 size |= ord(binchunk[i : i + 1]) << 16
2184                 i += 1
2185             if size == 0:
2186                 size = 0x10000
2187             offset_end = offset + size
2188             out += data[offset:offset_end]
2189         elif cmd != 0:
2190             offset_end = i + cmd
2191             out += binchunk[i:offset_end]
2192             i += cmd
2193         else:
2194             raise PatchError(_(b'unexpected delta opcode 0'))
2195     return out
2196 
2197 
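# Hand-assembled git binary delta, a sketch of the byte layout consumed
# by applybindelta() above: two size varints, then an insert command and
# a copy command.
source = b'hello world'
delta = bytes([11, 9])                       # source size 11, target size 9
delta += bytes([4]) + b'HELL'                # cmd 0x04: insert 4 literal bytes
delta += bytes([0x80 | 0x01 | 0x10, 4, 5])   # copy: offset=4, size=5 -> b'o wor'
# applybindelta(delta, source) would return b'HELL' + b'o wor' == b'HELLo wor'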
2198 def applydiff(ui, fp, backend, store, strip=1, prefix=b'', eolmode=b'strict'):
2199     """Reads a patch from fp and tries to apply it.
2200 
2201     Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2202     there was any fuzz.
2203 
2204     If 'eolmode' is 'strict', the patch content and patched file are
2205     read in binary mode. Otherwise, line endings are ignored when
2206     patching then normalized according to 'eolmode'.
2207     """
2208     return _applydiff(
2209         ui,
2210         fp,
2211         patchfile,
2212         backend,
2213         store,
2214         strip=strip,
2215         prefix=prefix,
2216         eolmode=eolmode,
2217     )
2218 
2219 
2220 def _canonprefix(repo, prefix):
2221     if prefix:
2222         prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2223         if prefix != b'':
2224             prefix += b'/'
2225     return prefix
2226 
2227 
2228 def _applydiff(
2229     ui, fp, patcher, backend, store, strip=1, prefix=b'', eolmode=b'strict'
2230 ):
2231     prefix = _canonprefix(backend.repo, prefix)
2232 
2233     def pstrip(p):
2234         return pathtransform(p, strip - 1, prefix)[1]
2235 
2236     rejects = 0
2237     err = 0
2238     current_file = None
2239 
2240     for state, values in iterhunks(fp):
2241         if state == b'hunk':
2242             if not current_file:
2243                 continue
2244             ret = current_file.apply(values)
2245             if ret > 0:
2246                 err = 1
2247         elif state == b'file':
2248             if current_file:
2249                 rejects += current_file.close()
2250                 current_file = None
2251             afile, bfile, first_hunk, gp = values
2252             if gp:
2253                 gp.path = pstrip(gp.path)
2254                 if gp.oldpath:
2255                     gp.oldpath = pstrip(gp.oldpath)
2256             else:
2257                 gp = makepatchmeta(
2258                     backend, afile, bfile, first_hunk, strip, prefix
2259                 )
2260             if gp.op == b'RENAME':
2261                 backend.unlink(gp.oldpath)
2262             if not first_hunk:
2263                 if gp.op == b'DELETE':
2264                     backend.unlink(gp.path)
2265                     continue
2266                 data, mode = None, None
2267                 if gp.op in (b'RENAME', b'COPY'):
2268                     data, mode = store.getfile(gp.oldpath)[:2]
2269                     if data is None:
2270                         # This means that the old path does not exist
2271                         raise PatchError(
2272                             _(b"source file '%s' does not exist") % gp.oldpath
2273                         )
2274                 if gp.mode:
2275                     mode = gp.mode
2276                 if gp.op == b'ADD':
2277                     # Added files without content have no hunk and
2278                     # must be created
2279                     data = b''
2280                 if data or mode:
2281                     if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists(
2282                         gp.path
2283                     ):
2284                         raise PatchError(
2285                             _(
2286                                 b"cannot create %s: destination "
2287                                 b"already exists"
2288                             )
2289                             % gp.path
2290                         )
2291                     backend.setfile(gp.path, data, mode, gp.oldpath)
2292                 continue
2293             try:
2294                 current_file = patcher(ui, gp, backend, store, eolmode=eolmode)
2295             except PatchError as inst:
2296                 ui.warn(stringutil.forcebytestr(inst) + b'\n')
2297                 current_file = None
2298                 rejects += 1
2299                 continue
2300         elif state == b'git':
2301             for gp in values:
2302                 path = pstrip(gp.oldpath)
2303                 data, mode = backend.getfile(path)
2304                 if data is None:
2305                     # The error ignored here will trigger a getfile()
2306                     # error in a place more appropriate for error
2307                     # handling, and will not interrupt the patching
2308                     # process.
2309                     pass
2310                 else:
2311                     store.setfile(path, data, mode)
2312         else:
2313             raise error.Abort(_(b'unsupported parser state: %s') % state)
2314 
2315     if current_file:
2316         rejects += current_file.close()
2317 
2318     if rejects:
2319         return -1
2320     return err
2321 
2322 
2323 def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity):
2324     """use <patcher> to apply <patchname> to the working directory.
2325     returns whether patch was applied with fuzz factor."""
2326 
2327     fuzz = False
2328     args = []
2329     cwd = repo.root
2330     if cwd:
2331         args.append(b'-d %s' % procutil.shellquote(cwd))
2332     cmd = b'%s %s -p%d < %s' % (
2333         patcher,
2334         b' '.join(args),
2335         strip,
2336         procutil.shellquote(patchname),
2337     )
2338     ui.debug(b'Using external patch tool: %s\n' % cmd)
2339     fp = procutil.popen(cmd, b'rb')
2340     try:
2341         for line in util.iterfile(fp):
2342             line = line.rstrip()
2343             ui.note(line + b'\n')
2344             if line.startswith(b'patching file '):
2345                 pf = util.parsepatchoutput(line)
2346                 printed_file = False
2347                 files.add(pf)
2348             elif line.find(b'with fuzz') >= 0:
2349                 fuzz = True
2350                 if not printed_file:
2351                     ui.warn(pf + b'\n')
2352                     printed_file = True
2353                 ui.warn(line + b'\n')
2354             elif line.find(b'saving rejects to file') >= 0:
2355                 ui.warn(line + b'\n')
2356             elif line.find(b'FAILED') >= 0:
2357                 if not printed_file:
2358                     ui.warn(pf + b'\n')
2359                     printed_file = True
2360                 ui.warn(line + b'\n')
2361     finally:
2362         if files:
2363             scmutil.marktouched(repo, files, similarity)
2364     code = fp.close()
2365     if code:
2366         raise PatchError(
2367             _(b"patch command failed: %s") % procutil.explainexit(code)
2368         )
2369     return fuzz
2370 
2371 
2372 def patchbackend(
2373     ui, backend, patchobj, strip, prefix, files=None, eolmode=b'strict'
2374 ):
2375     if files is None:
2376         files = set()
2377     if eolmode is None:
2378         eolmode = ui.config(b'patch', b'eol')
2379     if eolmode.lower() not in eolmodes:
2380         raise error.Abort(_(b'unsupported line endings type: %s') % eolmode)
2381     eolmode = eolmode.lower()
2382 
2383     store = filestore()
2384     try:
2385         fp = open(patchobj, b'rb')
2386     except TypeError:
2387         fp = patchobj
2388     try:
2389         ret = applydiff(
2390             ui, fp, backend, store, strip=strip, prefix=prefix, eolmode=eolmode
2391         )
2392     finally:
2393         if fp != patchobj:
2394             fp.close()
2395         files.update(backend.close())
2396         store.close()
2397     if ret < 0:
2398         raise PatchError(_(b'patch failed to apply'))
2399     return ret > 0
2400 
2401 
2402 def internalpatch(
2403     ui,
2404     repo,
2405     patchobj,
2406     strip,
2407     prefix=b'',
2408     files=None,
2409     eolmode=b'strict',
2410     similarity=0,
2411 ):
2412     """use builtin patch to apply <patchobj> to the working directory.
2413     returns whether patch was applied with fuzz factor."""
2414     backend = workingbackend(ui, repo, similarity)
2415     return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2416 
2417 
2418 def patchrepo(
2419     ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode=b'strict'
2420 ):
2421     backend = repobackend(ui, repo, ctx, store)
2422     return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2423 
2424 
2425 def patch(
2426     ui,
2427     repo,
2428     patchname,
2429     strip=1,
2430     prefix=b'',
2431     files=None,
2432     eolmode=b'strict',
2433     similarity=0,
2434 ):
2435     """Apply <patchname> to the working directory.
2436 
2437     'eolmode' specifies how end of lines should be handled. It can be:
2438     - 'strict': inputs are read in binary mode, EOLs are preserved
2439     - 'crlf': EOLs are ignored when patching and reset to CRLF
2440     - 'lf': EOLs are ignored when patching and reset to LF
2441     - None: get it from user settings, default to 'strict'
2442     'eolmode' is ignored when using an external patcher program.
2443 
2444     Returns whether patch was applied with fuzz factor.
2445     """
2446     patcher = ui.config(b'ui', b'patch')
2447     if files is None:
2448         files = set()
2449     if patcher:
2450         return _externalpatch(
2451             ui, repo, patcher, patchname, strip, files, similarity
2452         )
2453     return internalpatch(
2454         ui, repo, patchname, strip, prefix, files, eolmode, similarity
2455     )
2456 
2457 
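# A rough sketch of what the 'lf'/'crlf' eolmode settings mean (an
# illustration only; the real normalization lives in the patchfile
# machinery, not in this hypothetical helper, and trailing-newline
# handling is simplified here):
def normalize_eol(data, eolmode):
    eol = b'\r\n' if eolmode == b'crlf' else b'\n'
    return b''.join(l.rstrip(b'\r\n') + eol for l in data.splitlines(True))

assert normalize_eol(b'a\r\nb\n', b'lf') == b'a\nb\n'
assert normalize_eol(b'a\nb\n', b'crlf') == b'a\r\nb\r\n'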
2458 def changedfiles(ui, repo, patchpath, strip=1, prefix=b''):
2459     backend = fsbackend(ui, repo.root)
2460     prefix = _canonprefix(repo, prefix)
2461     with open(patchpath, b'rb') as fp:
2462         changed = set()
2463         for state, values in iterhunks(fp):
2464             if state == b'file':
2465                 afile, bfile, first_hunk, gp = values
2466                 if gp:
2467                     gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2468                     if gp.oldpath:
2469                         gp.oldpath = pathtransform(
2470                             gp.oldpath, strip - 1, prefix
2471                         )[1]
2472                 else:
2473                     gp = makepatchmeta(
2474                         backend, afile, bfile, first_hunk, strip, prefix
2475                     )
2476                 changed.add(gp.path)
2477                 if gp.op == b'RENAME':
2478                     changed.add(gp.oldpath)
2479             elif state not in (b'hunk', b'git'):
2480                 raise error.Abort(_(b'unsupported parser state: %s') % state)
2481         return changed
2482 
2483 
2484 class GitDiffRequired(Exception):
2485     pass
2486 
2487 
2488 diffopts = diffutil.diffallopts
2489 diffallopts = diffutil.diffallopts
2490 difffeatureopts = diffutil.difffeatureopts
2491 
2492 
2493 def diff(
2494     repo,
2495     node1=None,
2496     node2=None,
2497     match=None,
2498     changes=None,
2499     opts=None,
2500     losedatafn=None,
2501     pathfn=None,
2502     copy=None,
2503     copysourcematch=None,
2504     hunksfilterfn=None,
2505 ):
2506     '''yields diff of changes to files between two nodes, or node and
2507     working directory.
2508 
2509     if node1 is None, use first dirstate parent instead.
2510     if node2 is None, compare node1 with working directory.
2511 
2512     losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2513     every time some change cannot be represented with the current
2514     patch format. Return False to upgrade to git patch format, True to
2515     accept the loss or raise an exception to abort the diff. It is
2516     called with the name of current file being diffed as 'fn'. If set
2517     to None, patches will always be upgraded to git format when
2518     necessary.
2519 
2520     prefix is a filename prefix that is prepended to all filenames on
2521     display (used for subrepos).
2522 
2523     relroot, if not empty, must be normalized with a trailing /. Any match
2524     patterns that fall outside it will be ignored.
2525 
2526     copy, if not empty, should contain mappings {dst@y: src@x} of copy
2527     information.
2528 
2529     if copysourcematch is not None, then copy sources will be filtered by this
2530     matcher
2531 
2532     hunksfilterfn, if not None, should be a function taking a filectx and
2533     hunks generator that may yield filtered hunks.
2534     '''
2535     if not node1 and not node2:
2536         node1 = repo.dirstate.p1()
2537 
2538     ctx1 = repo[node1]
2539     ctx2 = repo[node2]
2540 
2541     for fctx1, fctx2, hdr, hunks in diffhunks(
2542         repo,
2543         ctx1=ctx1,
2544         ctx2=ctx2,
2545         match=match,
2546         changes=changes,
2547         opts=opts,
2548         losedatafn=losedatafn,
2549         pathfn=pathfn,
2550         copy=copy,
2551         copysourcematch=copysourcematch,
2552     ):
2553         if hunksfilterfn is not None:
2554             # If the file has been removed, fctx2 is None; but this should
2555             # not occur here since we catch removed files early in
2556             # logcmdutil.getlinerangerevs() for 'hg log -L'.
2557             assert (
2558                 fctx2 is not None
2559             ), b'fctx2 unexpectedly None in diff hunks filtering'
2560             hunks = hunksfilterfn(fctx2, hunks)
2561         text = b''.join(b''.join(hlines) for hrange, hlines in hunks)
2562         if hdr and (text or len(hdr) > 1):
2563             yield b'\n'.join(hdr) + b'\n'
2564         if text:
2565             yield text
2566 
2567 
2568 def diffhunks(
2569     repo,
2570     ctx1,
2571     ctx2,
2572     match=None,
2573     changes=None,
2574     opts=None,
2575     losedatafn=None,
2576     pathfn=None,
2577     copy=None,
2578     copysourcematch=None,
2579 ):
2580     """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2581     where `header` is a list of diff headers and `hunks` is an iterable of
2582     (`hunkrange`, `hunklines`) tuples.
2583 
2584     See diff() for the meaning of parameters.
2585     """
2586 
2587     if opts is None:
2588         opts = mdiff.defaultopts
2589 
2590     def lrugetfilectx():
2591         cache = {}
2592         order = collections.deque()
2593 
2594         def getfilectx(f, ctx):
2595             fctx = ctx.filectx(f, filelog=cache.get(f))
2596             if f not in cache:
2597                 if len(cache) > 20:
2598                     del cache[order.popleft()]
2599                 cache[f] = fctx.filelog()
2600             else:
2601                 order.remove(f)
2602             order.append(f)
2603             return fctx
2604 
2605         return getfilectx
2606 
2607     getfilectx = lrugetfilectx()
2608 
2609     if not changes:
2610         changes = ctx1.status(ctx2, match=match)
2611     if isinstance(changes, list):
2612         modified, added, removed = changes[:3]
2613     else:
2614         modified, added, removed = (
2615             changes.modified,
2616             changes.added,
2617             changes.removed,
2618         )
2619 
2620     if not modified and not added and not removed:
2621         return []
2622 
2623     if repo.ui.debugflag:
2624         hexfunc = hex
2625     else:
2626         hexfunc = short
2627     revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2628 
2629     if copy is None:
2630         copy = {}
2631     if opts.git or opts.upgrade:
2632         copy = copies.pathcopies(ctx1, ctx2, match=match)
2633 
2634     if copysourcematch:
2635         # filter out copies where source side isn't inside the matcher
2636         # (copies.pathcopies() already filtered out the destination)
2637         copy = {
2638             dst: src
2639             for dst, src in pycompat.iteritems(copy)
2640             if copysourcematch(src)
2641         }
2642 
2643     modifiedset = set(modified)
2644     addedset = set(added)
2645     removedset = set(removed)
2646     for f in modified:
2647         if f not in ctx1:
2648             # Fix up added, since merged-in additions appear as
2649             # modifications during merges
2650             modifiedset.remove(f)
2651             addedset.add(f)
2652     for f in removed:
2653         if f not in ctx1:
2654             # Merged-in additions that are then removed are reported as removed.
2655             # They are not in ctx1, so we don't want to show them in the diff.
2656             removedset.remove(f)
2657     modified = sorted(modifiedset)
2658     added = sorted(addedset)
2659     removed = sorted(removedset)
2660     for dst, src in list(copy.items()):
2661         if src not in ctx1:
2662             # Files merged in during a merge and then copied/renamed are
2663             # reported as copies. We want to show them in the diff as additions.
2664             del copy[dst]
2665 
2666     prefetchmatch = scmutil.matchfiles(
2667         repo, list(modifiedset | addedset | removedset)
2668     )
2669     scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2669     revmatches = [
2670         (ctx1.rev(), prefetchmatch),
2671         (ctx2.rev(), prefetchmatch),
2672     ]
2673     scmutil.prefetchfiles(repo, revmatches)
2674 
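# The two call sites above show this commit's change: the old single
# scmutil.prefetchfiles(repo, revs, match) form (first line 2669) is
# replaced by a list of (revision, matcher) pairs, so a caller can now
# prefetch a different set of files per revision, e.g. (hypothetical):
#     revmatches = [(rev1, scmutil.matchfiles(repo, [b'a'])),
#                   (rev2, scmutil.matchfiles(repo, [b'a', b'b']))]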
2675     def difffn(opts, losedata):
2676         return trydiff(
2677             repo,
2678             revs,
2679             ctx1,
2680             ctx2,
2681             modified,
2682             added,
2683             removed,
2684             copy,
2685             getfilectx,
2686             opts,
2687             losedata,
2688             pathfn,
2689         )
2690 
2691     if opts.upgrade and not opts.git:
2692         try:
2693 
2694             def losedata(fn):
2695                 if not losedatafn or not losedatafn(fn=fn):
2696                     raise GitDiffRequired
2697 
2698             # Buffer the whole output until we are sure it can be generated
2699             return list(difffn(opts.copy(git=False), losedata))
2700         except GitDiffRequired:
2701             return difffn(opts.copy(git=True), None)
2702     else:
2703         return difffn(opts, None)
2704 
2705 
2706 def diffsinglehunk(hunklines):
2707     """yield tokens for a list of lines in a single hunk"""
2708     for line in hunklines:
2709         # chomp
2710         chompline = line.rstrip(b'\r\n')
2711         # highlight tabs and trailing whitespace
2712         stripline = chompline.rstrip()
2713         if line.startswith(b'-'):
2714             label = b'diff.deleted'
2715         elif line.startswith(b'+'):
2716             label = b'diff.inserted'
2717         else:
2718             raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
2719         for token in tabsplitter.findall(stripline):
2720             if token.startswith(b'\t'):
2721                 yield (token, b'diff.tab')
2722             else:
2723                 yield (token, label)
2724 
2725         if chompline != stripline:
2726             yield (chompline[len(stripline) :], b'diff.trailingwhitespace')
2727         if chompline != line:
2728             yield (line[len(chompline) :], b'')
2729 
2730 
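# tabsplitter is defined near the top of this module; a rough standalone
# equivalent (an assumption about its behavior, for illustration) splits
# runs of tabs from other text so tabs can be labeled separately:
import re

_tabsplitter = re.compile(br'(\t+|[^\t]+)')
assert _tabsplitter.findall(b'a\tb') == [b'a', b'\t', b'b']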
2731 def diffsinglehunkinline(hunklines):
2732     """yield tokens for a list of lines in a single hunk, with inline colors"""
2733     # prepare deleted, and inserted content
2734     a = b''
2735     b = b''
2736     for line in hunklines:
2737         if line[0:1] == b'-':
2738             a += line[1:]
2739         elif line[0:1] == b'+':
2740             b += line[1:]
2741         else:
2742             raise error.ProgrammingError(b'unexpected hunk line: %s' % line)
2743     # fast path: if either side is empty, use diffsinglehunk
2744     if not a or not b:
2745         for t in diffsinglehunk(hunklines):
2746             yield t
2747         return
2748     # re-split the content into words
2749     al = wordsplitter.findall(a)
2750     bl = wordsplitter.findall(b)
2751     # re-arrange the words to lines since the diff algorithm is line-based
2752     aln = [s if s == b'\n' else s + b'\n' for s in al]
2753     bln = [s if s == b'\n' else s + b'\n' for s in bl]
2754     an = b''.join(aln)
2755     bn = b''.join(bln)
2756     # run the diff algorithm, prepare atokens and btokens
2757     atokens = []
2758     btokens = []
2759     blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2760     for (a1, a2, b1, b2), btype in blocks:
2761         changed = btype == b'!'
2762         for token in mdiff.splitnewlines(b''.join(al[a1:a2])):
2763             atokens.append((changed, token))
2764         for token in mdiff.splitnewlines(b''.join(bl[b1:b2])):
2765             btokens.append((changed, token))
2766 
2767     # yield deleted tokens, then inserted ones
2768     for prefix, label, tokens in [
2769         (b'-', b'diff.deleted', atokens),
2770         (b'+', b'diff.inserted', btokens),
2771     ]:
2772         nextisnewline = True
2773         for changed, token in tokens:
2774             if nextisnewline:
2775                 yield (prefix, label)
2776                 nextisnewline = False
2777             # special handling for the line end
2778             isendofline = token.endswith(b'\n')
2779             if isendofline:
2780                 chomp = token[:-1]  # chomp
2781                 if chomp.endswith(b'\r'):
2782                     chomp = chomp[:-1]
2783                 endofline = token[len(chomp) :]
2784                 token = chomp.rstrip()  # detect spaces at the end
2785                 endspaces = chomp[len(token) :]
2786             # scan tabs
2787             for maybetab in tabsplitter.findall(token):
2788                 if b'\t' == maybetab[0:1]:
2789                     currentlabel = b'diff.tab'
2790                 else:
2791                     if changed:
2792                         currentlabel = label + b'.changed'
2793                     else:
2794                         currentlabel = label + b'.unchanged'
2795                 yield (maybetab, currentlabel)
2796             if isendofline:
2797                 if endspaces:
2798                     yield (endspaces, b'diff.trailingwhitespace')
2799                 yield (endofline, b'')
2800                 nextisnewline = True
2801 
2802 
2803 def difflabel(func, *args, **kw):
2804     '''yields 2-tuples of (output, label) based on the output of func()'''
2805     if kw.get('opts') and kw['opts'].worddiff:
2806         dodiffhunk = diffsinglehunkinline
2807     else:
2808         dodiffhunk = diffsinglehunk
2809     headprefixes = [
2810         (b'diff', b'diff.diffline'),
2811         (b'copy', b'diff.extended'),
2812         (b'rename', b'diff.extended'),
2813         (b'old', b'diff.extended'),
2814         (b'new', b'diff.extended'),
2815         (b'deleted', b'diff.extended'),
2816         (b'index', b'diff.extended'),
2817         (b'similarity', b'diff.extended'),
2818         (b'---', b'diff.file_a'),
2819         (b'+++', b'diff.file_b'),
2820     ]
2821     textprefixes = [
2822         (b'@', b'diff.hunk'),
2823         # - and + are handled by diffsinglehunk
2824     ]
2825     head = False
2826 
2827     # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2828     hunkbuffer = []
2829 
2830     def consumehunkbuffer():
2831         if hunkbuffer:
2832             for token in dodiffhunk(hunkbuffer):
2833                 yield token
2834             hunkbuffer[:] = []
2835 
2836     for chunk in func(*args, **kw):
2837         lines = chunk.split(b'\n')
2838         linecount = len(lines)
2839         for i, line in enumerate(lines):
2840             if head:
2841                 if line.startswith(b'@'):
2842                     head = False
2843             else:
2844                 if line and not line.startswith(
2845                     (b' ', b'+', b'-', b'@', b'\\')
2846                 ):
2847                     head = True
2848             diffline = False
2849             if not head and line and line.startswith((b'+', b'-')):
2850                 diffline = True
2851 
2852             prefixes = textprefixes
2853             if head:
2854                 prefixes = headprefixes
2855             if diffline:
2856                 # buffered
2857                 bufferedline = line
2858                 if i + 1 < linecount:
2859                     bufferedline += b"\n"
2860                 hunkbuffer.append(bufferedline)
2861             else:
2862                 # unbuffered
2863                 for token in consumehunkbuffer():
2864                     yield token
2865                 stripline = line.rstrip()
2866                 for prefix, label in prefixes:
2867                     if stripline.startswith(prefix):
2868                         yield (stripline, label)
2869                         if line != stripline:
2870                             yield (
2871                                 line[len(stripline) :],
2872                                 b'diff.trailingwhitespace',
2873                             )
2874                         break
2875                 else:
2876                     yield (line, b'')
2877                 if i + 1 < linecount:
2878                     yield (b'\n', b'')
2879     for token in consumehunkbuffer():
2880         yield token
2881 
2882 
2883 def diffui(*args, **kw):
2884     '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2885     return difflabel(diff, *args, **kw)
2886 
2887 
2888 def _filepairs(modified, added, removed, copy, opts):
2889     '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2890     before and f2 is the name after. For added files, f1 will be None,
2891     and for removed files, f2 will be None. copyop may be set to None, 'copy'
2892     or 'rename' (the latter two only if opts.git is set).'''
2893     gone = set()
2894 
2895     copyto = {v: k for k, v in copy.items()}
2896 
2897     addedset, removedset = set(added), set(removed)
2898 
2899     for f in sorted(modified + added + removed):
2900         copyop = None
2901         f1, f2 = f, f
2902         if f in addedset:
2903             f1 = None
2904             if f in copy:
2905                 if opts.git:
2906                     f1 = copy[f]
2907                     if f1 in removedset and f1 not in gone:
2908                         copyop = b'rename'
2909                         gone.add(f1)
2910                     else:
2911                         copyop = b'copy'
2912         elif f in removedset:
2913             f2 = None
2914             if opts.git:
2915                 # have we already reported a copy above?
2916                 if (
2917                     f in copyto
2918                     and copyto[f] in addedset
2919                     and copy[copyto[f]] == f
2920                 ):
2921                     continue
2922         yield f1, f2, copyop
2923 
2924 
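# Usage sketch for _filepairs() above (hypothetical inputs; assumes the
# function is in scope): with git diffs enabled, a removed 'old' plus an
# added 'new' recorded in `copy` comes out as a single rename pair, and
# the bare removal of 'old' is suppressed.
from types import SimpleNamespace

_pairs = list(_filepairs(
    [], [b'new'], [b'old'], {b'new': b'old'}, SimpleNamespace(git=True)
))
assert _pairs == [(b'old', b'new', b'rename')]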
2925 def trydiff(
2926     repo,
2927     revs,
2928     ctx1,
2929     ctx2,
2930     modified,
2931     added,
2932     removed,
2933     copy,
2934     getfilectx,
2935     opts,
2936     losedatafn,
2937     pathfn,
2938 ):
2939     '''given input data, generate a diff and yield it in blocks
2940 
2941     If generating a diff would lose data like flags or binary data and
2942     losedatafn is not None, it will be called.
2943 
2944     pathfn is applied to every path in the diff output.
2945     '''
2946 
2947     def gitindex(text):
2948         if not text:
2949             text = b""
2950         l = len(text)
2951         s = hashutil.sha1(b'blob %d\0' % l)
2952         s.update(text)
2953         return hex(s.digest())
2954 
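# gitindex() above reproduces git's blob hashing: SHA-1 over a
# "blob <len>\0" header plus the content. Sanity check with hashlib:
import hashlib

assert (
    hashlib.sha1(b'blob 0\0').hexdigest()
    == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'
)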
2955     if opts.noprefix:
2956         aprefix = bprefix = b''
2957     else:
2958         aprefix = b'a/'
2959         bprefix = b'b/'
2960 
2961     def diffline(f, revs):
2962         revinfo = b' '.join([b"-r %s" % rev for rev in revs])
2963         return b'diff %s %s' % (revinfo, f)
2964 
2965     def isempty(fctx):
2966         return fctx is None or fctx.size() == 0
2967 
2968     date1 = dateutil.datestr(ctx1.date())
2969     date2 = dateutil.datestr(ctx2.date())
2970 
2971     gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
2972 
2973     if not pathfn:
2974         pathfn = lambda f: f
2975 
2976     for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2977         content1 = None
2978         content2 = None
2979         fctx1 = None
2980         fctx2 = None
2981         flag1 = None
2982         flag2 = None
2983         if f1:
2984             fctx1 = getfilectx(f1, ctx1)
2985             if opts.git or losedatafn:
2986                 flag1 = ctx1.flags(f1)
2987         if f2:
2988             fctx2 = getfilectx(f2, ctx2)
2989             if opts.git or losedatafn:
2990                 flag2 = ctx2.flags(f2)
2991         # if binary is True, output "summary" or "base85", but not "text diff"
2992         if opts.text:
2993             binary = False
2994         else:
2995             binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2996 
2997         if losedatafn and not opts.git:
2998             if (
2999                 binary
3000                 or
3001                 # copy/rename
3002                 f2 in copy
3003                 or
3004                 # empty file creation
3005                 (not f1 and isempty(fctx2))
3006                 or
3007                 # empty file deletion
3008                 (isempty(fctx1) and not f2)
3009                 or
3010                 # create with flags
3011                 (not f1 and flag2)
3012                 or
3013                 # change flags
3014                 (f1 and f2 and flag1 != flag2)
3015             ):
3016                 losedatafn(f2 or f1)
3017 
3018         path1 = pathfn(f1 or f2)
3019         path2 = pathfn(f2 or f1)
3020         header = []
3021         if opts.git:
3022             header.append(
3023                 b'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2)
3024             )
3025             if not f1:  # added
3026                 header.append(b'new file mode %s' % gitmode[flag2])
3027             elif not f2:  # removed
3028                 header.append(b'deleted file mode %s' % gitmode[flag1])
3029             else:  # modified/copied/renamed
3030                 mode1, mode2 = gitmode[flag1], gitmode[flag2]
3031                 if mode1 != mode2:
3032                     header.append(b'old mode %s' % mode1)
3029 header.append(b'new mode %s' % mode2)
3033 header.append(b'new mode %s' % mode2)
3030 if copyop is not None:
3034 if copyop is not None:
3031 if opts.showsimilarity:
3035 if opts.showsimilarity:
3032 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
3036 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
3033 header.append(b'similarity index %d%%' % sim)
3037 header.append(b'similarity index %d%%' % sim)
3034 header.append(b'%s from %s' % (copyop, path1))
3038 header.append(b'%s from %s' % (copyop, path1))
3035 header.append(b'%s to %s' % (copyop, path2))
3039 header.append(b'%s to %s' % (copyop, path2))
3036 elif revs:
3040 elif revs:
3037 header.append(diffline(path1, revs))
3041 header.append(diffline(path1, revs))
3038
3042
3039 # fctx.is | diffopts | what to | is fctx.data()
3043 # fctx.is | diffopts | what to | is fctx.data()
3040 # binary() | text nobinary git index | output? | outputted?
3044 # binary() | text nobinary git index | output? | outputted?
3041 # ------------------------------------|----------------------------
3045 # ------------------------------------|----------------------------
3042 # yes | no no no * | summary | no
3046 # yes | no no no * | summary | no
3043 # yes | no no yes * | base85 | yes
3047 # yes | no no yes * | base85 | yes
3044 # yes | no yes no * | summary | no
3048 # yes | no yes no * | summary | no
3045 # yes | no yes yes 0 | summary | no
3049 # yes | no yes yes 0 | summary | no
3046 # yes | no yes yes >0 | summary | semi [1]
3050 # yes | no yes yes >0 | summary | semi [1]
3047 # yes | yes * * * | text diff | yes
3051 # yes | yes * * * | text diff | yes
3048 # no | * * * * | text diff | yes
3052 # no | * * * * | text diff | yes
3049 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
3053 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
3050 if binary and (
3054 if binary and (
3051 not opts.git or (opts.git and opts.nobinary and not opts.index)
3055 not opts.git or (opts.git and opts.nobinary and not opts.index)
3052 ):
3056 ):
3053 # fast path: no binary content will be displayed, content1 and
3057 # fast path: no binary content will be displayed, content1 and
3054 # content2 are only used for equivalent test. cmp() could have a
3058 # content2 are only used for equivalent test. cmp() could have a
3055 # fast path.
3059 # fast path.
3056 if fctx1 is not None:
3060 if fctx1 is not None:
3057 content1 = b'\0'
3061 content1 = b'\0'
3058 if fctx2 is not None:
3062 if fctx2 is not None:
3059 if fctx1 is not None and not fctx1.cmp(fctx2):
3063 if fctx1 is not None and not fctx1.cmp(fctx2):
3060 content2 = b'\0' # not different
3064 content2 = b'\0' # not different
3061 else:
3065 else:
3062 content2 = b'\0\0'
3066 content2 = b'\0\0'
3063 else:
3067 else:
3064 # normal path: load contents
3068 # normal path: load contents
3065 if fctx1 is not None:
3069 if fctx1 is not None:
3066 content1 = fctx1.data()
3070 content1 = fctx1.data()
3067 if fctx2 is not None:
3071 if fctx2 is not None:
3068 content2 = fctx2.data()
3072 content2 = fctx2.data()
3069
3073
3070 if binary and opts.git and not opts.nobinary:
3074 if binary and opts.git and not opts.nobinary:
3071 text = mdiff.b85diff(content1, content2)
3075 text = mdiff.b85diff(content1, content2)
3072 if text:
3076 if text:
3073 header.append(
3077 header.append(
3074 b'index %s..%s' % (gitindex(content1), gitindex(content2))
3078 b'index %s..%s' % (gitindex(content1), gitindex(content2))
3075 )
3079 )
3076 hunks = ((None, [text]),)
3080 hunks = ((None, [text]),)
3077 else:
3081 else:
3078 if opts.git and opts.index > 0:
3082 if opts.git and opts.index > 0:
3079 flag = flag1
3083 flag = flag1
3080 if flag is None:
3084 if flag is None:
3081 flag = flag2
3085 flag = flag2
3082 header.append(
3086 header.append(
3083 b'index %s..%s %s'
3087 b'index %s..%s %s'
3084 % (
3088 % (
3085 gitindex(content1)[0 : opts.index],
3089 gitindex(content1)[0 : opts.index],
3086 gitindex(content2)[0 : opts.index],
3090 gitindex(content2)[0 : opts.index],
3087 gitmode[flag],
3091 gitmode[flag],
3088 )
3092 )
3089 )
3093 )
3090
3094
3091 uheaders, hunks = mdiff.unidiff(
3095 uheaders, hunks = mdiff.unidiff(
3092 content1,
3096 content1,
3093 date1,
3097 date1,
3094 content2,
3098 content2,
3095 date2,
3099 date2,
3096 path1,
3100 path1,
3097 path2,
3101 path2,
3098 binary=binary,
3102 binary=binary,
3099 opts=opts,
3103 opts=opts,
3100 )
3104 )
3101 header.extend(uheaders)
3105 header.extend(uheaders)
3102 yield fctx1, fctx2, header, hunks
3106 yield fctx1, fctx2, header, hunks
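
# A rough consumer sketch (hypothetical; real callers go through the
# higher-level diff helpers in this module, and 'repo', the contexts,
# and 'ui' are assumed to exist in the caller's scope). Each element of
# hunks is a (hunkrange, hunklines) pair:
#
#   for fctx1, fctx2, header, hunks in trydiff(
#       repo, revs, ctx1, ctx2, modified, added, removed,
#       copy, getfilectx, opts, losedatafn=None, pathfn=None,
#   ):
#       if header:
#           ui.write(b'\n'.join(header) + b'\n')
#       ui.write(b''.join(b''.join(hlines) for hrange, hlines in hunks))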


def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary
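
# Worked example of the aggregation above (hypothetical stats tuples):
#
#   stats = [(b'a.txt', 2, 1, False), (b'dir/b.bin', 0, 0, True)]
#   diffstatsum(stats) == (9, 3, 2, 1, True)
#
# i.e. the widest filename is 9 columns, the biggest per-file change is
# 3 lines, 2 lines were added and 1 removed overall, and at least one
# file is binary.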


def diffstatdata(lines):
    diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith(b'diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith(b'diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith(b'diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith(b'@@'):
            inheader = False
        elif line.startswith(b'+') and not inheader:
            adds += 1
        elif line.startswith(b'-') and not inheader:
            removes += 1
        elif line.startswith(b'GIT binary patch') or line.startswith(
            b'Binary file'
        ):
            isbinary = True
        elif line.startswith(b'rename from'):
            filename = line[12:]
        elif line.startswith(b'rename to'):
            filename += b' => %s' % line[10:]
    addresult()
    return results
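
# Example (hypothetical diff text): feeding diffstatdata() these lines,
# each as a bytes string,
#
#   diff -r 000000000000 -r 111111111111 foo.c
#   --- a/foo.c
#   +++ b/foo.c
#   @@ -1,1 +1,2 @@
#    int x;
#   +int y;
#
# produces [(b'foo.c', 1, 0, False)]: the '---'/'+++' header lines are
# not counted as removals/additions because inheader is still True when
# they are seen.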


def diffstat(lines, width=80):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = b'Bin'
        else:
            count = b'%d' % (adds + removes)
        pluses = b'+' * scale(adds)
        minuses = b'-' * scale(removes)
        output.append(
            b' %s%s | %*s %s%s\n'
            % (
                filename,
                b' ' * (maxname - encoding.colwidth(filename)),
                countwidth,
                count,
                pluses,
                minuses,
            )
        )

    if stats:
        output.append(
            _(b' %d files changed, %d insertions(+), %d deletions(-)\n')
            % (len(stats), totaladds, totalremoves)
        )

    return b''.join(output)
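
# For the foo.c example above, diffstat() would render roughly:
#
#    foo.c | 1 +
#   1 files changed, 1 insertions(+), 0 deletions(-)
#
# (exact column widths depend on maxname and countwidth computed from
# the stats, so treat the spacing as illustrative).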


def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in b'+-':
            name, graph = line.rsplit(b' ', 1)
            yield (name + b' ', b'')
            m = re.search(br'\++', graph)
            if m:
                yield (m.group(0), b'diffstat.inserted')
            m = re.search(br'-+', graph)
            if m:
                yield (m.group(0), b'diffstat.deleted')
        else:
            yield (line, b'')
        yield (b'\n', b'')
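
# Sketch of a typical consumer, which lets the ui layer colorize the
# +/- graph via the diffstat.inserted/diffstat.deleted labels ('ui' and
# 'lines' assumed from the caller's scope):
#
#   for chunk, label in diffstatui(lines, width=ui.termwidth()):
#       ui.write(chunk, label=label)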