##// END OF EJS Templates
typing: consolidate "if not globals():" trick...
Yuya Nishihara -
r44162:47b8ca03 default draft
parent child Browse files
Show More
@@ -1,756 +1,756
1 # branchmap.py - logic to compute, maintain and store branchmap for local repo
1 # branchmap.py - logic to compute, maintain and store branchmap for local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11
11
12 from .node import (
12 from .node import (
13 bin,
13 bin,
14 hex,
14 hex,
15 nullid,
15 nullid,
16 nullrev,
16 nullrev,
17 )
17 )
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 pycompat,
21 pycompat,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 )
24 )
25 from .utils import (
25 from .utils import (
26 repoviewutil,
26 repoviewutil,
27 stringutil,
27 stringutil,
28 )
28 )
29
29
# Import typing names only when a static type checker is looking at this
# module; pycompat.TYPE_CHECKING is False at runtime, so the `typing`
# module is never imported during normal execution (py2 compatibility).
if pycompat.TYPE_CHECKING:
    from typing import (
        Any,
        Callable,
        Dict,
        Iterable,
        List,
        Optional,
        Set,
        Tuple,
        Union,
    )

    # NOTE(review): referencing every imported name here presumably keeps
    # "unused import" linters quiet for names used only in type comments —
    # confirm against the project's lint setup.
    assert any(
        (Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union,)
    )
46
46
# Re-export the filter-subset mapping so existing users of
# branchmap.subsettable keep working (the table itself lives in
# utils.repoviewutil).
subsettable = repoviewutil.subsettable

# Module-level shortcuts for the struct helpers used by the rev branch
# cache below.
calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from
52
52
53
53
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache

    Keyed by repo filter name; each value is a branchcache instance for
    that filtered view.
    """

    def __init__(self):
        # filter name (bytes or None) -> branchcache
        self._per_filter = {}

    def __getitem__(self, repo):
        # Always refresh before handing out a cache so callers get a
        # branchcache that is valid for the current repo state.
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                subset = repo.filtered(subsetname)
                # self[subset] recurses into updatecache for the subset view
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = set()
        # Flatten the remote map into a head list and record which heads
        # close their branch.
        for bheads in pycompat.itervalues(remotebranchmap):
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.add(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap,
                repo[rtiprev].node(),
                rtiprev,
                closednodes=closed,
            )

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in (b'base', b'immutable', b'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        # Drop every per-filter cache object (e.g. after a strip).
        self._per_filter.clear()
139
139
140
140
def _unknownnode(node):
    """Raise ValueError for a node the branchcache references but the
    changelog does not contain."""
    msg = 'node %s does not exist' % pycompat.sysstr(hex(node))
    raise ValueError(msg)
145
145
146
146
147 def _branchcachedesc(repo):
147 def _branchcachedesc(repo):
148 if repo.filtername is not None:
148 if repo.filtername is not None:
149 return b'branch cache (%s)' % repo.filtername
149 return b'branch cache (%s)' % repo.filtername
150 else:
150 else:
151 return b'branch cache'
151 return b'branch cache'
152
152
153
153
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(
        self,
        entries=(),
        tipnode=nullid,
        tiprev=nullrev,
        filteredhash=None,
        closednodes=None,
        hasnode=None,
    ):
        # type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        # cache key: node/rev of the tip this cache was computed against
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        # branch name -> list of head nodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            # no verifier provided: assume every node exists
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """ verify the closed nodes we have """
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """ verify head nodes for the given branch. """
        # verification is lazy and memoized per branch
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """ verifies nodes of all the branches """
        needverification = set(self._entries.keys()) - self._verifiedbranches
        for b in needverification:
            self._verifybranch(b)

    def __iter__(self):
        # iterate over branch names; no node verification needed
        return iter(self._entries)

    def __setitem__(self, key, value):
        # store a head list for a branch, bypassing verification
        self._entries[key] = value

    def __getitem__(self, key):
        # verify the branch's nodes before exposing them
        self._verifybranch(key)
        return self._entries[key]

    def __contains__(self, key):
        self._verifybranch(key)
        return key in self._entries

    def iteritems(self):
        """Yield (branch, heads) pairs, verifying each branch lazily."""
        for k, v in pycompat.iteritems(self._entries):
            self._verifybranch(k)
            yield k, v

    # py3-style alias for iteritems
    items = iteritems

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        self._verifybranch(label)
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Build a branchcache from the on-disk cache file of *repo*.

        Returns None when the file is missing or unreadable; returns an
        unloaded/empty cache object when the content is invalid.
        """
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            # first line is the cache key: tipnode, tiprev and the
            # optional filtered-revs hash
            cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(
                tipnode=last,
                tiprev=lrev,
                filteredhash=filteredhash,
                hasnode=hasnode,
            )
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError('tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            # any parse error falls through here; log (in debug mode) and
            # behave as if no cache exists
            if repo.ui.debugflag:
                msg = b'invalid %s: %s\n'
                repo.ui.debug(
                    msg
                    % (
                        _branchcachedesc(repo),
                        pycompat.bytestr(
                            inst  # pytype: disable=wrong-arg-types
                        ),
                    )
                )
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip(b'\n')
            if not line:
                continue
            # each line: <hex node> <'o'|'c'> <branch name>
            node, state, label = line.split(b" ", 2)
            if state not in b'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == b'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = b"branch2"
        if repo.filtername:
            # filtered views get their own cache file, e.g. branch2-served
            filename = b'%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return (self.tipnode == repo.changelog.node(self.tiprev)) and (
                self.filteredhash == scmutil.filteredhash(repo, self.tiprev)
            )
        except IndexError:
            # tiprev no longer exists (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        """Filter *nodes* down to those that do not close their branch."""
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """Return the head nodes of *branch*, excluding closed heads
        unless *closed* is True. Raises KeyError for unknown branch."""
        self._verifybranch(branch)
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        """Yield (branch, heads, tipnode, isclosed) for every branch."""
        for bn, heads in pycompat.iteritems(self):
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        self._verifyall()
        return pycompat.itervalues(self._entries)

    def copy(self):
        """return an deep copy of the branchcache object"""
        return type(self)(
            self._entries,
            self.tipnode,
            self.tiprev,
            self.filteredhash,
            self._closednodes,
        )

    def write(self, repo):
        """Serialize this cache to *repo*'s cache file (best effort:
        write failures are logged, not raised)."""
        try:
            f = repo.cachevfs(self._filename(repo), b"w", atomictemp=True)
            cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(b" ".join(cachekey) + b'\n')
            nodecount = 0
            for label, nodes in sorted(pycompat.iteritems(self._entries)):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = b'c'
                    else:
                        state = b'o'
                    f.write(b"%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log(
                b'branchcache',
                b'wrote %s with %d labels and %d nodes\n',
                _branchcachedesc(repo),
                len(self._entries),
                nodecount,
            )
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug(
                b"couldn't write branch cache: %s\n"
                % stringutil.forcebytestr(inst)
            )

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # new tip revision which we found after iterating items from new
        # branches
        ntiprev = self.tiprev

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in pycompat.iteritems(newbranches):
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > ntiprev:
                ntiprev = tiprev

        if ntiprev > self.tiprev:
            self.tiprev = ntiprev
            self.tipnode = cl.node(ntiprev)

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            # recompute the tip from the heads we actually hold
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log(
            b'branchcache',
            b'updated %s in %.4f seconds\n',
            _branchcachedesc(repo),
            duration,
        )

        self.write(repo)
494
494
495
495
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""

    def write(self, repo):
        # deliberate no-op: data coming from a remote must never
        # overwrite the local on-disk branch cache
        pass
501
501
502
502
# Revision branch info cache

# version suffix shared by both cache file names
_rbcversion = b'-v1'
# file holding the \0-separated list of branch names
_rbcnames = b'rbc-names' + _rbcversion
# file holding one fixed-size record per revision
_rbcrevs = b'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = b'>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
# number of node-hash bytes stored per record
_rbcnodelen = 4
# low 31 bits of the second field: index into the branch-name list
_rbcbranchidxmask = 0x7FFFFFFF
# high bit of the second field: set when the revision closes its branch
_rbccloseflag = 0x80000000
514
514
515
515
516 class revbranchcache(object):
516 class revbranchcache(object):
517 """Persistent cache, mapping from revision number to branch name and close.
517 """Persistent cache, mapping from revision number to branch name and close.
518 This is a low level cache, independent of filtering.
518 This is a low level cache, independent of filtering.
519
519
520 Branch names are stored in rbc-names in internal encoding separated by 0.
520 Branch names are stored in rbc-names in internal encoding separated by 0.
521 rbc-names is append-only, and each branch name is only stored once and will
521 rbc-names is append-only, and each branch name is only stored once and will
522 thus have a unique index.
522 thus have a unique index.
523
523
524 The branch info for each revision is stored in rbc-revs as constant size
524 The branch info for each revision is stored in rbc-revs as constant size
525 records. The whole file is read into memory, but it is only 'parsed' on
525 records. The whole file is read into memory, but it is only 'parsed' on
526 demand. The file is usually append-only but will be truncated if repo
526 demand. The file is usually append-only but will be truncated if repo
527 modification is detected.
527 modification is detected.
528 The record for each revision contains the first 4 bytes of the
528 The record for each revision contains the first 4 bytes of the
529 corresponding node hash, and the record is only used if it still matches.
529 corresponding node hash, and the record is only used if it still matches.
530 Even a completely trashed rbc-revs fill thus still give the right result
530 Even a completely trashed rbc-revs fill thus still give the right result
531 while converging towards full recovery ... assuming no incorrectly matching
531 while converging towards full recovery ... assuming no incorrectly matching
532 node hashes.
532 node hashes.
533 The record also contains 4 bytes where 31 bits contains the index of the
533 The record also contains 4 bytes where 31 bits contains the index of the
534 branch and the last bit indicate that it is a branch close commit.
534 branch and the last bit indicate that it is a branch close commit.
535 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
535 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
536 and will grow with it but be 1/8th of its size.
536 and will grow with it but be 1/8th of its size.
537 """
537 """
538
538
539 def __init__(self, repo, readonly=True):
539 def __init__(self, repo, readonly=True):
540 assert repo.filtername is None
540 assert repo.filtername is None
541 self._repo = repo
541 self._repo = repo
542 self._names = [] # branch names in local encoding with static index
542 self._names = [] # branch names in local encoding with static index
543 self._rbcrevs = bytearray()
543 self._rbcrevs = bytearray()
544 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
544 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
545 try:
545 try:
546 bndata = repo.cachevfs.read(_rbcnames)
546 bndata = repo.cachevfs.read(_rbcnames)
547 self._rbcsnameslen = len(bndata) # for verification before writing
547 self._rbcsnameslen = len(bndata) # for verification before writing
548 if bndata:
548 if bndata:
549 self._names = [
549 self._names = [
550 encoding.tolocal(bn) for bn in bndata.split(b'\0')
550 encoding.tolocal(bn) for bn in bndata.split(b'\0')
551 ]
551 ]
552 except (IOError, OSError):
552 except (IOError, OSError):
553 if readonly:
553 if readonly:
554 # don't try to use cache - fall back to the slow path
554 # don't try to use cache - fall back to the slow path
555 self.branchinfo = self._branchinfo
555 self.branchinfo = self._branchinfo
556
556
557 if self._names:
557 if self._names:
558 try:
558 try:
559 data = repo.cachevfs.read(_rbcrevs)
559 data = repo.cachevfs.read(_rbcrevs)
560 self._rbcrevs[:] = data
560 self._rbcrevs[:] = data
561 except (IOError, OSError) as inst:
561 except (IOError, OSError) as inst:
562 repo.ui.debug(
562 repo.ui.debug(
563 b"couldn't read revision branch cache: %s\n"
563 b"couldn't read revision branch cache: %s\n"
564 % stringutil.forcebytestr(inst)
564 % stringutil.forcebytestr(inst)
565 )
565 )
566 # remember number of good records on disk
566 # remember number of good records on disk
567 self._rbcrevslen = min(
567 self._rbcrevslen = min(
568 len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
568 len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
569 )
569 )
570 if self._rbcrevslen == 0:
570 if self._rbcrevslen == 0:
571 self._names = []
571 self._names = []
572 self._rbcnamescount = len(self._names) # number of names read at
572 self._rbcnamescount = len(self._names) # number of names read at
573 # _rbcsnameslen
573 # _rbcsnameslen
574
574
575 def _clear(self):
575 def _clear(self):
576 self._rbcsnameslen = 0
576 self._rbcsnameslen = 0
577 del self._names[:]
577 del self._names[:]
578 self._rbcnamescount = 0
578 self._rbcnamescount = 0
579 self._rbcrevslen = len(self._repo.changelog)
579 self._rbcrevslen = len(self._repo.changelog)
580 self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
580 self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
581 util.clearcachedproperty(self, b'_namesreverse')
581 util.clearcachedproperty(self, b'_namesreverse')
582
582
583 @util.propertycache
583 @util.propertycache
584 def _namesreverse(self):
584 def _namesreverse(self):
585 return dict((b, r) for r, b in enumerate(self._names))
585 return dict((b, r) for r, b in enumerate(self._names))
586
586
587 def branchinfo(self, rev):
587 def branchinfo(self, rev):
588 """Return branch name and close flag for rev, using and updating
588 """Return branch name and close flag for rev, using and updating
589 persistent cache."""
589 persistent cache."""
590 changelog = self._repo.changelog
590 changelog = self._repo.changelog
591 rbcrevidx = rev * _rbcrecsize
591 rbcrevidx = rev * _rbcrecsize
592
592
593 # avoid negative index, changelog.read(nullrev) is fast without cache
593 # avoid negative index, changelog.read(nullrev) is fast without cache
594 if rev == nullrev:
594 if rev == nullrev:
595 return changelog.branchinfo(rev)
595 return changelog.branchinfo(rev)
596
596
597 # if requested rev isn't allocated, grow and cache the rev info
597 # if requested rev isn't allocated, grow and cache the rev info
598 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
598 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
599 return self._branchinfo(rev)
599 return self._branchinfo(rev)
600
600
601 # fast path: extract data from cache, use it if node is matching
601 # fast path: extract data from cache, use it if node is matching
602 reponode = changelog.node(rev)[:_rbcnodelen]
602 reponode = changelog.node(rev)[:_rbcnodelen]
603 cachenode, branchidx = unpack_from(
603 cachenode, branchidx = unpack_from(
604 _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx
604 _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx
605 )
605 )
606 close = bool(branchidx & _rbccloseflag)
606 close = bool(branchidx & _rbccloseflag)
607 if close:
607 if close:
608 branchidx &= _rbcbranchidxmask
608 branchidx &= _rbcbranchidxmask
609 if cachenode == b'\0\0\0\0':
609 if cachenode == b'\0\0\0\0':
610 pass
610 pass
611 elif cachenode == reponode:
611 elif cachenode == reponode:
612 try:
612 try:
613 return self._names[branchidx], close
613 return self._names[branchidx], close
614 except IndexError:
614 except IndexError:
615 # recover from invalid reference to unknown branch
615 # recover from invalid reference to unknown branch
616 self._repo.ui.debug(
616 self._repo.ui.debug(
617 b"referenced branch names not found"
617 b"referenced branch names not found"
618 b" - rebuilding revision branch cache from scratch\n"
618 b" - rebuilding revision branch cache from scratch\n"
619 )
619 )
620 self._clear()
620 self._clear()
621 else:
621 else:
622 # rev/node map has changed, invalidate the cache from here up
622 # rev/node map has changed, invalidate the cache from here up
623 self._repo.ui.debug(
623 self._repo.ui.debug(
624 b"history modification detected - truncating "
624 b"history modification detected - truncating "
625 b"revision branch cache to revision %d\n" % rev
625 b"revision branch cache to revision %d\n" % rev
626 )
626 )
627 truncate = rbcrevidx + _rbcrecsize
627 truncate = rbcrevidx + _rbcrecsize
628 del self._rbcrevs[truncate:]
628 del self._rbcrevs[truncate:]
629 self._rbcrevslen = min(self._rbcrevslen, truncate)
629 self._rbcrevslen = min(self._rbcrevslen, truncate)
630
630
631 # fall back to slow path and make sure it will be written to disk
631 # fall back to slow path and make sure it will be written to disk
632 return self._branchinfo(rev)
632 return self._branchinfo(rev)
633
633
634 def _branchinfo(self, rev):
634 def _branchinfo(self, rev):
635 """Retrieve branch info from changelog and update _rbcrevs"""
635 """Retrieve branch info from changelog and update _rbcrevs"""
636 changelog = self._repo.changelog
636 changelog = self._repo.changelog
637 b, close = changelog.branchinfo(rev)
637 b, close = changelog.branchinfo(rev)
638 if b in self._namesreverse:
638 if b in self._namesreverse:
639 branchidx = self._namesreverse[b]
639 branchidx = self._namesreverse[b]
640 else:
640 else:
641 branchidx = len(self._names)
641 branchidx = len(self._names)
642 self._names.append(b)
642 self._names.append(b)
643 self._namesreverse[b] = branchidx
643 self._namesreverse[b] = branchidx
644 reponode = changelog.node(rev)
644 reponode = changelog.node(rev)
645 if close:
645 if close:
646 branchidx |= _rbccloseflag
646 branchidx |= _rbccloseflag
647 self._setcachedata(rev, reponode, branchidx)
647 self._setcachedata(rev, reponode, branchidx)
648 return b, close
648 return b, close
649
649
650 def setdata(self, branch, rev, node, close):
650 def setdata(self, branch, rev, node, close):
651 """add new data information to the cache"""
651 """add new data information to the cache"""
652 if branch in self._namesreverse:
652 if branch in self._namesreverse:
653 branchidx = self._namesreverse[branch]
653 branchidx = self._namesreverse[branch]
654 else:
654 else:
655 branchidx = len(self._names)
655 branchidx = len(self._names)
656 self._names.append(branch)
656 self._names.append(branch)
657 self._namesreverse[branch] = branchidx
657 self._namesreverse[branch] = branchidx
658 if close:
658 if close:
659 branchidx |= _rbccloseflag
659 branchidx |= _rbccloseflag
660 self._setcachedata(rev, node, branchidx)
660 self._setcachedata(rev, node, branchidx)
661 # If no cache data were readable (non exists, bad permission, etc)
661 # If no cache data were readable (non exists, bad permission, etc)
662 # the cache was bypassing itself by setting:
662 # the cache was bypassing itself by setting:
663 #
663 #
664 # self.branchinfo = self._branchinfo
664 # self.branchinfo = self._branchinfo
665 #
665 #
666 # Since we now have data in the cache, we need to drop this bypassing.
666 # Since we now have data in the cache, we need to drop this bypassing.
667 if 'branchinfo' in vars(self):
667 if 'branchinfo' in vars(self):
668 del self.branchinfo
668 del self.branchinfo
669
669
670 def _setcachedata(self, rev, node, branchidx):
670 def _setcachedata(self, rev, node, branchidx):
671 """Writes the node's branch data to the in-memory cache data."""
671 """Writes the node's branch data to the in-memory cache data."""
672 if rev == nullrev:
672 if rev == nullrev:
673 return
673 return
674 rbcrevidx = rev * _rbcrecsize
674 rbcrevidx = rev * _rbcrecsize
675 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
675 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
676 self._rbcrevs.extend(
676 self._rbcrevs.extend(
677 b'\0'
677 b'\0'
678 * (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
678 * (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
679 )
679 )
680 pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
680 pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
681 self._rbcrevslen = min(self._rbcrevslen, rev)
681 self._rbcrevslen = min(self._rbcrevslen, rev)
682
682
683 tr = self._repo.currenttransaction()
683 tr = self._repo.currenttransaction()
684 if tr:
684 if tr:
685 tr.addfinalize(b'write-revbranchcache', self.write)
685 tr.addfinalize(b'write-revbranchcache', self.write)
686
686
687 def write(self, tr=None):
687 def write(self, tr=None):
688 """Save branch cache if it is dirty."""
688 """Save branch cache if it is dirty."""
689 repo = self._repo
689 repo = self._repo
690 wlock = None
690 wlock = None
691 step = b''
691 step = b''
692 try:
692 try:
693 # write the new names
693 # write the new names
694 if self._rbcnamescount < len(self._names):
694 if self._rbcnamescount < len(self._names):
695 wlock = repo.wlock(wait=False)
695 wlock = repo.wlock(wait=False)
696 step = b' names'
696 step = b' names'
697 self._writenames(repo)
697 self._writenames(repo)
698
698
699 # write the new revs
699 # write the new revs
700 start = self._rbcrevslen * _rbcrecsize
700 start = self._rbcrevslen * _rbcrecsize
701 if start != len(self._rbcrevs):
701 if start != len(self._rbcrevs):
702 step = b''
702 step = b''
703 if wlock is None:
703 if wlock is None:
704 wlock = repo.wlock(wait=False)
704 wlock = repo.wlock(wait=False)
705 self._writerevs(repo, start)
705 self._writerevs(repo, start)
706
706
707 except (IOError, OSError, error.Abort, error.LockError) as inst:
707 except (IOError, OSError, error.Abort, error.LockError) as inst:
708 repo.ui.debug(
708 repo.ui.debug(
709 b"couldn't write revision branch cache%s: %s\n"
709 b"couldn't write revision branch cache%s: %s\n"
710 % (step, stringutil.forcebytestr(inst))
710 % (step, stringutil.forcebytestr(inst))
711 )
711 )
712 finally:
712 finally:
713 if wlock is not None:
713 if wlock is not None:
714 wlock.release()
714 wlock.release()
715
715
716 def _writenames(self, repo):
716 def _writenames(self, repo):
717 """ write the new branch names to revbranchcache """
717 """ write the new branch names to revbranchcache """
718 if self._rbcnamescount != 0:
718 if self._rbcnamescount != 0:
719 f = repo.cachevfs.open(_rbcnames, b'ab')
719 f = repo.cachevfs.open(_rbcnames, b'ab')
720 if f.tell() == self._rbcsnameslen:
720 if f.tell() == self._rbcsnameslen:
721 f.write(b'\0')
721 f.write(b'\0')
722 else:
722 else:
723 f.close()
723 f.close()
724 repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
724 repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
725 self._rbcnamescount = 0
725 self._rbcnamescount = 0
726 self._rbcrevslen = 0
726 self._rbcrevslen = 0
727 if self._rbcnamescount == 0:
727 if self._rbcnamescount == 0:
728 # before rewriting names, make sure references are removed
728 # before rewriting names, make sure references are removed
729 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
729 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
730 f = repo.cachevfs.open(_rbcnames, b'wb')
730 f = repo.cachevfs.open(_rbcnames, b'wb')
731 f.write(
731 f.write(
732 b'\0'.join(
732 b'\0'.join(
733 encoding.fromlocal(b)
733 encoding.fromlocal(b)
734 for b in self._names[self._rbcnamescount :]
734 for b in self._names[self._rbcnamescount :]
735 )
735 )
736 )
736 )
737 self._rbcsnameslen = f.tell()
737 self._rbcsnameslen = f.tell()
738 f.close()
738 f.close()
739 self._rbcnamescount = len(self._names)
739 self._rbcnamescount = len(self._names)
740
740
741 def _writerevs(self, repo, start):
741 def _writerevs(self, repo, start):
742 """ write the new revs to revbranchcache """
742 """ write the new revs to revbranchcache """
743 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
743 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
744 with repo.cachevfs.open(_rbcrevs, b'ab') as f:
744 with repo.cachevfs.open(_rbcrevs, b'ab') as f:
745 if f.tell() != start:
745 if f.tell() != start:
746 repo.ui.debug(
746 repo.ui.debug(
747 b"truncating cache/%s to %d\n" % (_rbcrevs, start)
747 b"truncating cache/%s to %d\n" % (_rbcrevs, start)
748 )
748 )
749 f.seek(start)
749 f.seek(start)
750 if f.tell() != start:
750 if f.tell() != start:
751 start = 0
751 start = 0
752 f.seek(start)
752 f.seek(start)
753 f.truncate()
753 f.truncate()
754 end = revs * _rbcrecsize
754 end = revs * _rbcrecsize
755 f.write(self._rbcrevs[start:end])
755 f.write(self._rbcrevs[start:end])
756 self._rbcrevslen = revs
756 self._rbcrevslen = revs
@@ -1,3984 +1,3984
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import copy as copymod
10 import copy as copymod
11 import errno
11 import errno
12 import os
12 import os
13 import re
13 import re
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullid,
18 nullid,
19 nullrev,
19 nullrev,
20 short,
20 short,
21 )
21 )
22 from .pycompat import (
22 from .pycompat import (
23 getattr,
23 getattr,
24 open,
24 open,
25 setattr,
25 setattr,
26 )
26 )
27
27
28 from . import (
28 from . import (
29 bookmarks,
29 bookmarks,
30 changelog,
30 changelog,
31 copies,
31 copies,
32 crecord as crecordmod,
32 crecord as crecordmod,
33 dirstateguard,
33 dirstateguard,
34 encoding,
34 encoding,
35 error,
35 error,
36 formatter,
36 formatter,
37 logcmdutil,
37 logcmdutil,
38 match as matchmod,
38 match as matchmod,
39 merge as mergemod,
39 merge as mergemod,
40 mergeutil,
40 mergeutil,
41 obsolete,
41 obsolete,
42 patch,
42 patch,
43 pathutil,
43 pathutil,
44 phases,
44 phases,
45 pycompat,
45 pycompat,
46 repair,
46 repair,
47 revlog,
47 revlog,
48 rewriteutil,
48 rewriteutil,
49 scmutil,
49 scmutil,
50 smartset,
50 smartset,
51 state as statemod,
51 state as statemod,
52 subrepoutil,
52 subrepoutil,
53 templatekw,
53 templatekw,
54 templater,
54 templater,
55 util,
55 util,
56 vfs as vfsmod,
56 vfs as vfsmod,
57 )
57 )
58
58
59 from .utils import (
59 from .utils import (
60 dateutil,
60 dateutil,
61 stringutil,
61 stringutil,
62 )
62 )
63
63
64 if not globals():
64 if pycompat.TYPE_CHECKING:
65 from typing import (
65 from typing import (
66 Any,
66 Any,
67 Dict,
67 Dict,
68 )
68 )
69
69
70 for t in (Any, Dict):
70 for t in (Any, Dict):
71 assert t
71 assert t
72
72
73 stringio = util.stringio
73 stringio = util.stringio
74
74
75 # templates of common command options
75 # templates of common command options
76
76
77 dryrunopts = [
77 dryrunopts = [
78 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
78 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
79 ]
79 ]
80
80
81 confirmopts = [
81 confirmopts = [
82 (b'', b'confirm', None, _(b'ask before applying actions')),
82 (b'', b'confirm', None, _(b'ask before applying actions')),
83 ]
83 ]
84
84
85 remoteopts = [
85 remoteopts = [
86 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
86 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
87 (
87 (
88 b'',
88 b'',
89 b'remotecmd',
89 b'remotecmd',
90 b'',
90 b'',
91 _(b'specify hg command to run on the remote side'),
91 _(b'specify hg command to run on the remote side'),
92 _(b'CMD'),
92 _(b'CMD'),
93 ),
93 ),
94 (
94 (
95 b'',
95 b'',
96 b'insecure',
96 b'insecure',
97 None,
97 None,
98 _(b'do not verify server certificate (ignoring web.cacerts config)'),
98 _(b'do not verify server certificate (ignoring web.cacerts config)'),
99 ),
99 ),
100 ]
100 ]
101
101
102 walkopts = [
102 walkopts = [
103 (
103 (
104 b'I',
104 b'I',
105 b'include',
105 b'include',
106 [],
106 [],
107 _(b'include names matching the given patterns'),
107 _(b'include names matching the given patterns'),
108 _(b'PATTERN'),
108 _(b'PATTERN'),
109 ),
109 ),
110 (
110 (
111 b'X',
111 b'X',
112 b'exclude',
112 b'exclude',
113 [],
113 [],
114 _(b'exclude names matching the given patterns'),
114 _(b'exclude names matching the given patterns'),
115 _(b'PATTERN'),
115 _(b'PATTERN'),
116 ),
116 ),
117 ]
117 ]
118
118
119 commitopts = [
119 commitopts = [
120 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
120 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
121 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
121 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
122 ]
122 ]
123
123
124 commitopts2 = [
124 commitopts2 = [
125 (
125 (
126 b'd',
126 b'd',
127 b'date',
127 b'date',
128 b'',
128 b'',
129 _(b'record the specified date as commit date'),
129 _(b'record the specified date as commit date'),
130 _(b'DATE'),
130 _(b'DATE'),
131 ),
131 ),
132 (
132 (
133 b'u',
133 b'u',
134 b'user',
134 b'user',
135 b'',
135 b'',
136 _(b'record the specified user as committer'),
136 _(b'record the specified user as committer'),
137 _(b'USER'),
137 _(b'USER'),
138 ),
138 ),
139 ]
139 ]
140
140
141 commitopts3 = [
141 commitopts3 = [
142 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
142 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
143 (b'U', b'currentuser', None, _(b'record the current user as committer')),
143 (b'U', b'currentuser', None, _(b'record the current user as committer')),
144 ]
144 ]
145
145
146 formatteropts = [
146 formatteropts = [
147 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
147 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
148 ]
148 ]
149
149
150 templateopts = [
150 templateopts = [
151 (
151 (
152 b'',
152 b'',
153 b'style',
153 b'style',
154 b'',
154 b'',
155 _(b'display using template map file (DEPRECATED)'),
155 _(b'display using template map file (DEPRECATED)'),
156 _(b'STYLE'),
156 _(b'STYLE'),
157 ),
157 ),
158 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
158 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
159 ]
159 ]
160
160
161 logopts = [
161 logopts = [
162 (b'p', b'patch', None, _(b'show patch')),
162 (b'p', b'patch', None, _(b'show patch')),
163 (b'g', b'git', None, _(b'use git extended diff format')),
163 (b'g', b'git', None, _(b'use git extended diff format')),
164 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
164 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
165 (b'M', b'no-merges', None, _(b'do not show merges')),
165 (b'M', b'no-merges', None, _(b'do not show merges')),
166 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
166 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
167 (b'G', b'graph', None, _(b"show the revision DAG")),
167 (b'G', b'graph', None, _(b"show the revision DAG")),
168 ] + templateopts
168 ] + templateopts
169
169
170 diffopts = [
170 diffopts = [
171 (b'a', b'text', None, _(b'treat all files as text')),
171 (b'a', b'text', None, _(b'treat all files as text')),
172 (b'g', b'git', None, _(b'use git extended diff format')),
172 (b'g', b'git', None, _(b'use git extended diff format')),
173 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
173 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
174 (b'', b'nodates', None, _(b'omit dates from diff headers')),
174 (b'', b'nodates', None, _(b'omit dates from diff headers')),
175 ]
175 ]
176
176
177 diffwsopts = [
177 diffwsopts = [
178 (
178 (
179 b'w',
179 b'w',
180 b'ignore-all-space',
180 b'ignore-all-space',
181 None,
181 None,
182 _(b'ignore white space when comparing lines'),
182 _(b'ignore white space when comparing lines'),
183 ),
183 ),
184 (
184 (
185 b'b',
185 b'b',
186 b'ignore-space-change',
186 b'ignore-space-change',
187 None,
187 None,
188 _(b'ignore changes in the amount of white space'),
188 _(b'ignore changes in the amount of white space'),
189 ),
189 ),
190 (
190 (
191 b'B',
191 b'B',
192 b'ignore-blank-lines',
192 b'ignore-blank-lines',
193 None,
193 None,
194 _(b'ignore changes whose lines are all blank'),
194 _(b'ignore changes whose lines are all blank'),
195 ),
195 ),
196 (
196 (
197 b'Z',
197 b'Z',
198 b'ignore-space-at-eol',
198 b'ignore-space-at-eol',
199 None,
199 None,
200 _(b'ignore changes in whitespace at EOL'),
200 _(b'ignore changes in whitespace at EOL'),
201 ),
201 ),
202 ]
202 ]
203
203
204 diffopts2 = (
204 diffopts2 = (
205 [
205 [
206 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
206 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
207 (
207 (
208 b'p',
208 b'p',
209 b'show-function',
209 b'show-function',
210 None,
210 None,
211 _(b'show which function each change is in'),
211 _(b'show which function each change is in'),
212 ),
212 ),
213 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
213 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
214 ]
214 ]
215 + diffwsopts
215 + diffwsopts
216 + [
216 + [
217 (
217 (
218 b'U',
218 b'U',
219 b'unified',
219 b'unified',
220 b'',
220 b'',
221 _(b'number of lines of context to show'),
221 _(b'number of lines of context to show'),
222 _(b'NUM'),
222 _(b'NUM'),
223 ),
223 ),
224 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
224 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
225 (
225 (
226 b'',
226 b'',
227 b'root',
227 b'root',
228 b'',
228 b'',
229 _(b'produce diffs relative to subdirectory'),
229 _(b'produce diffs relative to subdirectory'),
230 _(b'DIR'),
230 _(b'DIR'),
231 ),
231 ),
232 ]
232 ]
233 )
233 )
234
234
235 mergetoolopts = [
235 mergetoolopts = [
236 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
236 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
237 ]
237 ]
238
238
239 similarityopts = [
239 similarityopts = [
240 (
240 (
241 b's',
241 b's',
242 b'similarity',
242 b'similarity',
243 b'',
243 b'',
244 _(b'guess renamed files by similarity (0<=s<=100)'),
244 _(b'guess renamed files by similarity (0<=s<=100)'),
245 _(b'SIMILARITY'),
245 _(b'SIMILARITY'),
246 )
246 )
247 ]
247 ]
248
248
249 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
249 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
250
250
251 debugrevlogopts = [
251 debugrevlogopts = [
252 (b'c', b'changelog', False, _(b'open changelog')),
252 (b'c', b'changelog', False, _(b'open changelog')),
253 (b'm', b'manifest', False, _(b'open manifest')),
253 (b'm', b'manifest', False, _(b'open manifest')),
254 (b'', b'dir', b'', _(b'open directory manifest')),
254 (b'', b'dir', b'', _(b'open directory manifest')),
255 ]
255 ]
256
256
257 # special string such that everything below this line will be ingored in the
257 # special string such that everything below this line will be ingored in the
258 # editor text
258 # editor text
259 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
259 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
260
260
261
261
262 def resolvecommitoptions(ui, opts):
262 def resolvecommitoptions(ui, opts):
263 """modify commit options dict to handle related options
263 """modify commit options dict to handle related options
264
264
265 The return value indicates that ``rewrite.update-timestamp`` is the reason
265 The return value indicates that ``rewrite.update-timestamp`` is the reason
266 the ``date`` option is set.
266 the ``date`` option is set.
267 """
267 """
268 if opts.get(b'date') and opts.get(b'currentdate'):
268 if opts.get(b'date') and opts.get(b'currentdate'):
269 raise error.Abort(_(b'--date and --currentdate are mutually exclusive'))
269 raise error.Abort(_(b'--date and --currentdate are mutually exclusive'))
270 if opts.get(b'user') and opts.get(b'currentuser'):
270 if opts.get(b'user') and opts.get(b'currentuser'):
271 raise error.Abort(_(b'--user and --currentuser are mutually exclusive'))
271 raise error.Abort(_(b'--user and --currentuser are mutually exclusive'))
272
272
273 datemaydiffer = False # date-only change should be ignored?
273 datemaydiffer = False # date-only change should be ignored?
274
274
275 if opts.get(b'currentdate'):
275 if opts.get(b'currentdate'):
276 opts[b'date'] = b'%d %d' % dateutil.makedate()
276 opts[b'date'] = b'%d %d' % dateutil.makedate()
277 elif (
277 elif (
278 not opts.get(b'date')
278 not opts.get(b'date')
279 and ui.configbool(b'rewrite', b'update-timestamp')
279 and ui.configbool(b'rewrite', b'update-timestamp')
280 and opts.get(b'currentdate') is None
280 and opts.get(b'currentdate') is None
281 ):
281 ):
282 opts[b'date'] = b'%d %d' % dateutil.makedate()
282 opts[b'date'] = b'%d %d' % dateutil.makedate()
283 datemaydiffer = True
283 datemaydiffer = True
284
284
285 if opts.get(b'currentuser'):
285 if opts.get(b'currentuser'):
286 opts[b'user'] = ui.username()
286 opts[b'user'] = ui.username()
287
287
288 return datemaydiffer
288 return datemaydiffer
289
289
290
290
291 def checknotesize(ui, opts):
291 def checknotesize(ui, opts):
292 """ make sure note is of valid format """
292 """ make sure note is of valid format """
293
293
294 note = opts.get(b'note')
294 note = opts.get(b'note')
295 if not note:
295 if not note:
296 return
296 return
297
297
298 if len(note) > 255:
298 if len(note) > 255:
299 raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
299 raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
300 if b'\n' in note:
300 if b'\n' in note:
301 raise error.Abort(_(b"note cannot contain a newline"))
301 raise error.Abort(_(b"note cannot contain a newline"))
302
302
303
303
304 def ishunk(x):
304 def ishunk(x):
305 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
305 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
306 return isinstance(x, hunkclasses)
306 return isinstance(x, hunkclasses)
307
307
308
308
309 def newandmodified(chunks, originalchunks):
309 def newandmodified(chunks, originalchunks):
310 newlyaddedandmodifiedfiles = set()
310 newlyaddedandmodifiedfiles = set()
311 alsorestore = set()
311 alsorestore = set()
312 for chunk in chunks:
312 for chunk in chunks:
313 if (
313 if (
314 ishunk(chunk)
314 ishunk(chunk)
315 and chunk.header.isnewfile()
315 and chunk.header.isnewfile()
316 and chunk not in originalchunks
316 and chunk not in originalchunks
317 ):
317 ):
318 newlyaddedandmodifiedfiles.add(chunk.header.filename())
318 newlyaddedandmodifiedfiles.add(chunk.header.filename())
319 alsorestore.update(
319 alsorestore.update(
320 set(chunk.header.files()) - {chunk.header.filename()}
320 set(chunk.header.files()) - {chunk.header.filename()}
321 )
321 )
322 return newlyaddedandmodifiedfiles, alsorestore
322 return newlyaddedandmodifiedfiles, alsorestore
323
323
324
324
325 def parsealiases(cmd):
325 def parsealiases(cmd):
326 return cmd.split(b"|")
326 return cmd.split(b"|")
327
327
328
328
329 def setupwrapcolorwrite(ui):
329 def setupwrapcolorwrite(ui):
330 # wrap ui.write so diff output can be labeled/colorized
330 # wrap ui.write so diff output can be labeled/colorized
331 def wrapwrite(orig, *args, **kw):
331 def wrapwrite(orig, *args, **kw):
332 label = kw.pop('label', b'')
332 label = kw.pop('label', b'')
333 for chunk, l in patch.difflabel(lambda: args):
333 for chunk, l in patch.difflabel(lambda: args):
334 orig(chunk, label=label + l)
334 orig(chunk, label=label + l)
335
335
336 oldwrite = ui.write
336 oldwrite = ui.write
337
337
338 def wrap(*args, **kwargs):
338 def wrap(*args, **kwargs):
339 return wrapwrite(oldwrite, *args, **kwargs)
339 return wrapwrite(oldwrite, *args, **kwargs)
340
340
341 setattr(ui, 'write', wrap)
341 setattr(ui, 'write', wrap)
342 return oldwrite
342 return oldwrite
343
343
344
344
345 def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
345 def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
346 try:
346 try:
347 if usecurses:
347 if usecurses:
348 if testfile:
348 if testfile:
349 recordfn = crecordmod.testdecorator(
349 recordfn = crecordmod.testdecorator(
350 testfile, crecordmod.testchunkselector
350 testfile, crecordmod.testchunkselector
351 )
351 )
352 else:
352 else:
353 recordfn = crecordmod.chunkselector
353 recordfn = crecordmod.chunkselector
354
354
355 return crecordmod.filterpatch(
355 return crecordmod.filterpatch(
356 ui, originalhunks, recordfn, operation
356 ui, originalhunks, recordfn, operation
357 )
357 )
358 except crecordmod.fallbackerror as e:
358 except crecordmod.fallbackerror as e:
359 ui.warn(b'%s\n' % e.message) # pytype: disable=attribute-error
359 ui.warn(b'%s\n' % e.message) # pytype: disable=attribute-error
360 ui.warn(_(b'falling back to text mode\n'))
360 ui.warn(_(b'falling back to text mode\n'))
361
361
362 return patch.filterpatch(ui, originalhunks, match, operation)
362 return patch.filterpatch(ui, originalhunks, match, operation)
363
363
364
364
def recordfilter(ui, originalhunks, match, operation=None):
    """ Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used to build ui messages to indicate to the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config(b'experimental', b'crecordtest')
    # ui.write is wrapped for color handling while prompting; the finally
    # clause guarantees it is restored even if filtering raises
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(
            ui, originalhunks, usecurses, testfile, match, operation
        )
    finally:
        ui.write = oldwrite
    return newchunks, newopts
382
382
383
383
def dorecord(
    ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
):
    """Interactively select changes and hand them to *commitfunc*.

    *cmdsuggest* is the non-interactive command suggested in the abort
    message when the ui is not interactive; *backupall* backs up every
    changed file instead of only the ones touched by the selection;
    *filterfn* performs the hunk selection (see recordfilter).

    Raises error.Abort when run non-interactively or when the username
    is not configured.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _(b'running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _(b'running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get(b'user'):
        ui.username()  # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        if not opts.get(b'interactive-unshelve'):
            checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(
                _(
                    b'cannot partially commit a merge '
                    b'(use "hg commit" instead)'
                )
            )

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        force = opts.get(b'force')
        if not force:
            match = matchmod.badmatch(match, fail)

        status = repo.status(match=match)

        overrides = {(b'ui', b'commitsubrepos'): True}

        with repo.ui.configoverride(overrides, b'record'):
            # subrepoutil.precommit() modifies the status, so hand it a
            # deep-ish copy and keep the original intact for later use
            tmpstatus = scmutil.status(
                copymod.copy(status.modified),
                copymod.copy(status.added),
                copymod.copy(status.removed),
                copymod.copy(status.deleted),
                copymod.copy(status.unknown),
                copymod.copy(status.ignored),
                copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
            )

            # Force allows -X subrepo to skip the subrepo.
            subs, commitsubs, newstate = subrepoutil.precommit(
                repo.ui, wctx, tmpstatus, match, force=True
            )
            for s in subs:
                if s in commitsubs:
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    raise error.Abort(dirtyreason)

        if not force:
            repo.checkcommitpatterns(wctx, match, status, fail)
        diffopts = patch.difffeatureopts(
            ui,
            opts=opts,
            whitespace=True,
            section=b'commands',
            configprefix=b'commit.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)
        match = scmutil.match(repo[None], pats)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks, match)
        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir. We also will need to restore
        # files that were the sources of renames so that the patch application
        # works.
        newlyaddedandmodifiedfiles, alsorestore = newandmodified(
            chunks, originalchunks
        )
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # header-less chunk objects have no files(); skip them
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_(b'no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [
                f
                for f in newfiles
                if f in modified or f in newlyaddedandmodifiedfiles
            ]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join(b'record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                # a leftover backup dir from a previous run is fine
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(
                    prefix=f.replace(b'/', b'_') + b'.', dir=backupdir
                )
                os.close(fd)
                ui.debug(b'backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get(b'review', False):
                patchtext = (
                    crecordmod.diffhelptext
                    + crecordmod.patchhelptext
                    + fp.read()
                )
                reviewedpatch = ui.edit(
                    patchtext, b"", action=b"diff", repopath=repo.path
                )
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
                mergemod.update(
                    repo,
                    repo.dirstate.p1(),
                    branchmerge=False,
                    force=True,
                    matcher=m,
                )

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug(b'applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in pycompat.iteritems(backups):
                    ui.debug(b'restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == b'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best effort: failure to clean up backups is not fatal
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # take the wlock so the working directory cannot change under us
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
614
614
615
615
class dirnode(object):
    """
    Represent a directory in user working copy with information required for
    the purpose of tersing its status.

    path is the path to the directory, without a trailing '/'

    statuses is a set of statuses of all files in this directory (this includes
    all the files in all the subdirectories too)

    files is a list of files which are direct child of this directory

    subdirs is a dictionary of sub-directory name as the key and it's own
    dirnode object as the value
    """

    def __init__(self, dirpath):
        # type: (bytes) -> None
        self.path = dirpath
        # status characters seen anywhere below this directory
        self.statuses = set()
        # (filename, statuschar) pairs for direct children only
        self.files = []
        # subdir name -> dirnode
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Add a file in this directory as a direct child."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not direct child of this directory, we traverse to the
        directory of which this file is a direct child of and add the file
        there.
        """

        # the filename contains a path separator, it means it's not the direct
        # child of this directory
        if b'/' in filename:
            subdir, filep = filename.split(b'/', 1)

            # does the dirnode object for subdir exists
            if subdir not in self.subdirs:
                subdirpath = pathutil.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            # try adding the file in subdir
            self.subdirs[subdir].addfile(filep, status)

        else:
            self._addfileindir(filename, status)

        # record the status at every level on the way down, so any ancestor
        # knows the full set of statuses present beneath it
        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for f, st in self.files:
            yield st, pathutil.join(self.path, f)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with `--terse`
        flag.

        Following are the cases which can happen:

        1) All the files in the directory (including all the files in its
        subdirectories) share the same status and the user has asked us to terse
        that status. -> yield (status, dirpath). dirpath will end in '/'.

        2) Otherwise, we do following:

                a) Yield (status, filepath) for all the files which are in this
                directory (only the ones in this directory, not the subdirs)

                b) Recurse the function on all the subdirectories of this
                directory
        """

        if len(self.statuses) == 1:
            # NOTE: pop() empties self.statuses — a dirnode is only ever
            # walked once, so the destructive read is safe here
            onlyst = self.statuses.pop()

            # Making sure we terse only when the status abbreviation is
            # passed as terse argument
            if onlyst in terseargs:
                yield onlyst, self.path + b'/'
                return

        # add the files to status list
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # recurse on the subdirs
        for dirobj in self.subdirs.values():
            for st, fpath in dirobj.tersewalk(terseargs):
                yield st, fpath
715
715
716
716
def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory share the same status.

    statuslist is a scmutil.status() object holding the per-status file
    lists; terseargs is the string the user passed to the `--terse` flag.

    A tree of dirnode objects is built from all the files, each node
    recording the statuses seen beneath it, and then walked to decide
    which directories can be collapsed to a single entry.
    """
    # this tuple fixes the ordering of the returned status lists
    allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # reject any terse flag we do not know about
    for flag in pycompat.bytestr(terseargs):
        if flag not in allst:
            raise error.Abort(_(b"'%s' not recognized") % flag)

    # dirnode for the root of the repository
    rootobj = dirnode(b'')
    pstatus = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    tersedict = {}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        for fname in getattr(statuslist, attrname):
            rootobj.addfile(fname, statuschar)
        tersedict[statuschar] = []

    # the root directory itself is never tersed; record its direct files
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # walk every top-level subdirectory, collecting (possibly tersed) paths
    for subdir in rootobj.subdirs.values():
        for st, fpath in subdir.tersewalk(terseargs):
            tersedict[st].append(fpath)

    # emit the lists sorted, in the canonical status order
    return scmutil.status(*[sorted(tersedict[st]) for st in allst])
772
772
773
773
774 def _commentlines(raw):
774 def _commentlines(raw):
775 '''Surround lineswith a comment char and a new line'''
775 '''Surround lineswith a comment char and a new line'''
776 lines = raw.splitlines()
776 lines = raw.splitlines()
777 commentedlines = [b'# %s' % line for line in lines]
777 commentedlines = [b'# %s' % line for line in lines]
778 return b'\n'.join(commentedlines) + b'\n'
778 return b'\n'.join(commentedlines) + b'\n'
779
779
780
780
def _conflictsmsg(repo):
    """Return a commented byte-string message about unresolved conflicts.

    Returns None when no merge state is active.  Otherwise the message
    lists the unresolved files matching the current working-directory
    matcher, or states that no conflicts remain, wrapped by
    _commentlines().
    """
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    m = scmutil.match(repo[None])
    unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
    if unresolvedlist:
        mergeliststr = b'\n'.join(
            [
                b' %s' % util.pathto(repo.root, encoding.getcwd(), path)
                for path in sorted(unresolvedlist)
            ]
        )
        # the literal must be bytes (b'''): mergeliststr is bytes, and on
        # Python 3 formatting bytes into a str pattern would embed the
        # b'...' repr; _commentlines() also expects bytes
        msg = (
            _(
                b'''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE'''
            )
            % mergeliststr
        )
    else:
        msg = _(b'No unresolved merge conflicts.')

    return _commentlines(msg)
809
809
810
810
def morestatus(repo, fm):
    """Write extra status output about an unfinished operation via *fm*.

    When statemod.getrepostate() reports an unfinished state, emits a
    commented state banner, the conflict list (if any) and the
    state-specific hint message, all labeled b'status.morestatus'.
    """
    statetuple = statemod.getrepostate(repo)
    label = b'status.morestatus'
    if statetuple:
        state, helpfulmsg = statetuple
        statemsg = _(b'The repository is in an unfinished *%s* state.') % state
        fm.plain(b'%s\n' % _commentlines(statemsg), label=label)
        conmsg = _conflictsmsg(repo)
        # conmsg is None when no merge state is active
        if conmsg:
            fm.plain(b'%s\n' % conmsg, label=label)
        if helpfulmsg:
            fm.plain(b'%s\n' % _commentlines(helpfulmsg), label=label)
823
823
824
824
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    normalchoice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        elif strict:
            continue
        else:
            # prefix match: first alias that starts with cmd, if any
            found = next((a for a in aliases if a.startswith(cmd)), None)
        if found is None:
            continue
        isdebug = aliases[0].startswith(b"debug") or found.startswith(b"debug")
        if isdebug:
            debugchoice[found] = (aliases, table[entry])
        else:
            normalchoice[found] = (aliases, table[entry])

    # only surface debug commands when nothing else matched
    if not normalchoice and debugchoice:
        normalchoice = debugchoice

    return normalchoice, allcmds
862
862
863
863
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    if cmd in choice:
        return choice[cmd]

    # several prefix matches: refuse to guess
    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    # exactly one (prefix) match left, if any
    for entry in choice.values():
        return entry

    raise error.UnknownCommand(cmd, allcmds)
879
879
880
880
881 def changebranch(ui, repo, revs, label):
881 def changebranch(ui, repo, revs, label):
882 """ Change the branch name of given revs to label """
882 """ Change the branch name of given revs to label """
883
883
884 with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
884 with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
885 # abort in case of uncommitted merge or dirty wdir
885 # abort in case of uncommitted merge or dirty wdir
886 bailifchanged(repo)
886 bailifchanged(repo)
887 revs = scmutil.revrange(repo, revs)
887 revs = scmutil.revrange(repo, revs)
888 if not revs:
888 if not revs:
889 raise error.Abort(b"empty revision set")
889 raise error.Abort(b"empty revision set")
890 roots = repo.revs(b'roots(%ld)', revs)
890 roots = repo.revs(b'roots(%ld)', revs)
891 if len(roots) > 1:
891 if len(roots) > 1:
892 raise error.Abort(
892 raise error.Abort(
893 _(b"cannot change branch of non-linear revisions")
893 _(b"cannot change branch of non-linear revisions")
894 )
894 )
895 rewriteutil.precheck(repo, revs, b'change branch of')
895 rewriteutil.precheck(repo, revs, b'change branch of')
896
896
897 root = repo[roots.first()]
897 root = repo[roots.first()]
898 rpb = {parent.branch() for parent in root.parents()}
898 rpb = {parent.branch() for parent in root.parents()}
899 if label not in rpb and label in repo.branchmap():
899 if label not in rpb and label in repo.branchmap():
900 raise error.Abort(_(b"a branch of the same name already exists"))
900 raise error.Abort(_(b"a branch of the same name already exists"))
901
901
902 if repo.revs(b'obsolete() and %ld', revs):
902 if repo.revs(b'obsolete() and %ld', revs):
903 raise error.Abort(
903 raise error.Abort(
904 _(b"cannot change branch of a obsolete changeset")
904 _(b"cannot change branch of a obsolete changeset")
905 )
905 )
906
906
907 # make sure only topological heads
907 # make sure only topological heads
908 if repo.revs(b'heads(%ld) - head()', revs):
908 if repo.revs(b'heads(%ld) - head()', revs):
909 raise error.Abort(_(b"cannot change branch in middle of a stack"))
909 raise error.Abort(_(b"cannot change branch in middle of a stack"))
910
910
911 replacements = {}
911 replacements = {}
912 # avoid import cycle mercurial.cmdutil -> mercurial.context ->
912 # avoid import cycle mercurial.cmdutil -> mercurial.context ->
913 # mercurial.subrepo -> mercurial.cmdutil
913 # mercurial.subrepo -> mercurial.cmdutil
914 from . import context
914 from . import context
915
915
916 for rev in revs:
916 for rev in revs:
917 ctx = repo[rev]
917 ctx = repo[rev]
918 oldbranch = ctx.branch()
918 oldbranch = ctx.branch()
919 # check if ctx has same branch
919 # check if ctx has same branch
920 if oldbranch == label:
920 if oldbranch == label:
921 continue
921 continue
922
922
923 def filectxfn(repo, newctx, path):
923 def filectxfn(repo, newctx, path):
924 try:
924 try:
925 return ctx[path]
925 return ctx[path]
926 except error.ManifestLookupError:
926 except error.ManifestLookupError:
927 return None
927 return None
928
928
929 ui.debug(
929 ui.debug(
930 b"changing branch of '%s' from '%s' to '%s'\n"
930 b"changing branch of '%s' from '%s' to '%s'\n"
931 % (hex(ctx.node()), oldbranch, label)
931 % (hex(ctx.node()), oldbranch, label)
932 )
932 )
933 extra = ctx.extra()
933 extra = ctx.extra()
934 extra[b'branch_change'] = hex(ctx.node())
934 extra[b'branch_change'] = hex(ctx.node())
935 # While changing branch of set of linear commits, make sure that
935 # While changing branch of set of linear commits, make sure that
936 # we base our commits on new parent rather than old parent which
936 # we base our commits on new parent rather than old parent which
937 # was obsoleted while changing the branch
937 # was obsoleted while changing the branch
938 p1 = ctx.p1().node()
938 p1 = ctx.p1().node()
939 p2 = ctx.p2().node()
939 p2 = ctx.p2().node()
940 if p1 in replacements:
940 if p1 in replacements:
941 p1 = replacements[p1][0]
941 p1 = replacements[p1][0]
942 if p2 in replacements:
942 if p2 in replacements:
943 p2 = replacements[p2][0]
943 p2 = replacements[p2][0]
944
944
945 mc = context.memctx(
945 mc = context.memctx(
946 repo,
946 repo,
947 (p1, p2),
947 (p1, p2),
948 ctx.description(),
948 ctx.description(),
949 ctx.files(),
949 ctx.files(),
950 filectxfn,
950 filectxfn,
951 user=ctx.user(),
951 user=ctx.user(),
952 date=ctx.date(),
952 date=ctx.date(),
953 extra=extra,
953 extra=extra,
954 branch=label,
954 branch=label,
955 )
955 )
956
956
957 newnode = repo.commitctx(mc)
957 newnode = repo.commitctx(mc)
958 replacements[ctx.node()] = (newnode,)
958 replacements[ctx.node()] = (newnode,)
959 ui.debug(b'new node id is %s\n' % hex(newnode))
959 ui.debug(b'new node id is %s\n' % hex(newnode))
960
960
961 # create obsmarkers and move bookmarks
961 # create obsmarkers and move bookmarks
962 scmutil.cleanupnodes(
962 scmutil.cleanupnodes(
963 repo, replacements, b'branch-change', fixphase=True
963 repo, replacements, b'branch-change', fixphase=True
964 )
964 )
965
965
966 # move the working copy too
966 # move the working copy too
967 wctx = repo[None]
967 wctx = repo[None]
968 # in-progress merge is a bit too complex for now.
968 # in-progress merge is a bit too complex for now.
969 if len(wctx.parents()) == 1:
969 if len(wctx.parents()) == 1:
970 newid = replacements.get(wctx.p1().node())
970 newid = replacements.get(wctx.p1().node())
971 if newid is not None:
971 if newid is not None:
972 # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
972 # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
973 # mercurial.cmdutil
973 # mercurial.cmdutil
974 from . import hg
974 from . import hg
975
975
976 hg.update(repo, newid[0], quietempty=True)
976 hg.update(repo, newid[0], quietempty=True)
977
977
978 ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
978 ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
979
979
980
980
def findrepo(p, dirname=b".hg"):
    """Walk upward from directory ``p`` looking for a repository root.

    A directory counts as a repository root when it contains a
    subdirectory named ``dirname`` (``b".hg"`` by default; the parameter
    generalizes the helper to other control-directory layouts).

    Returns the first such ancestor (including ``p`` itself), or ``None``
    when the filesystem root is reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, dirname)):
        oldp, p = p, os.path.dirname(p)
        if p == oldp:
            # dirname() reached a fixed point: we hit the filesystem
            # root without ever seeing a control directory
            return None

    return p
988
988
989
989
def bailifchanged(repo, merge=True, hint=None):
    """Abort unless the working directory is clean.

    When 'merge' is False, a pending uncommitted merge is tolerated
    (e.g. while 'update --check' is running).  'hint' is forwarded to
    any Abort exception raised.  Subrepositories are checked
    recursively.
    """
    if merge:
        if repo.dirstate.p2() != nullid:
            raise error.Abort(_(b'outstanding uncommitted merge'), hint=hint)
    status = repo.status()
    dirty = (
        status.modified or status.added or status.removed or status.deleted
    )
    if dirty:
        raise error.Abort(_(b'uncommitted changes'), hint=hint)
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
1007
1007
1008
1008
def logmessage(ui, opts):
    """Extract the commit message from the -m/-l options.

    Raises Abort when both --message and --logfile are given, or when
    the log file cannot be read.  Returns None when neither option
    supplies a message.
    """
    message = opts.get(b'message')
    logfile = opts.get(b'logfile')

    if message and logfile:
        raise error.Abort(
            _(b'options --message and --logfile are mutually exclusive')
        )
    if logfile and not message:
        try:
            if isstdiofilename(logfile):
                # b'-' (or empty) means: read the message from stdin
                message = ui.fin.read()
            else:
                # normalize line endings while reading the file
                lines = util.readfile(logfile).splitlines()
                message = b'\n'.join(lines)
        except IOError as inst:
            raise error.Abort(
                _(b"can't read commit message '%s': %s")
                % (logfile, encoding.strtolocal(inst.strerror))
            )
    return message
1030
1030
1031
1031
def mergeeditform(ctxorbool, baseformname):
    """Return the editform name used to look up a committemplate.

    'ctxorbool' is either the ctx being committed, or a boolean saying
    whether a merge commit is in progress.  The result is baseformname
    with '.merge' appended for a merge and '.normal' appended otherwise.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # a changeset with two parents is a merge
        ismerge = len(ctxorbool.parents()) > 1
    if ismerge:
        return baseformname + b".merge"
    return baseformname + b".normal"
1048
1048
1049
1049
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """Pick the commit message editor matching the '--edit' option.

    'finishdesc' is a function called with the edited commit message
    (the new changeset's description) right after editing but before
    the emptiness check; it returns the text actually stored in
    history, allowing the description to be rewritten before storing.

    'extramsg' replaces the 'Leave message empty to abort commit' line
    shown in the editor; the 'HG: ' prefix and EOL are added
    automatically.

    'editform' is a dot-separated list of names distinguishing the
    purpose of the commit text editing.

    'commitforceeditor' is returned regardless of 'edit' whenever
    'finishdesc' or 'extramsg' is given, because those are specific to
    usage in MQ.
    """
    if edit or finishdesc or extramsg:

        def forceeditor(repo, ctx, subs):
            return commitforceeditor(
                repo,
                ctx,
                subs,
                finishdesc=finishdesc,
                extramsg=extramsg,
                editform=editform,
            )

        return forceeditor
    if editform:

        def formeditor(repo, ctx, subs):
            return commiteditor(repo, ctx, subs, editform=editform)

        return formeditor
    return commiteditor
1080
1080
1081
1081
def _escapecommandtemplate(tmpl):
    """Escape backslashes in the literal parts of a command template.

    Only outermost string segments (as classified by
    templater.scantemplate) are escaped; template syntax and inner
    strings are passed through untouched.
    """

    def _chunks():
        for typ, start, end in templater.scantemplate(tmpl, raw=True):
            piece = tmpl[start:end]
            if typ == b'string':
                yield stringutil.escapestr(piece)
            else:
                yield piece

    return b''.join(_chunks())
1090
1090
1091
1091
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if tmpl:
        escaped = _escapecommandtemplate(tmpl)
        templ = formatter.maketemplater(ui, escaped)
        return templ.renderdefault(props)
    # empty template: nothing to expand
    return tmpl
1109
1109
1110
1110
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against changeset 'ctx'.

    Each item in 'props' must be a stringify-able value or a callable
    returning such a value; bare lists and dicts must not be passed.
    """
    repo = ctx.repo()
    resources = formatter.templateresources(repo.ui, repo)
    templ = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords, resources=resources
    )
    mapping = {b'ctx': ctx}
    if props:
        mapping.update(props)
    return templ.renderdefault(mapping)
1126
1126
1127
1127
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # map of %-spec character -> replacement template fragment
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    # specs that are only meaningful when the caller supplied a value
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # with both known, zero-pad the sequence number to total's width
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    pieces = []
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            # template syntax segments are passed through unchanged
            pieces.append(pat[start:end])
            continue
        # literal segment: expand %-specs, escaping the rest
        pos = start
        while pos < end:
            pct = pat.find(b'%', pos, end)
            if pct < 0:
                pieces.append(stringutil.escapestr(pat[pos:end]))
                break
            pieces.append(stringutil.escapestr(pat[pos:pct]))
            if pct + 2 > end:
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            spec = pat[pct + 1 : pct + 2]
            pos = pct + 2
            try:
                pieces.append(expander[spec])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % spec
                )
    return b''.join(pieces)
1197
1197
1198
1198
def makefilename(ctx, pat, **props):
    """Expand an old-style %-format filename pattern against 'ctx'.

    An empty pattern is returned unchanged.
    """
    if not pat:
        return pat
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    tmpl = _buildfntemplate(pat, **props)
    templateprops = pycompat.byteskwargs(props)
    return rendertemplate(ctx, tmpl, templateprops)
1207
1207
1208
1208
def isstdiofilename(pat):
    """True if 'pat' looks like a filename denoting stdin/stdout.

    Both an empty pattern and the conventional b'-' qualify.
    """
    if not pat:
        return True
    return pat == b'-'
1212
1212
1213
1213
class _unclosablefile(object):
    """Proxy for a file object whose close() is a no-op.

    Used to hand out streams such as ui.fin/ui.fout without letting
    callers close the underlying file.  Every other attribute access is
    delegated to the wrapped object, and the proxy can be used as a
    context manager (exiting neither closes the stream nor suppresses
    exceptions).
    """

    def __init__(self, fp):
        self._fp = fp

    def close(self):
        # deliberately do nothing: the wrapped stream outlives us
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        # delegate everything else (read, write, flush, ...) to the
        # wrapped file object
        return getattr(self._fp, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # never suppress exceptions, never close the stream
        pass
1232
1232
1233
1233
def makefileobj(ctx, pat, mode=b'wb', **props):
    """Open the file named by expanding 'pat', or wrap stdio for b'-'.

    When 'pat' denotes stdin/stdout, the appropriate ui stream is
    returned wrapped so that close() is a no-op.  Otherwise the pattern
    is expanded via makefilename() and opened with 'mode'.
    """
    writable = mode not in (b'r', b'rb')

    if not isstdiofilename(pat):
        fn = makefilename(ctx, pat, **props)
        return open(fn, mode)
    repo = ctx.repo()
    if writable:
        fp = repo.ui.fout
    else:
        fp = repo.ui.fin
    return _unclosablefile(fp)
1246
1246
1247
1247
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog

    The storage to open is selected by the --changelog/--manifest/--dir
    flags in 'opts', or by the 'file_' path.  Invalid flag combinations
    abort with a message.  When 'returnrevlog' is True, the underlying
    revlog instance is returned instead of the storage object, falling
    back to opening 'file_' directly as a revlog when no repository
    storage matched.
    """
    cl = opts[b'changelog']
    mf = opts[b'manifest']
    dir = opts[b'dir']
    # validate the flag combination first; 'msg' stays None when valid.
    # Order matters: the changelog/manifest conflict is reported before
    # the changelog/dir conflict, then filename/no-repo problems.
    msg = None
    if cl and mf:
        msg = _(b'cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _(b'cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _(b'cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _(
                b'cannot specify --changelog or --manifest or --dir '
                b'without a repository'
            )
    if msg:
        raise error.Abort(msg)

    # resolve the requested storage from the repository, if any
    r = None
    if repo:
        if cl:
            # use the unfiltered changelog so hidden revisions are visible
            r = repo.unfiltered().changelog
        elif dir:
            if b'treemanifest' not in repo.requirements:
                raise error.Abort(
                    _(
                        b"--dir can only be used on repos with "
                        b"treemanifest enabled"
                    )
                )
            if not dir.endswith(b'/'):
                dir = dir + b'/'
            dirlog = repo.manifestlog.getstorage(dir)
            # only use the dirlog if it has any revisions
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            # only use the filelog if it has any revisions
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, b'_revlog'):
            # NOTE(review): bytes attribute name passed to safehasattr —
            # verify this behaves as intended on Python 3
            r = r._revlog  # pytype: disable=attribute-error
        elif r is not None:
            raise error.Abort(_(b'%r does not appear to be a revlog') % r)

    if not r:
        if not returnrevlog:
            raise error.Abort(_(b'cannot give path to non-revlog'))

        # fall back to opening 'file_' directly as a revlog on disk
        if not file_:
            raise error.CommandError(cmd, _(b'invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_(b"revlog '%s' not found") % file_)
        # strip the two-character extension and open the ".i" index file
        r = revlog.revlog(
            vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
        )
    return r
1315
1315
1316
1316
def openrevlog(repo, cmd, file_, opts):
    """Open the revlog that backs an item (see ``openstorage()``).

    Unlike ``openstorage()``, this always yields the revlog behind the
    storage.  Most callers care about the main storage object rather
    than the revlog backing it, so this should only be used by code
    that must examine low-level revlog implementation details, e.g.
    debug commands.
    """
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1328
1328
1329
1329
1330 def copy(ui, repo, pats, opts, rename=False):
1330 def copy(ui, repo, pats, opts, rename=False):
1331 # called with the repo lock held
1331 # called with the repo lock held
1332 #
1332 #
1333 # hgsep => pathname that uses "/" to separate directories
1333 # hgsep => pathname that uses "/" to separate directories
1334 # ossep => pathname that uses os.sep to separate directories
1334 # ossep => pathname that uses os.sep to separate directories
1335 cwd = repo.getcwd()
1335 cwd = repo.getcwd()
1336 targets = {}
1336 targets = {}
1337 after = opts.get(b"after")
1337 after = opts.get(b"after")
1338 dryrun = opts.get(b"dry_run")
1338 dryrun = opts.get(b"dry_run")
1339 wctx = repo[None]
1339 wctx = repo[None]
1340
1340
1341 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1341 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1342
1342
1343 def walkpat(pat):
1343 def walkpat(pat):
1344 srcs = []
1344 srcs = []
1345 if after:
1345 if after:
1346 badstates = b'?'
1346 badstates = b'?'
1347 else:
1347 else:
1348 badstates = b'?r'
1348 badstates = b'?r'
1349 m = scmutil.match(wctx, [pat], opts, globbed=True)
1349 m = scmutil.match(wctx, [pat], opts, globbed=True)
1350 for abs in wctx.walk(m):
1350 for abs in wctx.walk(m):
1351 state = repo.dirstate[abs]
1351 state = repo.dirstate[abs]
1352 rel = uipathfn(abs)
1352 rel = uipathfn(abs)
1353 exact = m.exact(abs)
1353 exact = m.exact(abs)
1354 if state in badstates:
1354 if state in badstates:
1355 if exact and state == b'?':
1355 if exact and state == b'?':
1356 ui.warn(_(b'%s: not copying - file is not managed\n') % rel)
1356 ui.warn(_(b'%s: not copying - file is not managed\n') % rel)
1357 if exact and state == b'r':
1357 if exact and state == b'r':
1358 ui.warn(
1358 ui.warn(
1359 _(
1359 _(
1360 b'%s: not copying - file has been marked for'
1360 b'%s: not copying - file has been marked for'
1361 b' remove\n'
1361 b' remove\n'
1362 )
1362 )
1363 % rel
1363 % rel
1364 )
1364 )
1365 continue
1365 continue
1366 # abs: hgsep
1366 # abs: hgsep
1367 # rel: ossep
1367 # rel: ossep
1368 srcs.append((abs, rel, exact))
1368 srcs.append((abs, rel, exact))
1369 return srcs
1369 return srcs
1370
1370
1371 # abssrc: hgsep
1371 # abssrc: hgsep
1372 # relsrc: ossep
1372 # relsrc: ossep
1373 # otarget: ossep
1373 # otarget: ossep
1374 def copyfile(abssrc, relsrc, otarget, exact):
1374 def copyfile(abssrc, relsrc, otarget, exact):
1375 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1375 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1376 if b'/' in abstarget:
1376 if b'/' in abstarget:
1377 # We cannot normalize abstarget itself, this would prevent
1377 # We cannot normalize abstarget itself, this would prevent
1378 # case only renames, like a => A.
1378 # case only renames, like a => A.
1379 abspath, absname = abstarget.rsplit(b'/', 1)
1379 abspath, absname = abstarget.rsplit(b'/', 1)
1380 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1380 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1381 reltarget = repo.pathto(abstarget, cwd)
1381 reltarget = repo.pathto(abstarget, cwd)
1382 target = repo.wjoin(abstarget)
1382 target = repo.wjoin(abstarget)
1383 src = repo.wjoin(abssrc)
1383 src = repo.wjoin(abssrc)
1384 state = repo.dirstate[abstarget]
1384 state = repo.dirstate[abstarget]
1385
1385
1386 scmutil.checkportable(ui, abstarget)
1386 scmutil.checkportable(ui, abstarget)
1387
1387
1388 # check for collisions
1388 # check for collisions
1389 prevsrc = targets.get(abstarget)
1389 prevsrc = targets.get(abstarget)
1390 if prevsrc is not None:
1390 if prevsrc is not None:
1391 ui.warn(
1391 ui.warn(
1392 _(b'%s: not overwriting - %s collides with %s\n')
1392 _(b'%s: not overwriting - %s collides with %s\n')
1393 % (
1393 % (
1394 reltarget,
1394 reltarget,
1395 repo.pathto(abssrc, cwd),
1395 repo.pathto(abssrc, cwd),
1396 repo.pathto(prevsrc, cwd),
1396 repo.pathto(prevsrc, cwd),
1397 )
1397 )
1398 )
1398 )
1399 return True # report a failure
1399 return True # report a failure
1400
1400
1401 # check for overwrites
1401 # check for overwrites
1402 exists = os.path.lexists(target)
1402 exists = os.path.lexists(target)
1403 samefile = False
1403 samefile = False
1404 if exists and abssrc != abstarget:
1404 if exists and abssrc != abstarget:
1405 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1405 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1406 abstarget
1406 abstarget
1407 ):
1407 ):
1408 if not rename:
1408 if not rename:
1409 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1409 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1410 return True # report a failure
1410 return True # report a failure
1411 exists = False
1411 exists = False
1412 samefile = True
1412 samefile = True
1413
1413
1414 if not after and exists or after and state in b'mn':
1414 if not after and exists or after and state in b'mn':
1415 if not opts[b'force']:
1415 if not opts[b'force']:
1416 if state in b'mn':
1416 if state in b'mn':
1417 msg = _(b'%s: not overwriting - file already committed\n')
1417 msg = _(b'%s: not overwriting - file already committed\n')
1418 if after:
1418 if after:
1419 flags = b'--after --force'
1419 flags = b'--after --force'
1420 else:
1420 else:
1421 flags = b'--force'
1421 flags = b'--force'
1422 if rename:
1422 if rename:
1423 hint = (
1423 hint = (
1424 _(
1424 _(
1425 b"('hg rename %s' to replace the file by "
1425 b"('hg rename %s' to replace the file by "
1426 b'recording a rename)\n'
1426 b'recording a rename)\n'
1427 )
1427 )
1428 % flags
1428 % flags
1429 )
1429 )
1430 else:
1430 else:
1431 hint = (
1431 hint = (
1432 _(
1432 _(
1433 b"('hg copy %s' to replace the file by "
1433 b"('hg copy %s' to replace the file by "
1434 b'recording a copy)\n'
1434 b'recording a copy)\n'
1435 )
1435 )
1436 % flags
1436 % flags
1437 )
1437 )
1438 else:
1438 else:
1439 msg = _(b'%s: not overwriting - file exists\n')
1439 msg = _(b'%s: not overwriting - file exists\n')
1440 if rename:
1440 if rename:
1441 hint = _(
1441 hint = _(
1442 b"('hg rename --after' to record the rename)\n"
1442 b"('hg rename --after' to record the rename)\n"
1443 )
1443 )
1444 else:
1444 else:
1445 hint = _(b"('hg copy --after' to record the copy)\n")
1445 hint = _(b"('hg copy --after' to record the copy)\n")
1446 ui.warn(msg % reltarget)
1446 ui.warn(msg % reltarget)
1447 ui.warn(hint)
1447 ui.warn(hint)
1448 return True # report a failure
1448 return True # report a failure
1449
1449
1450 if after:
1450 if after:
1451 if not exists:
1451 if not exists:
1452 if rename:
1452 if rename:
1453 ui.warn(
1453 ui.warn(
1454 _(b'%s: not recording move - %s does not exist\n')
1454 _(b'%s: not recording move - %s does not exist\n')
1455 % (relsrc, reltarget)
1455 % (relsrc, reltarget)
1456 )
1456 )
1457 else:
1457 else:
1458 ui.warn(
1458 ui.warn(
1459 _(b'%s: not recording copy - %s does not exist\n')
1459 _(b'%s: not recording copy - %s does not exist\n')
1460 % (relsrc, reltarget)
1460 % (relsrc, reltarget)
1461 )
1461 )
1462 return True # report a failure
1462 return True # report a failure
1463 elif not dryrun:
1463 elif not dryrun:
1464 try:
1464 try:
1465 if exists:
1465 if exists:
1466 os.unlink(target)
1466 os.unlink(target)
1467 targetdir = os.path.dirname(target) or b'.'
1467 targetdir = os.path.dirname(target) or b'.'
1468 if not os.path.isdir(targetdir):
1468 if not os.path.isdir(targetdir):
1469 os.makedirs(targetdir)
1469 os.makedirs(targetdir)
1470 if samefile:
1470 if samefile:
1471 tmp = target + b"~hgrename"
1471 tmp = target + b"~hgrename"
1472 os.rename(src, tmp)
1472 os.rename(src, tmp)
1473 os.rename(tmp, target)
1473 os.rename(tmp, target)
1474 else:
1474 else:
1475 # Preserve stat info on renames, not on copies; this matches
1475 # Preserve stat info on renames, not on copies; this matches
1476 # Linux CLI behavior.
1476 # Linux CLI behavior.
1477 util.copyfile(src, target, copystat=rename)
1477 util.copyfile(src, target, copystat=rename)
1478 srcexists = True
1478 srcexists = True
1479 except IOError as inst:
1479 except IOError as inst:
1480 if inst.errno == errno.ENOENT:
1480 if inst.errno == errno.ENOENT:
1481 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1481 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1482 srcexists = False
1482 srcexists = False
1483 else:
1483 else:
1484 ui.warn(
1484 ui.warn(
1485 _(b'%s: cannot copy - %s\n')
1485 _(b'%s: cannot copy - %s\n')
1486 % (relsrc, encoding.strtolocal(inst.strerror))
1486 % (relsrc, encoding.strtolocal(inst.strerror))
1487 )
1487 )
1488 return True # report a failure
1488 return True # report a failure
1489
1489
1490 if ui.verbose or not exact:
1490 if ui.verbose or not exact:
1491 if rename:
1491 if rename:
1492 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1492 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1493 else:
1493 else:
1494 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1494 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1495
1495
1496 targets[abstarget] = abssrc
1496 targets[abstarget] = abssrc
1497
1497
1498 # fix up dirstate
1498 # fix up dirstate
1499 scmutil.dirstatecopy(
1499 scmutil.dirstatecopy(
1500 ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1500 ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1501 )
1501 )
1502 if rename and not dryrun:
1502 if rename and not dryrun:
1503 if not after and srcexists and not samefile:
1503 if not after and srcexists and not samefile:
1504 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1504 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1505 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1505 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1506 wctx.forget([abssrc])
1506 wctx.forget([abssrc])
1507
1507
# pat: ossep
# dest ossep
# srcs: list of (hgsep, hgsep, ossep, bool)
# return: function that takes hgsep and returns ossep
def targetpathfn(pat, dest, srcs):
    """Return a function mapping a source path (hgsep) to its copy target
    (ossep).

    The mapping depends on whether ``pat`` is a directory and whether the
    destination directory already exists.  Relies on the enclosing scope for
    ``repo``, ``cwd`` and ``destdirexists``.
    """
    if os.path.isdir(pat):
        # copying a whole directory: strip the common prefix so the
        # directory structure is recreated under dest
        abspfx = pathutil.canonpath(repo.root, cwd, pat)
        abspfx = util.localpath(abspfx)
        if destdirexists:
            striplen = len(os.path.split(abspfx)[0])
        else:
            striplen = len(abspfx)
        if striplen:
            striplen += len(pycompat.ossep)
        res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
    elif destdirexists:
        # copying file(s) into an existing directory: keep basenames
        res = lambda p: os.path.join(
            dest, os.path.basename(util.localpath(p))
        )
    else:
        # single target file
        res = lambda p: dest
    return res
1530
1530
# pat: ossep
# dest ossep
# srcs: list of (hgsep, hgsep, ossep, bool)
# return: function that takes hgsep and returns ossep
def targetpathafterfn(pat, dest, srcs):
    """Return a function mapping a source path (hgsep) to its target path
    (ossep) for the ``--after`` case, where the copy already happened.

    When ``pat`` names a directory, the strip length is chosen by scoring
    which candidate prefix makes the most already-copied targets exist on
    disk.  Relies on the enclosing scope for ``repo``, ``cwd`` and
    ``destdirexists``.
    """
    if matchmod.patkind(pat):
        # a mercurial pattern
        res = lambda p: os.path.join(
            dest, os.path.basename(util.localpath(p))
        )
    else:
        abspfx = pathutil.canonpath(repo.root, cwd, pat)
        if len(abspfx) < len(srcs[0][0]):
            # A directory. Either the target path contains the last
            # component of the source path or it does not.
            def evalpath(striplen):
                # count how many stripped source paths already exist
                # under dest
                score = 0
                for s in srcs:
                    t = os.path.join(dest, util.localpath(s[0])[striplen:])
                    if os.path.lexists(t):
                        score += 1
                return score

            abspfx = util.localpath(abspfx)
            striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                score = evalpath(striplen)
                striplen1 = len(os.path.split(abspfx)[0])
                if striplen1:
                    striplen1 += len(pycompat.ossep)
                if evalpath(striplen1) > score:
                    striplen = striplen1
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        else:
            # a file
            if destdirexists:
                res = lambda p: os.path.join(
                    dest, os.path.basename(util.localpath(p))
                )
            else:
                res = lambda p: dest
    return res
1575
1575
1576 pats = scmutil.expandpats(pats)
1576 pats = scmutil.expandpats(pats)
1577 if not pats:
1577 if not pats:
1578 raise error.Abort(_(b'no source or destination specified'))
1578 raise error.Abort(_(b'no source or destination specified'))
1579 if len(pats) == 1:
1579 if len(pats) == 1:
1580 raise error.Abort(_(b'no destination specified'))
1580 raise error.Abort(_(b'no destination specified'))
1581 dest = pats.pop()
1581 dest = pats.pop()
1582 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1582 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1583 if not destdirexists:
1583 if not destdirexists:
1584 if len(pats) > 1 or matchmod.patkind(pats[0]):
1584 if len(pats) > 1 or matchmod.patkind(pats[0]):
1585 raise error.Abort(
1585 raise error.Abort(
1586 _(
1586 _(
1587 b'with multiple sources, destination must be an '
1587 b'with multiple sources, destination must be an '
1588 b'existing directory'
1588 b'existing directory'
1589 )
1589 )
1590 )
1590 )
1591 if util.endswithsep(dest):
1591 if util.endswithsep(dest):
1592 raise error.Abort(_(b'destination %s is not a directory') % dest)
1592 raise error.Abort(_(b'destination %s is not a directory') % dest)
1593
1593
1594 tfn = targetpathfn
1594 tfn = targetpathfn
1595 if after:
1595 if after:
1596 tfn = targetpathafterfn
1596 tfn = targetpathafterfn
1597 copylist = []
1597 copylist = []
1598 for pat in pats:
1598 for pat in pats:
1599 srcs = walkpat(pat)
1599 srcs = walkpat(pat)
1600 if not srcs:
1600 if not srcs:
1601 continue
1601 continue
1602 copylist.append((tfn(pat, dest, srcs), srcs))
1602 copylist.append((tfn(pat, dest, srcs), srcs))
1603 if not copylist:
1603 if not copylist:
1604 raise error.Abort(_(b'no files to copy'))
1604 raise error.Abort(_(b'no files to copy'))
1605
1605
1606 errors = 0
1606 errors = 0
1607 for targetpath, srcs in copylist:
1607 for targetpath, srcs in copylist:
1608 for abssrc, relsrc, exact in srcs:
1608 for abssrc, relsrc, exact in srcs:
1609 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1609 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1610 errors += 1
1610 errors += 1
1611
1611
1612 return errors != 0
1612 return errors != 0
1613
1613
1614
1614
## facility to let extension process additional data into an import patch
# list of identifier to be executed in order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass an ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1635
1635
1636
1636
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(msg, node, rejects)`` tuple, or ``(None, None, False)``
    when the patch data carries no filename.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get(b'filename')
    message = patchdata.get(b'message')
    user = opts.get(b'user') or patchdata.get(b'user')
    date = opts.get(b'date') or patchdata.get(b'date')
    branch = patchdata.get(b'branch')
    nodeid = patchdata.get(b'nodeid')
    p1 = patchdata.get(b'p1')
    p2 = patchdata.get(b'p2')

    nocommit = opts.get(b'no_commit')
    importbranch = opts.get(b'import_branch')
    update = not opts.get(b'bypass')
    strip = opts[b"strip"]
    prefix = opts[b"prefix"]
    sim = float(opts.get(b'similarity') or 0)

    if not tmpname:
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug(b'message:\n%s\n' % (message or b''))

    if len(parents) == 1:
        parents.append(repo[nullid])
    if opts.get(b'exact'):
        if not nodeid or not p1:
            raise error.Abort(_(b'not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullid]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullid]
        except error.RepoError:
            p1, p2 = parents
        if p2.node() == nullid:
            ui.warn(
                _(
                    b"warning: import the patch as a normal revision\n"
                    b"(use --exact to import the patch as a merge)\n"
                )
            )
    else:
        p1, p2 = parents

    n = None
    if update:
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get(b'exact') or importbranch:
            repo.dirstate.setbranch(branch or b'default')

        partial = opts.get(b'partial', False)
        files = set()
        try:
            patch.patch(
                ui,
                repo,
                tmpname,
                strip=strip,
                prefix=prefix,
                files=files,
                eolmode=None,
                similarity=sim / 100.0,
            )
        except error.PatchError as e:
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get(b'exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], b'import.normal')
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[(b'ui', b'allowemptycommit')] = True
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = repo.commit(
                    message, user, date, match=m, editor=editor, extra=extra
                )
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        if opts.get(b'exact') or importbranch:
            branch = branch or b'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(
                    ui,
                    repo,
                    p1,
                    store,
                    tmpname,
                    strip,
                    prefix,
                    files,
                    eolmode=None,
                )
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=b'import.bypass')
            memctx = context.memctx(
                repo,
                (p1.node(), p2.node()),
                message,
                files=files,
                filectxfn=store,
                user=user,
                date=date,
                branch=branch,
                editor=editor,
            )
            n = memctx.commit()
        finally:
            store.close()
    if opts.get(b'exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_(b'patch is damaged or loses information'))
    msg = _(b'applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _(b'created %s') % short(n)
    return msg, n, rejects
1833
1833
1834
1834
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# function as to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1842
1842
1843
1843
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Write one changeset as an "HG changeset patch" to formatter ``fm``.

    Emits the patch header (user, date, branch, node, parent(s), any
    extension-registered extra headers), the description, and the diff
    against the selected parent.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        # diff against the second parent instead of the first
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    fm.condwrite(
        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
    )
    fm.write(b'node', b'# Node ID %s\n', hex(node))
    fm.plain(b'# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain(b'# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    if fm.isplain():
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
1887
1887
1888
1888
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file"""
    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        # names starting with '<' (e.g. b'<unnamed>') are placeholders,
        # not real file names, so don't announce them
        if not dest.startswith(b'<'):
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)
1897
1897
1898
1898
def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets to possibly multiple files"""
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemap = util.sortdict()  # filename: [(seqno, rev), ...]

    # group revisions by the file name their template expands to, so that
    # several revisions can share one output file
    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        dest = makefilename(
            ctx, fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        filemap.setdefault(dest, []).append((seqno, rev))

    for dest in filemap:
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note(b"%s\n" % dest)
            for seqno, rev in filemap[dest]:
                fm.startitem()
                ctx = repo[rev]
                _exportsingle(
                    repo, ctx, fm, match, switch_parent, seqno, diffopts
                )
1923
1923
1924
1924
def _prefetchchangedfiles(repo, revs, match):
    """Prefetch all files touched by ``revs`` that satisfy ``match``.

    A falsy ``match`` selects every changed file.
    """
    allfiles = set()
    for rev in revs:
        for file in repo[rev].files():
            if not match or match(file):
                allfiles.add(file)
    scmutil.prefetchfiles(repo, revs, scmutil.matchfiles(repo, allfiles))
1932
1932
1933
1933
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Otherwise: All revs will be written to basefm.
    '''
    _prefetchchangedfiles(repo, revs, match)

    if not fntemplate:
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
    else:
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )
1975
1975
1976
1976
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Write "HG Changeset Patch" data for each of *revs* to the stream *fp*."""
    # Warm any file-content caches before producing the patches.
    _prefetchchangedfiles(repo, revs, match)

    # Label the output after the stream's name when it has one.
    destlabel = getattr(fp, 'name', b'<unnamed>')
    fm = formatter.formatter(repo.ui, fp, b'export', {})
    with fm:
        _exportfile(repo, revs, fm, destlabel, switch_parent, opts, match)
1984
1984
1985
1985
def showmarker(fm, marker, index=None):
    """Render one obsolescence marker through formatter *fm* in a readable way.

    Intended for debug commands; writes every field of the marker followed
    by a newline."""
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    successors = marker.succnodes()
    fm.condwrite(
        successors,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, successors), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write(
            b'parentnodes',
            b'{%s} ',
            fm.formatlist(map(hex, parentnodes), name=b'node', sep=b', '),
        )
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    # the date is rendered separately above, so drop it from the metadata
    metadata = marker.metadata().copy()
    metadata.pop(b'date', None)
    printable = pycompat.rapply(pycompat.maybebytestr, metadata)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(printable, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')
2016
2016
2017
2017
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = dateutil.matchdate(date)
    matcher = scmutil.matchall(repo)
    hits = {}  # rev -> date tuple for changesets matching the spec

    def prep(ctx, fns):
        when = ctx.date()
        if datematch(when[0]):
            hits[ctx.rev()] = when

    # walkchangerevs yields newest-first, so the first hit is the tipmost
    for ctx in walkchangerevs(repo, matcher, {b'rev': None}, prep):
        rev = ctx.rev()
        if rev not in hits:
            continue
        ui.status(
            _(b"found revision %d from %s\n")
            % (rev, dateutil.datestr(hits[rev]))
        )
        return b'%d' % rev

    raise error.Abort(_(b"revision matching date not found"))
2040
2040
2041
2041
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield an endless stream of window sizes.

    Starts at *windowsize* and doubles after each yield until the size
    reaches (or first exceeds) *sizelimit*, after which that size is
    repeated forever."""
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
2047
2047
2048
2048
def _walkrevs(repo, opts):
    """Resolve the revisions a log-style walk should visit (newest first)."""
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    following = opts.get(b'follow') or opts.get(b'follow_first')
    if opts.get(b'rev'):
        revs = scmutil.revrange(repo, opts[b'rev'])
    elif following and repo.dirstate.p1() == nullid:
        # following from the null working-directory parent: nothing to walk
        revs = smartset.baseset()
    elif following:
        revs = repo.revs(b'reverse(:.)')
    else:
        revs = smartset.spanset(repo)
        revs.reverse()
    return revs
2063
2063
2064
2064
class FileWalkError(Exception):
    """Raised when a file history cannot be walked using filelogs alone."""
2067
2067
2068
2068
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    Side effect: fncache is populated with rev -> [filenames] entries
    for the revisions found.
    '''
    # wanted: changelog revs to report
    # copies: rename sources discovered while following, fed back into
    #         iterfiles() below (values come from filelog.renamed())
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)

    def filerevs(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in pycompat.xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append(
                (linkrev, parentlinkrevs, follow and filelog.renamed(n))
            )

        return reversed(revs)

    def iterfiles():
        # yield (filename, filenode-or-None) for every file to examine;
        # 'copies' may still grow while this generator is being consumed
        pctx = repo[b'.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(
                        _(
                            b'cannot follow file not in parent '
                            b'revision: "%s"'
                        )
                        % filename
                    )
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _(b'cannot follow nonexistent file: "%s"') % file_
                    )
                # signal the caller to fall back to the changelog walk
                raise FileWalkError(b"Cannot walk via filelog")
            else:
                continue

        # 'last' is the newest filelog revision we care about
        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevs(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
2174
2174
2175
2175
class _followfilter(object):
    """Incrementally decide whether revisions belong to the follow set.

    The first revision passed to match() anchors the walk; later calls
    accept descendants (when walking forward) or ancestors (when walking
    backward) of the revisions accepted so far.
    """

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev  # anchor rev, set lazily by match()
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(r):
            parentrevs = self.repo.changelog.parentrevs(r)
            if self.onlyfirst:
                return parentrevs[0:1]
            return (p for p in parentrevs if p != nullrev)

        if self.startrev == nullrev:
            # first revision examined anchors the walk
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: rev is wanted iff one of its parents already is
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
            return False

        # backwards: rev is wanted iff it is a known root; its parents
        # then become roots for even earlier revisions
        if not self.roots:
            self.roots.update(realparents(self.startrev))
        if rev in self.roots:
            self.roots.remove(rev)
            self.roots.update(realparents(rev))
            return True

        return False
2214
2214
2215
2215
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    allfiles = opts.get(b'all_files')
    follow = opts.get(b'follow') or opts.get(b'follow_first')
    revs = _walkrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # slow path: filenames must be matched against every changeset's files
    slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
    fncache = {}  # rev -> list of matched filenames changed in that rev
    change = repo.__getitem__

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always() or allfiles:
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == b'.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(
                _(b'can only follow copies/renames for explicit filenames')
            )

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    if allfiles:
                        matches = list(ctx.manifest().walk(match))
                    else:
                        matches = [f for f in ctx.files() if match(f)]
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get(b'prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in pycompat.xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): relies on 'wanted' supporting subtraction of a
                # list; a plain set() does not -- presumably 'wanted' is a
                # smartset whenever --prune is given. Confirm against callers.
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get(b'follow_first'))

            def want(rev):
                return ff.match(rev) and rev in wanted

        else:

            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # prepare() sees the window's revs in ascending order...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:

                    def fns_generator():
                        if allfiles:
                            fiter = iter(ctx)
                        else:
                            fiter = ctx.files()
                        for f in fiter:
                            if match(f):
                                yield f

                    fns = fns_generator()
                prepare(ctx, fns)
            # ...while contexts are yielded in the walk's own order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2366
2366
2367
2367
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Schedule matched unknown files for addition to the dirstate.

    Recurses into subrepositories listed in the working context's
    substate. Returns the list of "bad" paths: paths rejected by the
    matcher's bad-file callback plus paths the working context refused
    to add.
    """
    bad = []

    # record bad paths while still delegating to the matcher's own callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audit filename case collisions only when the config asks for it
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(
        dirstate.walk(
            badmatch,
            subrepos=sorted(wctx.substate),
            unknown=True,
            ignored=False,
            full=False,
        )
    ):
        exact = match.exact(f)
        # note precedence: exact matches are always added; inexact ones only
        # when not explicit-only, untracked, and present on disk
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(
                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
                )

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            if opts.get('subrepos'):
                # with --subrepos, the subrepo add is not explicit-only
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                )
            else:
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
                )
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        # only report rejections the caller explicitly asked about
        bad.extend(f for f in rejected if f in match.files())
    return bad
2426
2426
2427
2427
def addwebdirpath(repo, serverpath, webconf):
    """Map *serverpath* to *repo* in *webconf*, recursing into subrepos."""
    webconf[serverpath] = repo.root
    repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))

    # every revision that touched .hgsub may declare subrepositories;
    # register each of them under the same server path as well
    for rev in repo.revs(b'filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2436
2436
2437
2437
def forget(
    ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Unschedule matched tracked files (and subrepo files) from tracking.

    Returns a (bad, forgot) pair: paths that could not be forgotten and
    paths that were (or would be, under --dry-run) forgotten.

    Raises error.Abort if both dryrun and interactive are requested.
    """
    if dryrun and interactive:
        raise error.Abort(_(b"cannot specify both --dry-run and --interactive"))
    bad = []
    # record bad paths while still delegating to the matcher's own callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(
                submatch,
                subprefix,
                subuipathfn,
                dryrun=dryrun,
                interactive=interactive,
            )
            # re-prefix subrepo-relative paths for the caller
            bad.extend([subpath + b'/' + f for f in subbad])
            forgot.extend([subpath + b'/' + f for f in subforgot])
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not explicitonly:
        # warn about explicitly named paths that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(
                            _(
                                b'not removing %s: '
                                b'file is already untracked\n'
                            )
                            % uipathfn(f)
                        )
                    bad.append(f)

    if interactive:
        # prompt per file; choices: yes / no / skip rest / all rest / help
        responses = _(
            b'[Ynsa?]'
            b'$$ &Yes, forget this file'
            b'$$ &No, skip this file'
            b'$$ &Skip remaining files'
            b'$$ Include &all remaining files'
            b'$$ &? (display help)'
        )
        # iterate over a copy since 'forget' is mutated below
        for filename in forget[:]:
            r = ui.promptchoice(
                _(b'forget %s %s') % (uipathfn(filename), responses)
            )
            if r == 4:  # ?
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(
                        _(b'forget %s %s') % (uipathfn(filename), responses)
                    )
            if r == 0:  # yes
                continue
            elif r == 1:  # no
                forget.remove(filename)
            elif r == 2:  # Skip
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3:  # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2535
2535
2536
2536
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """List the files of changectx ``ctx`` matched by matcher ``m``.

    One formatter item is emitted per matched file; size and flags are
    included when running verbosely or when the formatter's datahint
    requests them.  Subrepositories are recursed into when ``subrepos``
    is set or the subrepo path is matched.  Returns 0 if at least one
    file was listed (here or in a subrepo), 1 otherwise.
    """
    ret = 1

    # Building a filectx (for size/flags) costs a manifest lookup, so only
    # do it when the output will actually use those fields.
    needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
    for f in ctx.matches(m):
        fm.startitem()
        fm.context(ctx=ctx)
        if needsfctx:
            fc = ctx[f]
            fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
        fm.data(path=f)
        fm.plain(fmt % uipathfn(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        # recurse when explicitly requested, when the subrepo itself was
        # named, or when some pattern reaches inside the subrepo
        if subrepos or m.exact(subpath) or any(submatch.files()):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if (
                    sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
                    == 0
                ):
                    ret = 0
            except error.LookupError:
                # the pinned subrepo revision may be absent locally; warn
                # and keep going with the remaining subrepos
                ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    return ret
2570
2570
2571
2571
def remove(
    ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
):
    """Implement the working-copy side of ``hg remove``.

    Selects files to drop based on the working copy status and the
    ``after`` (record deletions only) / ``force`` flags, recurses into
    subrepositories, warns about files that cannot be removed, unlinks
    the selected files (unless ``dryrun`` or ``after``) and forgets them
    in the dirstate.  ``warnings`` is an accumulator shared with
    recursive subrepo calls; when the caller passes one, warning output
    is deferred to that caller.  Returns 0 on full success, 1 if any
    file was skipped or warned about.
    """
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean

    wctx = repo[None]

    # Only the outermost call (warnings is None) prints the accumulated
    # warnings; nested subrepo calls append into the caller's list.
    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(
        _(b'searching'), total=len(subs), unit=_(b'subrepos')
    )
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(
                    submatch,
                    subprefix,
                    subuipathfn,
                    after,
                    force,
                    subrepos,
                    dryrun,
                    warnings,
                ):
                    ret = 1
            except error.LookupError:
                warnings.append(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = pathutil.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(
        _(b'deleting'), total=len(files), unit=_(b'files')
    )
    for f in files:

        # closure over the current f: is this explicit path inside a subrepo?
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + b'/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        # tracked files, directories, '.' and subrepo paths are handled by
        # the main selection logic above/below, not warned about here
        if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(
                    _(b'not removing %s: no tracked files\n') % uipathfn(f)
                )
            else:
                warnings.append(
                    _(b'not removing %s: file is untracked\n') % uipathfn(f)
                )
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    # NOTE: the local name 'list' shadows the builtin; kept as-is.
    if force:
        # -f removes everything matched, regardless of state
        list = modified + deleted + clean + added
    elif after:
        # --after only records deletions already made on disk
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=len(remaining), unit=_(b'files')
        )
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(
                    _(b'not removing %s: file still exists\n') % uipathfn(f)
                )
                ret = 1
        progress.complete()
    else:
        # default: only deleted and clean files are safe to remove
        list = deleted + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
        )
        for f in modified:
            progress.increment()
            warnings.append(
                _(
                    b'not removing %s: file is modified (use -f'
                    b' to force removal)\n'
                )
                % uipathfn(f)
            )
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(
                _(
                    b"not removing %s: file has been marked for add"
                    b" (use 'hg forget' to undo add)\n"
                )
                % uipathfn(f)
            )
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(
        _(b'deleting'), total=len(list), unit=_(b'files')
    )
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue  # we never unlink added files on remove
                    rmdir = repo.ui.configbool(
                        b'experimental', b'removeemptydirs'
                    )
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2721
2721
2722
2722
2723 def _catfmtneedsdata(fm):
2723 def _catfmtneedsdata(fm):
2724 return not fm.datahint() or b'data' in fm.datahint()
2724 return not fm.datahint() or b'data' in fm.datahint()
2725
2725
2726
2726
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
    this method first."""

    # Fetching data() may be expensive (e.g. lfs), so it is skipped
    # entirely when the formatter did not ask for it.
    if _catfmtneedsdata(fm):
        data = ctx[path].data()
        if decode:
            # apply working-copy decode filters on request
            data = ctx.repo().wwritedata(path, data)
    else:
        data = b''
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write(b'data', b'%s', data)
    fm.data(path=path)
2744
2744
2745
2745
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out the contents of files matched by ``matcher`` at ``ctx``.

    Output goes through formatter ``basefm``; when ``fntemplate`` is
    given, the formatter is reopened per file on a templated filename.
    Matching subrepositories are recursed into.  Returns 0 if at least
    one file was written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        # emit one file, either into a templated output filename or via
        # the shared base formatter
        filename = None
        if fntemplate:
            filename = makefilename(
                ctx, fntemplate, pathname=os.path.join(prefix, path)
            )
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
                write(file)
                return 0
        except KeyError:
            # file not in this manifest; fall through to the general path
            pass

    # prefetch contents in bulk before walking (only if they'll be used)
    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [ctx.rev()], matcher)

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            # sub.cat returns 0-on-success, like this function
            if not sub.cat(
                submatch,
                basefm,
                fntemplate,
                subprefix,
                **pycompat.strkwargs(opts)
            ):
                err = 0
        except error.RepoLookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return err
2806
2806
2807
2807
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    Parses the date and message from ``opts``, optionally runs
    addremove under a dirstate guard, then delegates the actual commit
    to ``commitfunc(ui, repo, message, matcher, opts)`` and returns its
    result.
    '''
    date = opts.get(b'date')
    if date:
        opts[b'date'] = dateutil.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    dsguard = None
    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get(b'addremove'):
        dsguard = dirstateguard.dirstateguard(repo, b'commit')
    # the guard rolls the dirstate back if addremove or the commit fails;
    # without --addremove, a no-op context manager is used instead
    with dsguard or util.nullcontextmanager():
        if dsguard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
                raise error.Abort(
                    _(b"failed to mark all new/missing files as added/removed")
                )

        return commitfunc(ui, repo, message, matcher, opts)
2831
2831
2832
2832
def samefile(f, ctx1, ctx2):
    """Return True when file ``f`` is identical in ``ctx1`` and ``ctx2``.

    "Identical" means present in both with equal content and flags, or
    absent from both; present in only one context counts as different.
    """
    if f in ctx1.manifest():
        fctx1 = ctx1.filectx(f)
        if f not in ctx2.manifest():
            return False
        fctx2 = ctx2.filectx(f)
        # cmp() is True when contents differ
        return not fctx1.cmp(fctx2) and fctx1.flags() == fctx2.flags()
    # absent from ctx1: same only if also absent from ctx2
    return f not in ctx2.manifest()
2843
2843
2844
2844
def amend(ui, repo, old, extra, pats, opts):
    """Amend changeset ``old`` with working-copy changes matching pats/opts.

    Builds a replacement memctx on top of old's first parent, commits
    it inside a single transaction, reroutes the working copy parent,
    obsoletes/strips the old node via scmutil.cleanupnodes, and patches
    up the dirstate for the amended files.  Returns the new node, or
    ``old.node()`` when nothing would change.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username()  # raise exception if username not set

    ui.note(_(b'amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        # date-only change should be ignored?
        datemaydiffer = resolvecommitoptions(ui, opts)

        date = old.date()
        if opts.get(b'date'):
            date = dateutil.parsedate(opts.get(b'date'))
        user = opts.get(b'user') or old.user()

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            st = base.status(old)
            files = set(st.modified) | set(st.added) | set(st.removed)
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        relative = scmutil.anypats(pats, opts)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
        if opts.get(b'addremove') and scmutil.addremove(
            repo, matcher, b"", uipathfn, opts
        ):
            raise error.Abort(
                _(b"failed to mark all new/missing files as added/removed")
            )

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if b'.hgsub' in wctx or b'.hgsub' in old:
            subs, commitsubs, newsubstate = subrepoutil.precommit(
                ui, wctx, wctx._status, matcher
            )
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepoutil.writestate(repo, newsubstate)

        # refuse to amend over an unresolved merge
        ms = mergemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)

        filestoamend = set(f for f in wctx.files() if matcher(f))

        changes = len(filestoamend) > 0
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # NOTE(review): old.p2 is a bound method and therefore always
            # truthy, so this branch always runs; presumably the intent was
            # to test for a real second parent (e.g. old.p2()) -- confirm.
            if old.p2:
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [
                f
                for f in files
                if (f not in filestoamend or not samefile(f, wctx, base))
            ]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    # Return None for removed files.
                    if path in wctx.removed():
                        return None

                    fctx = wctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(
                        repo,
                        ctx_,
                        fctx.path(),
                        fctx.data(),
                        islink=b'l' in flags,
                        isexec=b'x' in flags,
                        copysource=copied.get(path),
                    )
                    return mctx
                except KeyError:
                    return None

        else:
            ui.note(_(b'copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, b'commit.amend')

        if not message:
            message = old.description()
            # Default if message isn't provided and --edit is not passed is to
            # invoke editor, but allow --no-edit. If somehow we don't have any
            # description, let's always start the editor.
            doedit = not message or opts.get(b'edit') in [True, None]
        else:
            # Default if message is provided is to not invoke editor, but allow
            # --edit.
            doedit = opts.get(b'edit') is True
        editor = getcommiteditor(edit=doedit, editform=editform)

        # keep a copy without amend_source for the no-op comparison below
        pureextra = extra.copy()
        extra[b'amend_source'] = old.hex()

        new = context.memctx(
            repo,
            parents=[base.node(), old.p2().node()],
            text=message,
            files=files,
            filectxfn=filectxfn,
            user=user,
            date=date,
            extra=extra,
            editor=editor,
        )

        newdesc = changelog.stripdesc(new.description())
        if (
            (not changes)
            and newdesc == old.description()
            and user == old.user()
            and (date == old.date() or datemaydiffer)
            and pureextra == old.extra()
        ):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This is not what we expect from amend.
            return old.node()

        commitphase = None
        if opts.get(b'secret'):
            commitphase = phases.secret
        newid = repo.commitctx(new)

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get(b'note'):
            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
        backup = ui.configbool(b'rewrite', b'backup-bundle')
        scmutil.cleanupnodes(
            repo,
            mapping,
            b'amend',
            metadata=obsmetadata,
            fixphase=True,
            targetphase=commitphase,
            backup=backup,
        )

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and modified
        # in the amend to "normal" in the dirstate.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normal(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        return newid
3070
3070
3071
3071
3072 def commiteditor(repo, ctx, subs, editform=b''):
3072 def commiteditor(repo, ctx, subs, editform=b''):
3073 if ctx.description():
3073 if ctx.description():
3074 return ctx.description()
3074 return ctx.description()
3075 return commitforceeditor(
3075 return commitforceeditor(
3076 repo, ctx, subs, editform=editform, unchangedmessagedetection=True
3076 repo, ctx, subs, editform=editform, unchangedmessagedetection=True
3077 )
3077 )
3078
3078
3079
3079
3080 def commitforceeditor(
3080 def commitforceeditor(
3081 repo,
3081 repo,
3082 ctx,
3082 ctx,
3083 subs,
3083 subs,
3084 finishdesc=None,
3084 finishdesc=None,
3085 extramsg=None,
3085 extramsg=None,
3086 editform=b'',
3086 editform=b'',
3087 unchangedmessagedetection=False,
3087 unchangedmessagedetection=False,
3088 ):
3088 ):
3089 if not extramsg:
3089 if not extramsg:
3090 extramsg = _(b"Leave message empty to abort commit.")
3090 extramsg = _(b"Leave message empty to abort commit.")
3091
3091
3092 forms = [e for e in editform.split(b'.') if e]
3092 forms = [e for e in editform.split(b'.') if e]
3093 forms.insert(0, b'changeset')
3093 forms.insert(0, b'changeset')
3094 templatetext = None
3094 templatetext = None
3095 while forms:
3095 while forms:
3096 ref = b'.'.join(forms)
3096 ref = b'.'.join(forms)
3097 if repo.ui.config(b'committemplate', ref):
3097 if repo.ui.config(b'committemplate', ref):
3098 templatetext = committext = buildcommittemplate(
3098 templatetext = committext = buildcommittemplate(
3099 repo, ctx, subs, extramsg, ref
3099 repo, ctx, subs, extramsg, ref
3100 )
3100 )
3101 break
3101 break
3102 forms.pop()
3102 forms.pop()
3103 else:
3103 else:
3104 committext = buildcommittext(repo, ctx, subs, extramsg)
3104 committext = buildcommittext(repo, ctx, subs, extramsg)
3105
3105
3106 # run editor in the repository root
3106 # run editor in the repository root
3107 olddir = encoding.getcwd()
3107 olddir = encoding.getcwd()
3108 os.chdir(repo.root)
3108 os.chdir(repo.root)
3109
3109
3110 # make in-memory changes visible to external process
3110 # make in-memory changes visible to external process
3111 tr = repo.currenttransaction()
3111 tr = repo.currenttransaction()
3112 repo.dirstate.write(tr)
3112 repo.dirstate.write(tr)
3113 pending = tr and tr.writepending() and repo.root
3113 pending = tr and tr.writepending() and repo.root
3114
3114
3115 editortext = repo.ui.edit(
3115 editortext = repo.ui.edit(
3116 committext,
3116 committext,
3117 ctx.user(),
3117 ctx.user(),
3118 ctx.extra(),
3118 ctx.extra(),
3119 editform=editform,
3119 editform=editform,
3120 pending=pending,
3120 pending=pending,
3121 repopath=repo.path,
3121 repopath=repo.path,
3122 action=b'commit',
3122 action=b'commit',
3123 )
3123 )
3124 text = editortext
3124 text = editortext
3125
3125
3126 # strip away anything below this special string (used for editors that want
3126 # strip away anything below this special string (used for editors that want
3127 # to display the diff)
3127 # to display the diff)
3128 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3128 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3129 if stripbelow:
3129 if stripbelow:
3130 text = text[: stripbelow.start()]
3130 text = text[: stripbelow.start()]
3131
3131
3132 text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
3132 text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
3133 os.chdir(olddir)
3133 os.chdir(olddir)
3134
3134
3135 if finishdesc:
3135 if finishdesc:
3136 text = finishdesc(text)
3136 text = finishdesc(text)
3137 if not text.strip():
3137 if not text.strip():
3138 raise error.Abort(_(b"empty commit message"))
3138 raise error.Abort(_(b"empty commit message"))
3139 if unchangedmessagedetection and editortext == templatetext:
3139 if unchangedmessagedetection and editortext == templatetext:
3140 raise error.Abort(_(b"commit message unchanged"))
3140 raise error.Abort(_(b"commit message unchanged"))
3141
3141
3142 return text
3142 return text
3143
3143
3144
3144
3145 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3145 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3146 ui = repo.ui
3146 ui = repo.ui
3147 spec = formatter.templatespec(ref, None, None)
3147 spec = formatter.templatespec(ref, None, None)
3148 t = logcmdutil.changesettemplater(ui, repo, spec)
3148 t = logcmdutil.changesettemplater(ui, repo, spec)
3149 t.t.cache.update(
3149 t.t.cache.update(
3150 (k, templater.unquotestring(v))
3150 (k, templater.unquotestring(v))
3151 for k, v in repo.ui.configitems(b'committemplate')
3151 for k, v in repo.ui.configitems(b'committemplate')
3152 )
3152 )
3153
3153
3154 if not extramsg:
3154 if not extramsg:
3155 extramsg = b'' # ensure that extramsg is string
3155 extramsg = b'' # ensure that extramsg is string
3156
3156
3157 ui.pushbuffer()
3157 ui.pushbuffer()
3158 t.show(ctx, extramsg=extramsg)
3158 t.show(ctx, extramsg=extramsg)
3159 return ui.popbuffer()
3159 return ui.popbuffer()
3160
3160
3161
3161
3162 def hgprefix(msg):
3162 def hgprefix(msg):
3163 return b"\n".join([b"HG: %s" % a for a in msg.split(b"\n") if a])
3163 return b"\n".join([b"HG: %s" % a for a in msg.split(b"\n") if a])
3164
3164
3165
3165
3166 def buildcommittext(repo, ctx, subs, extramsg):
3166 def buildcommittext(repo, ctx, subs, extramsg):
3167 edittext = []
3167 edittext = []
3168 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3168 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3169 if ctx.description():
3169 if ctx.description():
3170 edittext.append(ctx.description())
3170 edittext.append(ctx.description())
3171 edittext.append(b"")
3171 edittext.append(b"")
3172 edittext.append(b"") # Empty line between message and comments.
3172 edittext.append(b"") # Empty line between message and comments.
3173 edittext.append(
3173 edittext.append(
3174 hgprefix(
3174 hgprefix(
3175 _(
3175 _(
3176 b"Enter commit message."
3176 b"Enter commit message."
3177 b" Lines beginning with 'HG:' are removed."
3177 b" Lines beginning with 'HG:' are removed."
3178 )
3178 )
3179 )
3179 )
3180 )
3180 )
3181 edittext.append(hgprefix(extramsg))
3181 edittext.append(hgprefix(extramsg))
3182 edittext.append(b"HG: --")
3182 edittext.append(b"HG: --")
3183 edittext.append(hgprefix(_(b"user: %s") % ctx.user()))
3183 edittext.append(hgprefix(_(b"user: %s") % ctx.user()))
3184 if ctx.p2():
3184 if ctx.p2():
3185 edittext.append(hgprefix(_(b"branch merge")))
3185 edittext.append(hgprefix(_(b"branch merge")))
3186 if ctx.branch():
3186 if ctx.branch():
3187 edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
3187 edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
3188 if bookmarks.isactivewdirparent(repo):
3188 if bookmarks.isactivewdirparent(repo):
3189 edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
3189 edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
3190 edittext.extend([hgprefix(_(b"subrepo %s") % s) for s in subs])
3190 edittext.extend([hgprefix(_(b"subrepo %s") % s) for s in subs])
3191 edittext.extend([hgprefix(_(b"added %s") % f) for f in added])
3191 edittext.extend([hgprefix(_(b"added %s") % f) for f in added])
3192 edittext.extend([hgprefix(_(b"changed %s") % f) for f in modified])
3192 edittext.extend([hgprefix(_(b"changed %s") % f) for f in modified])
3193 edittext.extend([hgprefix(_(b"removed %s") % f) for f in removed])
3193 edittext.extend([hgprefix(_(b"removed %s") % f) for f in removed])
3194 if not added and not modified and not removed:
3194 if not added and not modified and not removed:
3195 edittext.append(hgprefix(_(b"no files changed")))
3195 edittext.append(hgprefix(_(b"no files changed")))
3196 edittext.append(b"")
3196 edittext.append(b"")
3197
3197
3198 return b"\n".join(edittext)
3198 return b"\n".join(edittext)
3199
3199
3200
3200
3201 def commitstatus(repo, node, branch, bheads=None, opts=None):
3201 def commitstatus(repo, node, branch, bheads=None, opts=None):
3202 if opts is None:
3202 if opts is None:
3203 opts = {}
3203 opts = {}
3204 ctx = repo[node]
3204 ctx = repo[node]
3205 parents = ctx.parents()
3205 parents = ctx.parents()
3206
3206
3207 if (
3207 if (
3208 not opts.get(b'amend')
3208 not opts.get(b'amend')
3209 and bheads
3209 and bheads
3210 and node not in bheads
3210 and node not in bheads
3211 and not [
3211 and not [
3212 x for x in parents if x.node() in bheads and x.branch() == branch
3212 x for x in parents if x.node() in bheads and x.branch() == branch
3213 ]
3213 ]
3214 ):
3214 ):
3215 repo.ui.status(_(b'created new head\n'))
3215 repo.ui.status(_(b'created new head\n'))
3216 # The message is not printed for initial roots. For the other
3216 # The message is not printed for initial roots. For the other
3217 # changesets, it is printed in the following situations:
3217 # changesets, it is printed in the following situations:
3218 #
3218 #
3219 # Par column: for the 2 parents with ...
3219 # Par column: for the 2 parents with ...
3220 # N: null or no parent
3220 # N: null or no parent
3221 # B: parent is on another named branch
3221 # B: parent is on another named branch
3222 # C: parent is a regular non head changeset
3222 # C: parent is a regular non head changeset
3223 # H: parent was a branch head of the current branch
3223 # H: parent was a branch head of the current branch
3224 # Msg column: whether we print "created new head" message
3224 # Msg column: whether we print "created new head" message
3225 # In the following, it is assumed that there already exists some
3225 # In the following, it is assumed that there already exists some
3226 # initial branch heads of the current branch, otherwise nothing is
3226 # initial branch heads of the current branch, otherwise nothing is
3227 # printed anyway.
3227 # printed anyway.
3228 #
3228 #
3229 # Par Msg Comment
3229 # Par Msg Comment
3230 # N N y additional topo root
3230 # N N y additional topo root
3231 #
3231 #
3232 # B N y additional branch root
3232 # B N y additional branch root
3233 # C N y additional topo head
3233 # C N y additional topo head
3234 # H N n usual case
3234 # H N n usual case
3235 #
3235 #
3236 # B B y weird additional branch root
3236 # B B y weird additional branch root
3237 # C B y branch merge
3237 # C B y branch merge
3238 # H B n merge with named branch
3238 # H B n merge with named branch
3239 #
3239 #
3240 # C C y additional head from merge
3240 # C C y additional head from merge
3241 # C H n merge with a head
3241 # C H n merge with a head
3242 #
3242 #
3243 # H H n head merge: head count decreases
3243 # H H n head merge: head count decreases
3244
3244
3245 if not opts.get(b'close_branch'):
3245 if not opts.get(b'close_branch'):
3246 for r in parents:
3246 for r in parents:
3247 if r.closesbranch() and r.branch() == branch:
3247 if r.closesbranch() and r.branch() == branch:
3248 repo.ui.status(
3248 repo.ui.status(
3249 _(b'reopening closed branch head %d\n') % r.rev()
3249 _(b'reopening closed branch head %d\n') % r.rev()
3250 )
3250 )
3251
3251
3252 if repo.ui.debugflag:
3252 if repo.ui.debugflag:
3253 repo.ui.write(
3253 repo.ui.write(
3254 _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
3254 _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
3255 )
3255 )
3256 elif repo.ui.verbose:
3256 elif repo.ui.verbose:
3257 repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3257 repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3258
3258
3259
3259
3260 def postcommitstatus(repo, pats, opts):
3260 def postcommitstatus(repo, pats, opts):
3261 return repo.status(match=scmutil.match(repo[None], pats, opts))
3261 return repo.status(match=scmutil.match(repo[None], pats, opts))
3262
3262
3263
3263
3264 def revert(ui, repo, ctx, parents, *pats, **opts):
3264 def revert(ui, repo, ctx, parents, *pats, **opts):
3265 opts = pycompat.byteskwargs(opts)
3265 opts = pycompat.byteskwargs(opts)
3266 parent, p2 = parents
3266 parent, p2 = parents
3267 node = ctx.node()
3267 node = ctx.node()
3268
3268
3269 mf = ctx.manifest()
3269 mf = ctx.manifest()
3270 if node == p2:
3270 if node == p2:
3271 parent = p2
3271 parent = p2
3272
3272
3273 # need all matching names in dirstate and manifest of target rev,
3273 # need all matching names in dirstate and manifest of target rev,
3274 # so have to walk both. do not print errors if files exist in one
3274 # so have to walk both. do not print errors if files exist in one
3275 # but not other. in both cases, filesets should be evaluated against
3275 # but not other. in both cases, filesets should be evaluated against
3276 # workingctx to get consistent result (issue4497). this means 'set:**'
3276 # workingctx to get consistent result (issue4497). this means 'set:**'
3277 # cannot be used to select missing files from target rev.
3277 # cannot be used to select missing files from target rev.
3278
3278
3279 # `names` is a mapping for all elements in working copy and target revision
3279 # `names` is a mapping for all elements in working copy and target revision
3280 # The mapping is in the form:
3280 # The mapping is in the form:
3281 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3281 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3282 names = {}
3282 names = {}
3283 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3283 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3284
3284
3285 with repo.wlock():
3285 with repo.wlock():
3286 ## filling of the `names` mapping
3286 ## filling of the `names` mapping
3287 # walk dirstate to fill `names`
3287 # walk dirstate to fill `names`
3288
3288
3289 interactive = opts.get(b'interactive', False)
3289 interactive = opts.get(b'interactive', False)
3290 wctx = repo[None]
3290 wctx = repo[None]
3291 m = scmutil.match(wctx, pats, opts)
3291 m = scmutil.match(wctx, pats, opts)
3292
3292
3293 # we'll need this later
3293 # we'll need this later
3294 targetsubs = sorted(s for s in wctx.substate if m(s))
3294 targetsubs = sorted(s for s in wctx.substate if m(s))
3295
3295
3296 if not m.always():
3296 if not m.always():
3297 matcher = matchmod.badmatch(m, lambda x, y: False)
3297 matcher = matchmod.badmatch(m, lambda x, y: False)
3298 for abs in wctx.walk(matcher):
3298 for abs in wctx.walk(matcher):
3299 names[abs] = m.exact(abs)
3299 names[abs] = m.exact(abs)
3300
3300
3301 # walk target manifest to fill `names`
3301 # walk target manifest to fill `names`
3302
3302
3303 def badfn(path, msg):
3303 def badfn(path, msg):
3304 if path in names:
3304 if path in names:
3305 return
3305 return
3306 if path in ctx.substate:
3306 if path in ctx.substate:
3307 return
3307 return
3308 path_ = path + b'/'
3308 path_ = path + b'/'
3309 for f in names:
3309 for f in names:
3310 if f.startswith(path_):
3310 if f.startswith(path_):
3311 return
3311 return
3312 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3312 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3313
3313
3314 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3314 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3315 if abs not in names:
3315 if abs not in names:
3316 names[abs] = m.exact(abs)
3316 names[abs] = m.exact(abs)
3317
3317
3318 # Find status of all file in `names`.
3318 # Find status of all file in `names`.
3319 m = scmutil.matchfiles(repo, names)
3319 m = scmutil.matchfiles(repo, names)
3320
3320
3321 changes = repo.status(
3321 changes = repo.status(
3322 node1=node, match=m, unknown=True, ignored=True, clean=True
3322 node1=node, match=m, unknown=True, ignored=True, clean=True
3323 )
3323 )
3324 else:
3324 else:
3325 changes = repo.status(node1=node, match=m)
3325 changes = repo.status(node1=node, match=m)
3326 for kind in changes:
3326 for kind in changes:
3327 for abs in kind:
3327 for abs in kind:
3328 names[abs] = m.exact(abs)
3328 names[abs] = m.exact(abs)
3329
3329
3330 m = scmutil.matchfiles(repo, names)
3330 m = scmutil.matchfiles(repo, names)
3331
3331
3332 modified = set(changes.modified)
3332 modified = set(changes.modified)
3333 added = set(changes.added)
3333 added = set(changes.added)
3334 removed = set(changes.removed)
3334 removed = set(changes.removed)
3335 _deleted = set(changes.deleted)
3335 _deleted = set(changes.deleted)
3336 unknown = set(changes.unknown)
3336 unknown = set(changes.unknown)
3337 unknown.update(changes.ignored)
3337 unknown.update(changes.ignored)
3338 clean = set(changes.clean)
3338 clean = set(changes.clean)
3339 modadded = set()
3339 modadded = set()
3340
3340
3341 # We need to account for the state of the file in the dirstate,
3341 # We need to account for the state of the file in the dirstate,
3342 # even when we revert against something else than parent. This will
3342 # even when we revert against something else than parent. This will
3343 # slightly alter the behavior of revert (doing back up or not, delete
3343 # slightly alter the behavior of revert (doing back up or not, delete
3344 # or just forget etc).
3344 # or just forget etc).
3345 if parent == node:
3345 if parent == node:
3346 dsmodified = modified
3346 dsmodified = modified
3347 dsadded = added
3347 dsadded = added
3348 dsremoved = removed
3348 dsremoved = removed
3349 # store all local modifications, useful later for rename detection
3349 # store all local modifications, useful later for rename detection
3350 localchanges = dsmodified | dsadded
3350 localchanges = dsmodified | dsadded
3351 modified, added, removed = set(), set(), set()
3351 modified, added, removed = set(), set(), set()
3352 else:
3352 else:
3353 changes = repo.status(node1=parent, match=m)
3353 changes = repo.status(node1=parent, match=m)
3354 dsmodified = set(changes.modified)
3354 dsmodified = set(changes.modified)
3355 dsadded = set(changes.added)
3355 dsadded = set(changes.added)
3356 dsremoved = set(changes.removed)
3356 dsremoved = set(changes.removed)
3357 # store all local modifications, useful later for rename detection
3357 # store all local modifications, useful later for rename detection
3358 localchanges = dsmodified | dsadded
3358 localchanges = dsmodified | dsadded
3359
3359
3360 # only take into account for removes between wc and target
3360 # only take into account for removes between wc and target
3361 clean |= dsremoved - removed
3361 clean |= dsremoved - removed
3362 dsremoved &= removed
3362 dsremoved &= removed
3363 # distinct between dirstate remove and other
3363 # distinct between dirstate remove and other
3364 removed -= dsremoved
3364 removed -= dsremoved
3365
3365
3366 modadded = added & dsmodified
3366 modadded = added & dsmodified
3367 added -= modadded
3367 added -= modadded
3368
3368
3369 # tell newly modified apart.
3369 # tell newly modified apart.
3370 dsmodified &= modified
3370 dsmodified &= modified
3371 dsmodified |= modified & dsadded # dirstate added may need backup
3371 dsmodified |= modified & dsadded # dirstate added may need backup
3372 modified -= dsmodified
3372 modified -= dsmodified
3373
3373
3374 # We need to wait for some post-processing to update this set
3374 # We need to wait for some post-processing to update this set
3375 # before making the distinction. The dirstate will be used for
3375 # before making the distinction. The dirstate will be used for
3376 # that purpose.
3376 # that purpose.
3377 dsadded = added
3377 dsadded = added
3378
3378
3379 # in case of merge, files that are actually added can be reported as
3379 # in case of merge, files that are actually added can be reported as
3380 # modified, we need to post process the result
3380 # modified, we need to post process the result
3381 if p2 != nullid:
3381 if p2 != nullid:
3382 mergeadd = set(dsmodified)
3382 mergeadd = set(dsmodified)
3383 for path in dsmodified:
3383 for path in dsmodified:
3384 if path in mf:
3384 if path in mf:
3385 mergeadd.remove(path)
3385 mergeadd.remove(path)
3386 dsadded |= mergeadd
3386 dsadded |= mergeadd
3387 dsmodified -= mergeadd
3387 dsmodified -= mergeadd
3388
3388
3389 # if f is a rename, update `names` to also revert the source
3389 # if f is a rename, update `names` to also revert the source
3390 for f in localchanges:
3390 for f in localchanges:
3391 src = repo.dirstate.copied(f)
3391 src = repo.dirstate.copied(f)
3392 # XXX should we check for rename down to target node?
3392 # XXX should we check for rename down to target node?
3393 if src and src not in names and repo.dirstate[src] == b'r':
3393 if src and src not in names and repo.dirstate[src] == b'r':
3394 dsremoved.add(src)
3394 dsremoved.add(src)
3395 names[src] = True
3395 names[src] = True
3396
3396
3397 # determine the exact nature of the deleted changesets
3397 # determine the exact nature of the deleted changesets
3398 deladded = set(_deleted)
3398 deladded = set(_deleted)
3399 for path in _deleted:
3399 for path in _deleted:
3400 if path in mf:
3400 if path in mf:
3401 deladded.remove(path)
3401 deladded.remove(path)
3402 deleted = _deleted - deladded
3402 deleted = _deleted - deladded
3403
3403
3404 # distinguish between file to forget and the other
3404 # distinguish between file to forget and the other
3405 added = set()
3405 added = set()
3406 for abs in dsadded:
3406 for abs in dsadded:
3407 if repo.dirstate[abs] != b'a':
3407 if repo.dirstate[abs] != b'a':
3408 added.add(abs)
3408 added.add(abs)
3409 dsadded -= added
3409 dsadded -= added
3410
3410
3411 for abs in deladded:
3411 for abs in deladded:
3412 if repo.dirstate[abs] == b'a':
3412 if repo.dirstate[abs] == b'a':
3413 dsadded.add(abs)
3413 dsadded.add(abs)
3414 deladded -= dsadded
3414 deladded -= dsadded
3415
3415
3416 # For files marked as removed, we check if an unknown file is present at
3416 # For files marked as removed, we check if an unknown file is present at
3417 # the same path. If a such file exists it may need to be backed up.
3417 # the same path. If a such file exists it may need to be backed up.
3418 # Making the distinction at this stage helps have simpler backup
3418 # Making the distinction at this stage helps have simpler backup
3419 # logic.
3419 # logic.
3420 removunk = set()
3420 removunk = set()
3421 for abs in removed:
3421 for abs in removed:
3422 target = repo.wjoin(abs)
3422 target = repo.wjoin(abs)
3423 if os.path.lexists(target):
3423 if os.path.lexists(target):
3424 removunk.add(abs)
3424 removunk.add(abs)
3425 removed -= removunk
3425 removed -= removunk
3426
3426
3427 dsremovunk = set()
3427 dsremovunk = set()
3428 for abs in dsremoved:
3428 for abs in dsremoved:
3429 target = repo.wjoin(abs)
3429 target = repo.wjoin(abs)
3430 if os.path.lexists(target):
3430 if os.path.lexists(target):
3431 dsremovunk.add(abs)
3431 dsremovunk.add(abs)
3432 dsremoved -= dsremovunk
3432 dsremoved -= dsremovunk
3433
3433
3434 # action to be actually performed by revert
3434 # action to be actually performed by revert
3435 # (<list of file>, message>) tuple
3435 # (<list of file>, message>) tuple
3436 actions = {
3436 actions = {
3437 b'revert': ([], _(b'reverting %s\n')),
3437 b'revert': ([], _(b'reverting %s\n')),
3438 b'add': ([], _(b'adding %s\n')),
3438 b'add': ([], _(b'adding %s\n')),
3439 b'remove': ([], _(b'removing %s\n')),
3439 b'remove': ([], _(b'removing %s\n')),
3440 b'drop': ([], _(b'removing %s\n')),
3440 b'drop': ([], _(b'removing %s\n')),
3441 b'forget': ([], _(b'forgetting %s\n')),
3441 b'forget': ([], _(b'forgetting %s\n')),
3442 b'undelete': ([], _(b'undeleting %s\n')),
3442 b'undelete': ([], _(b'undeleting %s\n')),
3443 b'noop': (None, _(b'no changes needed to %s\n')),
3443 b'noop': (None, _(b'no changes needed to %s\n')),
3444 b'unknown': (None, _(b'file not managed: %s\n')),
3444 b'unknown': (None, _(b'file not managed: %s\n')),
3445 }
3445 }
3446
3446
3447 # "constant" that convey the backup strategy.
3447 # "constant" that convey the backup strategy.
3448 # All set to `discard` if `no-backup` is set do avoid checking
3448 # All set to `discard` if `no-backup` is set do avoid checking
3449 # no_backup lower in the code.
3449 # no_backup lower in the code.
3450 # These values are ordered for comparison purposes
3450 # These values are ordered for comparison purposes
3451 backupinteractive = 3 # do backup if interactively modified
3451 backupinteractive = 3 # do backup if interactively modified
3452 backup = 2 # unconditionally do backup
3452 backup = 2 # unconditionally do backup
3453 check = 1 # check if the existing file differs from target
3453 check = 1 # check if the existing file differs from target
3454 discard = 0 # never do backup
3454 discard = 0 # never do backup
3455 if opts.get(b'no_backup'):
3455 if opts.get(b'no_backup'):
3456 backupinteractive = backup = check = discard
3456 backupinteractive = backup = check = discard
3457 if interactive:
3457 if interactive:
3458 dsmodifiedbackup = backupinteractive
3458 dsmodifiedbackup = backupinteractive
3459 else:
3459 else:
3460 dsmodifiedbackup = backup
3460 dsmodifiedbackup = backup
3461 tobackup = set()
3461 tobackup = set()
3462
3462
3463 backupanddel = actions[b'remove']
3463 backupanddel = actions[b'remove']
3464 if not opts.get(b'no_backup'):
3464 if not opts.get(b'no_backup'):
3465 backupanddel = actions[b'drop']
3465 backupanddel = actions[b'drop']
3466
3466
3467 disptable = (
3467 disptable = (
3468 # dispatch table:
3468 # dispatch table:
3469 # file state
3469 # file state
3470 # action
3470 # action
3471 # make backup
3471 # make backup
3472 ## Sets that results that will change file on disk
3472 ## Sets that results that will change file on disk
3473 # Modified compared to target, no local change
3473 # Modified compared to target, no local change
3474 (modified, actions[b'revert'], discard),
3474 (modified, actions[b'revert'], discard),
3475 # Modified compared to target, but local file is deleted
3475 # Modified compared to target, but local file is deleted
3476 (deleted, actions[b'revert'], discard),
3476 (deleted, actions[b'revert'], discard),
3477 # Modified compared to target, local change
3477 # Modified compared to target, local change
3478 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3478 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3479 # Added since target
3479 # Added since target
3480 (added, actions[b'remove'], discard),
3480 (added, actions[b'remove'], discard),
3481 # Added in working directory
3481 # Added in working directory
3482 (dsadded, actions[b'forget'], discard),
3482 (dsadded, actions[b'forget'], discard),
3483 # Added since target, have local modification
3483 # Added since target, have local modification
3484 (modadded, backupanddel, backup),
3484 (modadded, backupanddel, backup),
3485 # Added since target but file is missing in working directory
3485 # Added since target but file is missing in working directory
3486 (deladded, actions[b'drop'], discard),
3486 (deladded, actions[b'drop'], discard),
3487 # Removed since target, before working copy parent
3487 # Removed since target, before working copy parent
3488 (removed, actions[b'add'], discard),
3488 (removed, actions[b'add'], discard),
3489 # Same as `removed` but an unknown file exists at the same path
3489 # Same as `removed` but an unknown file exists at the same path
3490 (removunk, actions[b'add'], check),
3490 (removunk, actions[b'add'], check),
3491 # Removed since targe, marked as such in working copy parent
3491 # Removed since targe, marked as such in working copy parent
3492 (dsremoved, actions[b'undelete'], discard),
3492 (dsremoved, actions[b'undelete'], discard),
3493 # Same as `dsremoved` but an unknown file exists at the same path
3493 # Same as `dsremoved` but an unknown file exists at the same path
3494 (dsremovunk, actions[b'undelete'], check),
3494 (dsremovunk, actions[b'undelete'], check),
3495 ## the following sets does not result in any file changes
3495 ## the following sets does not result in any file changes
3496 # File with no modification
3496 # File with no modification
3497 (clean, actions[b'noop'], discard),
3497 (clean, actions[b'noop'], discard),
3498 # Existing file, not tracked anywhere
3498 # Existing file, not tracked anywhere
3499 (unknown, actions[b'unknown'], discard),
3499 (unknown, actions[b'unknown'], discard),
3500 )
3500 )
3501
3501
3502 for abs, exact in sorted(names.items()):
3502 for abs, exact in sorted(names.items()):
3503 # target file to be touch on disk (relative to cwd)
3503 # target file to be touch on disk (relative to cwd)
3504 target = repo.wjoin(abs)
3504 target = repo.wjoin(abs)
3505 # search the entry in the dispatch table.
3505 # search the entry in the dispatch table.
3506 # if the file is in any of these sets, it was touched in the working
3506 # if the file is in any of these sets, it was touched in the working
3507 # directory parent and we are sure it needs to be reverted.
3507 # directory parent and we are sure it needs to be reverted.
3508 for table, (xlist, msg), dobackup in disptable:
3508 for table, (xlist, msg), dobackup in disptable:
3509 if abs not in table:
3509 if abs not in table:
3510 continue
3510 continue
3511 if xlist is not None:
3511 if xlist is not None:
3512 xlist.append(abs)
3512 xlist.append(abs)
3513 if dobackup:
3513 if dobackup:
3514 # If in interactive mode, don't automatically create
3514 # If in interactive mode, don't automatically create
3515 # .orig files (issue4793)
3515 # .orig files (issue4793)
3516 if dobackup == backupinteractive:
3516 if dobackup == backupinteractive:
3517 tobackup.add(abs)
3517 tobackup.add(abs)
3518 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3518 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3519 absbakname = scmutil.backuppath(ui, repo, abs)
3519 absbakname = scmutil.backuppath(ui, repo, abs)
3520 bakname = os.path.relpath(
3520 bakname = os.path.relpath(
3521 absbakname, start=repo.root
3521 absbakname, start=repo.root
3522 )
3522 )
3523 ui.note(
3523 ui.note(
3524 _(b'saving current version of %s as %s\n')
3524 _(b'saving current version of %s as %s\n')
3525 % (uipathfn(abs), uipathfn(bakname))
3525 % (uipathfn(abs), uipathfn(bakname))
3526 )
3526 )
3527 if not opts.get(b'dry_run'):
3527 if not opts.get(b'dry_run'):
3528 if interactive:
3528 if interactive:
3529 util.copyfile(target, absbakname)
3529 util.copyfile(target, absbakname)
3530 else:
3530 else:
3531 util.rename(target, absbakname)
3531 util.rename(target, absbakname)
3532 if opts.get(b'dry_run'):
3532 if opts.get(b'dry_run'):
3533 if ui.verbose or not exact:
3533 if ui.verbose or not exact:
3534 ui.status(msg % uipathfn(abs))
3534 ui.status(msg % uipathfn(abs))
3535 elif exact:
3535 elif exact:
3536 ui.warn(msg % uipathfn(abs))
3536 ui.warn(msg % uipathfn(abs))
3537 break
3537 break
3538
3538
3539 if not opts.get(b'dry_run'):
3539 if not opts.get(b'dry_run'):
3540 needdata = (b'revert', b'add', b'undelete')
3540 needdata = (b'revert', b'add', b'undelete')
3541 oplist = [actions[name][0] for name in needdata]
3541 oplist = [actions[name][0] for name in needdata]
3542 prefetch = scmutil.prefetchfiles
3542 prefetch = scmutil.prefetchfiles
3543 matchfiles = scmutil.matchfiles
3543 matchfiles = scmutil.matchfiles
3544 prefetch(
3544 prefetch(
3545 repo,
3545 repo,
3546 [ctx.rev()],
3546 [ctx.rev()],
3547 matchfiles(repo, [f for sublist in oplist for f in sublist]),
3547 matchfiles(repo, [f for sublist in oplist for f in sublist]),
3548 )
3548 )
3549 match = scmutil.match(repo[None], pats)
3549 match = scmutil.match(repo[None], pats)
3550 _performrevert(
3550 _performrevert(
3551 repo,
3551 repo,
3552 parents,
3552 parents,
3553 ctx,
3553 ctx,
3554 names,
3554 names,
3555 uipathfn,
3555 uipathfn,
3556 actions,
3556 actions,
3557 match,
3557 match,
3558 interactive,
3558 interactive,
3559 tobackup,
3559 tobackup,
3560 )
3560 )
3561
3561
3562 if targetsubs:
3562 if targetsubs:
3563 # Revert the subrepos on the revert list
3563 # Revert the subrepos on the revert list
3564 for sub in targetsubs:
3564 for sub in targetsubs:
3565 try:
3565 try:
3566 wctx.sub(sub).revert(
3566 wctx.sub(sub).revert(
3567 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3567 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3568 )
3568 )
3569 except KeyError:
3569 except KeyError:
3570 raise error.Abort(
3570 raise error.Abort(
3571 b"subrepository '%s' does not exist in %s!"
3571 b"subrepository '%s' does not exist in %s!"
3572 % (sub, short(ctx.node()))
3572 % (sub, short(ctx.node()))
3573 )
3573 )
3574
3574
3575
3575
def _performrevert(
    repo,
    parents,
    ctx,
    names,
    uipathfn,
    actions,
    match,
    interactive=False,
    tobackup=None,
):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    repo: the local repository
    parents: (p1, p2) nodes of the working directory parents
    ctx: the changectx being reverted to
    names: dict mapping file path -> whether it was matched exactly
    uipathfn: path formatter for user-facing messages
    actions: dict of action name -> ([files], message-template) pairs
    match: matcher used to filter interactive hunk selection
    interactive: if True, prompt per file/hunk instead of reverting everything
    tobackup: set of files whose current content must be backed up first
    """
    parent, p2 = parents
    node = ctx.node()
    excluded_files = []

    # Write file f's content (and flags) from the target ctx into the wdir.
    def checkout(f):
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    # Unlink f from disk (best-effort) and mark it removed in the dirstate.
    def doremove(f):
        try:
            rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            # file may already be gone; dirstate is still updated below
            pass
        repo.dirstate.remove(f)

    # Report the action for f unless it was matched exactly and we're quiet.
    def prntstatusmsg(action, f):
        exact = names[f]
        if repo.ui.verbose or not exact:
            repo.ui.status(actions[action][1] % uipathfn(f))

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    # 'forget': files added in wdir; drop them from tracking, keep on disk.
    for f in actions[b'forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'forget', f)
                repo.dirstate.drop(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'forget', f)
            repo.dirstate.drop(f)
    # 'remove': files that must also be deleted from the working directory.
    for f in actions[b'remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'remove', f)
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'remove', f)
            doremove(f)
    # 'drop': mark removed in dirstate without touching the on-disk file.
    for f in actions[b'drop'][0]:
        audit_path(f)
        prntstatusmsg(b'drop', f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(
            repo.ui,
            whitespace=True,
            section=b'commands',
            configprefix=b'revert.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        # 'apply' shows hunks wdir->target; 'discard'/'keep' show the reverse
        # when reverting to the parent, so the prompts read naturally.
        operation = b'apply'
        if node == parent:
            if repo.ui.configbool(
                b'experimental', b'revert.interactive.select-to-keep'
            ):
                operation = b'keep'
            else:
                operation = b'discard'

        if operation == b'apply':
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        else:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(
                repo.ui, originalchunks, match, operation=operation
            )
            # 'discard' hunks were shown target->wdir; flip them back so the
            # selected hunks can be applied to the working directory.
            if operation == b'discard':
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)

        # FIXME: when doing an interactive revert of a copy, there's no way of
        # performing a partial revert of the added file, the only option is
        # "remove added file <name> (Yn)?", so we don't need to worry about the
        # alsorestore value. Ideally we'd be able to partially revert
        # copied/renamed files.
        newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
            chunks, originalchunks
        )
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        # chunks are serialized per file, but files aren't sorted
        for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
            prntstatusmsg(b'revert', f)
        files = set()
        for c in chunks:
            if ishunk(c):
                abs = c.header.filename()
                # Create a backup file only if this hunk should be backed up
                if c.header.filename() in tobackup:
                    target = repo.wjoin(abs)
                    bakname = scmutil.backuppath(repo.ui, repo, abs)
                    util.copyfile(target, bakname)
                    tobackup.remove(abs)
                if abs not in files:
                    files.add(abs)
                    # under 'keep', start from the target revision's content
                    # and apply the selected (kept) hunks on top of it
                    if operation == b'keep':
                        checkout(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        # non-interactive: fully restore each file scheduled for revert
        for f in actions[b'revert'][0]:
            prntstatusmsg(b'revert', f)
            checkout(f)
            if normal:
                normal(f)

    for f in actions[b'add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            prntstatusmsg(b'add', f)
            checkout(f)
            repo.dirstate.add(f)

    # see the comment above: normallookup is needed for merge parents
    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions[b'undelete'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
            )
            if choice == 0:
                prntstatusmsg(b'undelete', f)
                checkout(f)
                normal(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'undelete', f)
            checkout(f)
            normal(f)

    # restore copy records for every file we (re)materialized above
    copied = copies.pathcopies(repo[parent], ctx)

    for f in (
        actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
    ):
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3775
3775
3776
3776
# Extension hook points. Extensions append callables to these util.hooks()
# instances; core commands invoke them at the documented moments.

# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
3794
3794
3795
3795
def checkunfinished(repo, commit=False, skipmerge=False):
    '''Look for an unfinished multistep operation, like graft, and abort
    if found. It's probably good to check this right before
    bailifchanged().
    '''
    # Non-clearable states are checked first, so things like rebase take
    # precedence over update.
    for st in statemod._unfinishedstates:
        exempt = (
            st._clearable
            or (commit and st._allowcommit)
            or st._reportonly
        )
        if exempt:
            continue
        if st.isunfinished(repo):
            raise error.Abort(st.msg(), hint=st.hint())

    # Then check the clearable states (e.g. an interrupted update/merge).
    for st in statemod._unfinishedstates:
        exempt = (
            not st._clearable
            or (commit and st._allowcommit)
            or (st._opname == b'merge' and skipmerge)
            or st._reportonly
        )
        if exempt:
            continue
        if st.isunfinished(repo):
            raise error.Abort(st.msg(), hint=st.hint())
3823
3823
3824
3824
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.

    Aborts if a non-clearable (and non-report-only) operation is in
    progress; otherwise unlinks the state file of every clearable,
    non-merge operation that is currently unfinished.
    '''
    for state in statemod._unfinishedstates:
        if state._reportonly:
            continue
        if not state._clearable and state.isunfinished(repo):
            raise error.Abort(state.msg(), hint=state.hint())

    for s in statemod._unfinishedstates:
        # Fix: the original tested 'state._reportonly' here — 'state' being
        # the stale loop variable left over from the loop above — instead of
        # 's', so the report-only skip applied to the wrong state object.
        if s._opname == b'merge' or s._reportonly:
            continue
        if s._clearable and s.isunfinished(repo):
            util.unlink(repo.vfs.join(s._fname))
3840
3840
3841
3841
def getunfinishedstate(repo):
    ''' Checks for unfinished operations and returns statecheck object
    for it'''
    # Return the first state that reports itself unfinished, if any.
    return next(
        (st for st in statemod._unfinishedstates if st.isunfinished(repo)),
        None,
    )
3849
3849
3850
3850
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    statemod._unfinishedstates list is checked for an unfinished operation
    and the corresponding message to finish it is generated if a method to
    continue is supported by the operation.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean.
    '''
    template = _(b"continue: %s")
    for st in statemod._unfinishedstates:
        if st._continueflag and st.isunfinished(repo):
            return template % st.continuemsg(), True
    # No continuable operation: suggest committing if the wdir is dirty.
    dirty = repo[None].dirty(missing=True, merge=False, branch=False)
    if dirty:
        return template % _(b"hg commit"), False
    return None, None
3871
3871
3872
3872
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's a an unfinished operation that supports continue flag,
    howtocontinue will yield repo.ui.warn as the reporter.

    Otherwise, it will yield repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter(b"%s\n" % msg)
3887
3887
3888
3888
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    '''
    msg, warning = howtocontinue(repo)
    # Only a continuable (warning-level) operation yields a hint.
    hint = msg if warning else None
    raise error.Abort(_(b'no %s in progress') % task, hint=hint)
3903
3903
3904
3904
def abortgraft(ui, repo, graftstate):
    """abort the interrupted graft and rollbacks to the state before interrupted
    graft

    ui: the ui to report progress on
    repo: the local repository
    graftstate: statemod.cmdstate holding the interrupted graft's data

    Updates back to the changeset the graft started from and strips the
    grafted changesets, unless any of them is public or has descendants
    that the graft did not create. Returns 0 on success; raises
    error.Abort when there is no graft to abort or the state file is too
    old to contain the needed data.
    """
    if not graftstate.exists():
        raise error.Abort(_(b"no interrupted graft to abort"))
    statedata = readgraftstate(repo, graftstate)
    newnodes = statedata.get(b'newnodes')
    if newnodes is None:
        # and old graft state which does not have all the data required to abort
        # the graft
        raise error.Abort(_(b"cannot abort using an old graftstate"))

    # changeset from which graft operation was started
    if len(newnodes) > 0:
        startctx = repo[newnodes[0]].p1()
    else:
        startctx = repo[b'.']
    # whether to strip or not
    cleanup = False
    # imported here (not at module level) — presumably to avoid an import
    # cycle with the hg module; verify before moving it
    from . import hg

    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        cleanup = True
        # checking that none of the newnodes turned public or is public
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            repo.ui.warn(
                _(b"cannot clean up public changesets %s\n")
                % b', '.join(bytes(repo[r]) for r in immutable),
                hint=_(b"see 'hg help phases' for details"),
            )
            cleanup = False

        # checking that no new nodes are created on top of grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            repo.ui.warn(
                _(
                    b"new changesets detected on destination "
                    b"branch, can't strip\n"
                )
            )
            cleanup = False

    if cleanup:
        with repo.wlock(), repo.lock():
            # move the working directory off the nodes we're about to strip
            hg.updaterepo(repo, startctx.node(), overwrite=True)
            # stripping the new nodes created
            strippoints = [
                c.node() for c in repo.set(b"roots(%ld)", newnodes)
            ]
            repair.strip(repo.ui, repo, strippoints, backup=False)

    if not cleanup:
        # we don't update to the startnode if we can't strip
        startctx = repo[b'.']
        hg.updaterepo(repo, startctx.node(), overwrite=True)

    ui.status(_(b"graft aborted\n"))
    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
3968
3968
3969
3969
def readgraftstate(repo, graftstate):
    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
    """read the graft state file and return a dict of the data stored in it"""
    try:
        return graftstate.read()
    except error.CorruptedState:
        # Fall back to the legacy plain-text format: one node per line.
        nodelines = repo.vfs.read(b'graftstate').splitlines()
        return {b'nodes': nodelines}
3978
3978
3979
3979
def hgabortgraft(ui, repo):
    """ abort logic for aborting graft using 'hg abort'"""
    with repo.wlock():
        # delegate to abortgraft with a freshly-loaded state handle
        state = statemod.cmdstate(repo, b'graftstate')
        return abortgraft(ui, repo, state)
@@ -1,705 +1,702
1 # encoding.py - character transcoding support for Mercurial
1 # encoding.py - character transcoding support for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import, print_function
8 from __future__ import absolute_import, print_function
9
9
10 import locale
10 import locale
11 import os
11 import os
12 import unicodedata
12 import unicodedata
13
13
14 from .pycompat import getattr
14 from .pycompat import getattr
15 from . import (
15 from . import (
16 error,
16 error,
17 policy,
17 policy,
18 pycompat,
18 pycompat,
19 )
19 )
20
20
21 from .pure import charencode as charencodepure
21 from .pure import charencode as charencodepure
22
22
23 _TYPE_CHECKING = False
23 if pycompat.TYPE_CHECKING:
24
25 if not globals(): # hide this from non-pytype users
26 from typing import (
24 from typing import (
27 Any,
25 Any,
28 Callable,
26 Callable,
29 List,
27 List,
30 TYPE_CHECKING as _TYPE_CHECKING,
31 Text,
28 Text,
32 Type,
29 Type,
33 TypeVar,
30 TypeVar,
34 Union,
31 Union,
35 )
32 )
36
33
37 # keep pyflakes happy
34 # keep pyflakes happy
38 for t in (Any, Callable, List, Text, Type, Union):
35 for t in (Any, Callable, List, Text, Type, Union):
39 assert t
36 assert t
40
37
41 _Tlocalstr = TypeVar('_Tlocalstr', bound='localstr')
38 _Tlocalstr = TypeVar('_Tlocalstr', bound='localstr')
42
39
43 charencode = policy.importmod('charencode')
40 charencode = policy.importmod('charencode')
44
41
45 isasciistr = charencode.isasciistr
42 isasciistr = charencode.isasciistr
46 asciilower = charencode.asciilower
43 asciilower = charencode.asciilower
47 asciiupper = charencode.asciiupper
44 asciiupper = charencode.asciiupper
48 _jsonescapeu8fast = charencode.jsonescapeu8fast
45 _jsonescapeu8fast = charencode.jsonescapeu8fast
49
46
50 _sysstr = pycompat.sysstr
47 _sysstr = pycompat.sysstr
51
48
if pycompat.ispy3:
    unichr = chr

# These unicode characters are ignored by HFS+ (Apple Technote 1150,
# "Unicode Subtleties"), so we need to ignore them in some places for
# sanity.
_ignore = [
    unichr(int(x, 16)).encode("utf-8")
    for x in b"200c 200d 200e 200f 202a 202b 202c 202d 202e "
    b"206a 206b 206c 206d 206e 206f feff".split()
]
# verify the next function will work
assert all(i.startswith((b"\xe2", b"\xef")) for i in _ignore)
65
62
66
63
def hfsignoreclean(s):
    # type: (bytes) -> bytes
    """Remove codepoints ignored by HFS+ from s.

    >>> hfsignoreclean(u'.h\u200cg'.encode('utf-8'))
    '.hg'
    >>> hfsignoreclean(u'.h\ufeffg'.encode('utf-8'))
    '.hg'
    """
    # every ignored codepoint encodes to UTF-8 starting with 0xe2 or 0xef
    # (asserted next to _ignore above), so this cheap containment test
    # skips the replace loop in the common case
    if b"\xe2" in s or b"\xef" in s:
        for junk in _ignore:
            s = s.replace(junk, b'')
    return s
80
77
81
78
# encoding.environ is provided read-only, which may not be used to modify
# the process environment
_nativeenviron = not pycompat.ispy3 or os.supports_bytes_environ
if not pycompat.ispy3:
    environ = os.environ  # re-exports
elif _nativeenviron:
    environ = os.environb  # re-exports
else:
    # preferred encoding isn't known yet; use utf-8 to avoid unicode error
    # and recreate it once encoding is settled
    environ = dict(
        (k.encode('utf-8'), v.encode('utf-8'))
        for k, v in os.environ.items()  # re-exports
    )

_encodingrewrites = {
    b'646': b'ascii',
    b'ANSI_X3.4-1968': b'ascii',
}
# cp65001 is a Windows variant of utf-8, which isn't supported on Python 2.
# No idea if it should be rewritten to the canonical name 'utf-8' on Python 3.
# https://bugs.python.org/issue13216
if pycompat.iswindows and not pycompat.ispy3:
    _encodingrewrites[b'cp65001'] = b'utf-8'

try:
    encoding = environ.get(b"HGENCODING")
    if not encoding:
        encoding = locale.getpreferredencoding().encode('ascii') or b'ascii'
    encoding = _encodingrewrites.get(encoding, encoding)
except locale.Error:
    encoding = b'ascii'
encodingmode = environ.get(b"HGENCODINGMODE", b"strict")
fallbackencoding = b'ISO-8859-1'
116
113
117
114
class localstr(bytes):
    '''This class allows strings that are unmodified to be
    round-tripped to the local encoding and back'''

    def __new__(cls, u, l):
        s = bytes.__new__(cls, l)
        s._utf8 = u
        return s

    if pycompat.TYPE_CHECKING:
        # pseudo implementation to help pytype see localstr() constructor
        def __init__(self, u, l):
            # type: (bytes, bytes) -> None
            super(localstr, self).__init__(l)
            self._utf8 = u

    def __hash__(self):
        return hash(self._utf8)  # avoid collisions in local string space
136
133
137
134
class safelocalstr(bytes):
    """Tagged string denoting it was previously an internal UTF-8 string,
    and can be converted back to UTF-8 losslessly

    >>> assert safelocalstr(b'\\xc3') == b'\\xc3'
    >>> assert b'\\xc3' == safelocalstr(b'\\xc3')
    >>> assert b'\\xc3' in {safelocalstr(b'\\xc3'): 0}
    >>> assert safelocalstr(b'\\xc3') in {b'\\xc3': 0}
    """
147
144
148
145
def tolocal(s):
    # type: (bytes) -> bytes
    """
    Convert a string from internal UTF-8 to local encoding

    All internal strings should be UTF-8 but some repos before the
    implementation of locale support may contain latin1 or possibly
    other character sets. We attempt to decode everything strictly
    using UTF-8, then Latin-1, and failing that, we use UTF-8 and
    replace unknown characters.

    The localstr class is used to cache the known UTF-8 encoding of
    strings next to their local representation to allow lossless
    round-trip conversion back to UTF-8.

    >>> u = b'foo: \\xc3\\xa4' # utf-8
    >>> l = tolocal(u)
    >>> l
    'foo: ?'
    >>> fromlocal(l)
    'foo: \\xc3\\xa4'
    >>> u2 = b'foo: \\xc3\\xa1'
    >>> d = { l: 1, tolocal(u2): 2 }
    >>> len(d) # no collision
    2
    >>> b'foo: ?' in d
    False
    >>> l1 = b'foo: \\xe4' # historical latin1 fallback
    >>> l = tolocal(l1)
    >>> l
    'foo: ?'
    >>> fromlocal(l) # magically in utf-8
    'foo: \\xc3\\xa4'
    """

    # pure-ASCII input needs no conversion at all
    if isasciistr(s):
        return s

    try:
        try:
            # make sure string is actually stored in UTF-8
            uni = s.decode('UTF-8')
            if encoding == b'UTF-8':
                # fast path
                return s
            loc = uni.encode(_sysstr(encoding), "replace")
            if uni == loc.decode(_sysstr(encoding)):
                # loc is a safe, non-lossy encoding of s
                return safelocalstr(loc)
            return localstr(s, loc)
        except UnicodeDecodeError:
            # we should only get here if we're looking at an ancient changeset
            try:
                uni = s.decode(_sysstr(fallbackencoding))
                loc = uni.encode(_sysstr(encoding), "replace")
                if uni == loc.decode(_sysstr(encoding)):
                    # loc is a safe, non-lossy encoding of s
                    return safelocalstr(loc)
                return localstr(uni.encode('UTF-8'), loc)
            except UnicodeDecodeError:
                uni = s.decode("utf-8", "replace")  # last ditch
                # can't round-trip
                return uni.encode(_sysstr(encoding), "replace")
    except LookupError as k:
        raise error.Abort(k, hint=b"please check your locale settings")
214
211
215
212
def fromlocal(s):
    # type: (bytes) -> bytes
    """
    Convert a string from the local character encoding to UTF-8

    We attempt to decode strings using the encoding mode set by
    HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
    characters will cause an error message. Other modes include
    'replace', which replaces unknown characters with a special
    Unicode character, and 'ignore', which drops the character.
    """

    # can we do a lossless round-trip?
    if isinstance(s, localstr):
        return s._utf8
    if isasciistr(s):
        return s

    try:
        uni = s.decode(_sysstr(encoding), _sysstr(encodingmode))
        return uni.encode("utf-8")
    except UnicodeDecodeError as err:
        # show a window of context around the offending byte
        ctx = s[max(0, err.start - 10) : err.start + 10]
        raise error.Abort(
            b"decoding near '%s': %s!" % (ctx, pycompat.bytestr(err))
        )
    except LookupError as k:
        raise error.Abort(k, hint=b"please check your locale settings")
244
241
245
242
def unitolocal(u):
    # type: (Text) -> bytes
    """Convert a unicode string to a byte string of local encoding"""
    # route through UTF-8 so tolocal() can cache the round-trip form
    return tolocal(u.encode('utf-8'))
250
247
251
248
def unifromlocal(s):
    # type: (bytes) -> Text
    """Convert a byte string of local encoding to a unicode string"""
    # fromlocal() always yields valid UTF-8, so this decode cannot fail
    return fromlocal(s).decode('utf-8')
256
253
257
254
def unimethod(bytesfunc):
    # type: (Callable[[Any], bytes]) -> Callable[[Any], Text]
    """Create a proxy method that forwards __unicode__() and __str__() of
    Python 3 to __bytes__()"""

    def unifunc(obj):
        # defer both the bytes conversion and the decode until call time
        return unifromlocal(bytesfunc(obj))

    return unifunc
267
264
268
265
# converter functions between native str and byte string. use these if the
# character encoding is not aware (e.g. exception message) or is known to
# be locale dependent (e.g. date formatting.)
if pycompat.ispy3:
    strtolocal = unitolocal
    strfromlocal = unifromlocal
    strmethod = unimethod
else:
    # on Python 2, native str already is bytes, so these are identity maps

    def strtolocal(s):
        # type: (str) -> bytes
        return s  # pytype: disable=bad-return-type

    def strfromlocal(s):
        # type: (bytes) -> str
        return s  # pytype: disable=bad-return-type

    strmethod = pycompat.identity
287
284
if not _nativeenviron:
    # now encoding and helper functions are available, recreate the environ
    # dict to be exported to other modules
    environ = dict(
        (tolocal(k.encode('utf-8')), tolocal(v.encode('utf-8')))
        for k, v in os.environ.items()  # re-exports
    )

if pycompat.ispy3:
    # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which
    # returns bytes.
    if pycompat.iswindows:
        # Python 3 on Windows issues a DeprecationWarning about using the bytes
        # API when os.getcwdb() is called.
        getcwd = lambda: strtolocal(os.getcwd())  # re-exports
    else:
        getcwd = os.getcwdb  # re-exports
else:
    getcwd = os.getcwd  # re-exports
307
304
# How to treat ambiguous-width characters. Set to 'wide' to treat as wide.
_wide = _sysstr(
    environ.get(b"HGENCODINGAMBIGUOUS", b"narrow") == b"wide"
    and b"WFA"
    or b"WF"
)
314
311
315
312
def colwidth(s):
    # type: (bytes) -> int
    b"Find the column width of a string for display in the local encoding"
    # decode with 'replace' so undecodable bytes still count as one column
    return ucolwidth(s.decode(_sysstr(encoding), 'replace'))
320
317
321
318
def ucolwidth(d):
    # type: (Text) -> int
    b"Find the column width of a Unicode string for display"
    eaw = getattr(unicodedata, 'east_asian_width', None)
    if eaw is not None:
        # wide/fullwidth (and, per _wide, possibly ambiguous) chars take
        # two columns, everything else one
        return sum([2 if eaw(ch) in _wide else 1 for ch in d])
    return len(d)
329
326
330
327
def getcols(s, start, c):
    # type: (bytes, int, int) -> bytes
    '''Use colwidth to find a c-column substring of s starting at byte
    index start'''
    # grow the candidate one byte at a time; multi-byte characters mean
    # byte length and display width can differ
    for end in pycompat.xrange(start + c, len(s)):
        candidate = s[start:end]
        if colwidth(candidate) == c:
            return candidate
    raise ValueError('substring not found')
340
337
341
338
def trim(s, width, ellipsis=b'', leftside=False):
    # type: (bytes, int, bytes, bool) -> bytes
    """Trim string 's' to at most 'width' columns (including 'ellipsis').

    If 'leftside' is True, left side of string 's' is trimmed.
    'ellipsis' is always placed at trimmed side.

    >>> from .node import bin
    >>> def bprint(s):
    ...     print(pycompat.sysstr(s))
    >>> ellipsis = b'+++'
    >>> from . import encoding
    >>> encoding.encoding = b'utf-8'
    >>> t = b'1234567890'
    >>> bprint(trim(t, 12, ellipsis=ellipsis))
    1234567890
    >>> bprint(trim(t, 10, ellipsis=ellipsis))
    1234567890
    >>> bprint(trim(t, 8, ellipsis=ellipsis))
    12345+++
    >>> bprint(trim(t, 8, ellipsis=ellipsis, leftside=True))
    +++67890
    >>> bprint(trim(t, 8))
    12345678
    >>> bprint(trim(t, 8, leftside=True))
    34567890
    >>> bprint(trim(t, 3, ellipsis=ellipsis))
    +++
    >>> bprint(trim(t, 1, ellipsis=ellipsis))
    +
    >>> u = u'\u3042\u3044\u3046\u3048\u304a' # 2 x 5 = 10 columns
    >>> t = u.encode(pycompat.sysstr(encoding.encoding))
    >>> bprint(trim(t, 12, ellipsis=ellipsis))
    \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a
    >>> bprint(trim(t, 10, ellipsis=ellipsis))
    \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a
    >>> bprint(trim(t, 8, ellipsis=ellipsis))
    \xe3\x81\x82\xe3\x81\x84+++
    >>> bprint(trim(t, 8, ellipsis=ellipsis, leftside=True))
    +++\xe3\x81\x88\xe3\x81\x8a
    >>> bprint(trim(t, 5))
    \xe3\x81\x82\xe3\x81\x84
    >>> bprint(trim(t, 5, leftside=True))
    \xe3\x81\x88\xe3\x81\x8a
    >>> bprint(trim(t, 4, ellipsis=ellipsis))
    +++
    >>> bprint(trim(t, 4, ellipsis=ellipsis, leftside=True))
    +++
    >>> t = bin(b'112233445566778899aa') # invalid byte sequence
    >>> bprint(trim(t, 12, ellipsis=ellipsis))
    \x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa
    >>> bprint(trim(t, 10, ellipsis=ellipsis))
    \x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa
    >>> bprint(trim(t, 8, ellipsis=ellipsis))
    \x11\x22\x33\x44\x55+++
    >>> bprint(trim(t, 8, ellipsis=ellipsis, leftside=True))
    +++\x66\x77\x88\x99\xaa
    >>> bprint(trim(t, 8))
    \x11\x22\x33\x44\x55\x66\x77\x88
    >>> bprint(trim(t, 8, leftside=True))
    \x33\x44\x55\x66\x77\x88\x99\xaa
    >>> bprint(trim(t, 3, ellipsis=ellipsis))
    +++
    >>> bprint(trim(t, 1, ellipsis=ellipsis))
    +
    """
    try:
        uni = s.decode(_sysstr(encoding))
    except UnicodeDecodeError:
        # undecodable input: fall back to trimming by raw byte count
        if len(s) <= width:  # trimming is not needed
            return s
        width -= len(ellipsis)
        if width <= 0:  # no enough room even for ellipsis
            return ellipsis[: width + len(ellipsis)]
        if leftside:
            return ellipsis + s[-width:]
        return s[:width] + ellipsis

    if ucolwidth(uni) <= width:  # trimming is not needed
        return s

    width -= len(ellipsis)
    if width <= 0:  # no enough room even for ellipsis
        return ellipsis[: width + len(ellipsis)]

    if leftside:
        uslice = lambda i: uni[i:]
        concat = lambda chunk: ellipsis + chunk
    else:
        uslice = lambda i: uni[:-i]
        concat = lambda chunk: chunk + ellipsis
    # shave off one character at a time until the remainder fits
    for i in pycompat.xrange(1, len(uni)):
        usub = uslice(i)
        if ucolwidth(usub) <= width:
            return concat(usub.encode(_sysstr(encoding)))
    return ellipsis  # no enough room for multi-column characters
438
435
439
436
def lower(s):
    # type: (bytes) -> bytes
    b"best-effort encoding-aware case-folding of local string s"
    # fast path: pure-ASCII strings fold in C
    try:
        return asciilower(s)
    except UnicodeDecodeError:
        pass
    try:
        if isinstance(s, localstr):
            uni = s._utf8.decode("utf-8")
        else:
            uni = s.decode(_sysstr(encoding), _sysstr(encodingmode))

        folded = uni.lower()
        if uni == folded:
            return s  # preserve localstring
        return folded.encode(_sysstr(encoding))
    except UnicodeError:
        return s.lower()  # we don't know how to fold this except in ASCII
    except LookupError as k:
        raise error.Abort(k, hint=b"please check your locale settings")
461
458
462
459
def upper(s):
    # type: (bytes) -> bytes
    b"best-effort encoding-aware case-folding of local string s"
    # fast path: pure-ASCII strings fold in C; anything else goes through
    # the encoding-aware fallback
    try:
        return asciiupper(s)
    except UnicodeDecodeError:
        return upperfallback(s)
470
467
471
468
def upperfallback(s):
    # type: (Any) -> Any
    # encoding-aware uppercase for non-ASCII local strings
    try:
        if isinstance(s, localstr):
            uni = s._utf8.decode("utf-8")
        else:
            uni = s.decode(_sysstr(encoding), _sysstr(encodingmode))

        folded = uni.upper()
        if uni == folded:
            return s  # preserve localstring
        return folded.encode(_sysstr(encoding))
    except UnicodeError:
        return s.upper()  # we don't know how to fold this except in ASCII
    except LookupError as k:
        raise error.Abort(k, hint=b"please check your locale settings")
488
485
489
486
class normcasespecs(object):
    '''what a platform's normcase does to ASCII strings

    This is specified per platform, and should be consistent with what normcase
    on that platform actually does.

    lower: normcase lowercases ASCII strings
    upper: normcase uppercases ASCII strings
    other: the fallback function should always be called

    This should be kept in sync with normcase_spec in util.h.'''

    lower = -1
    upper = 1
    other = 0
505
502
506
503
def jsonescape(s, paranoid=False):
    # type: (Any, Any) -> Any
    '''returns a string suitable for JSON

    JSON is problematic for us because it doesn't support non-Unicode
    bytes. To deal with this, we take the following approach:

    - localstr/safelocalstr objects are converted back to UTF-8
    - valid UTF-8/ASCII strings are passed as-is
    - other strings are converted to UTF-8b surrogate encoding
    - apply JSON-specified string escaping

    (escapes are doubled in these tests)

    >>> jsonescape(b'this is a test')
    'this is a test'
    >>> jsonescape(b'escape characters: \\0 \\x0b \\x7f')
    'escape characters: \\\\u0000 \\\\u000b \\\\u007f'
    >>> jsonescape(b'escape characters: \\b \\t \\n \\f \\r \\" \\\\')
    'escape characters: \\\\b \\\\t \\\\n \\\\f \\\\r \\\\" \\\\\\\\'
    >>> jsonescape(b'a weird byte: \\xdd')
    'a weird byte: \\xed\\xb3\\x9d'
    >>> jsonescape(b'utf-8: caf\\xc3\\xa9')
    'utf-8: caf\\xc3\\xa9'
    >>> jsonescape(b'')
    ''

    If paranoid, non-ascii and common troublesome characters are also escaped.
    This is suitable for web output.

    >>> s = b'escape characters: \\0 \\x0b \\x7f'
    >>> assert jsonescape(s) == jsonescape(s, paranoid=True)
    >>> s = b'escape characters: \\b \\t \\n \\f \\r \\" \\\\'
    >>> assert jsonescape(s) == jsonescape(s, paranoid=True)
    >>> jsonescape(b'escape boundary: \\x7e \\x7f \\xc2\\x80', paranoid=True)
    'escape boundary: ~ \\\\u007f \\\\u0080'
    >>> jsonescape(b'a weird byte: \\xdd', paranoid=True)
    'a weird byte: \\\\udcdd'
    >>> jsonescape(b'utf-8: caf\\xc3\\xa9', paranoid=True)
    'utf-8: caf\\\\u00e9'
    >>> jsonescape(b'non-BMP: \\xf0\\x9d\\x84\\x9e', paranoid=True)
    'non-BMP: \\\\ud834\\\\udd1e'
    >>> jsonescape(b'<foo@example.org>', paranoid=True)
    '\\\\u003cfoo@example.org\\\\u003e'
    '''

    # normalize to UTF-8b first so escaping only has to deal with one encoding
    u8chars = toutf8b(s)
    try:
        # fast path: accelerated escaper; raises ValueError when it cannot
        # handle the input
        return _jsonescapeu8fast(u8chars, paranoid)
    except ValueError:
        pass
    # slow path: the pure-Python escaper handles everything
    return charencodepure.jsonescapeu8fallback(u8chars, paranoid)
559
556
560
557
# We need to decode/encode U+DCxx codes transparently since invalid UTF-8
# bytes are mapped to that range.
_utf8strict = r'surrogatepass' if pycompat.ispy3 else r'strict'

# byte length of a UTF-8 sequence, indexed by the high nibble of its first
# byte; 0 marks a single-byte (ASCII) character
_utf8len = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4]
569
566
570
567
def getutf8char(s, pos):
    # type: (bytes, int) -> bytes
    '''get the next full utf-8 character in the given string, starting at pos

    Raises a UnicodeError if the given location does not start a valid
    utf-8 character.
    '''

    # the high nibble of the first byte determines how many bytes to try
    # decoding
    width = _utf8len[ord(s[pos : pos + 1]) >> 4]
    if not width:
        # single-byte (ASCII) character
        return s[pos : pos + 1]

    char = s[pos : pos + width]
    # validate the sequence with an attempted decode; malformed input
    # raises UnicodeDecodeError (a UnicodeError subclass)
    char.decode("utf-8", _utf8strict)
    return char
588
585
589
586
def toutf8b(s):
    # type: (bytes) -> bytes
    '''convert a local, possibly-binary string into UTF-8b

    This is intended as a generic method to preserve data when working
    with schemes like JSON and XML that have no provision for
    arbitrary byte strings. As Mercurial often doesn't know
    what encoding data is in, we use so-called UTF-8b.

    If a string is already valid UTF-8 (or ASCII), it passes unmodified.
    Otherwise, unsupported bytes are mapped to UTF-16 surrogate range,
    uDC00-uDCFF.

    Principles of operation:

    - ASCII and UTF-8 data successfully round-trips and is understood
      by Unicode-oriented clients
    - filenames and file contents in arbitrary other encodings can have
      be round-tripped or recovered by clueful clients
    - local strings that have a cached known UTF-8 encoding (aka
      localstr) get sent as UTF-8 so Unicode-oriented clients get the
      Unicode data they want
    - non-lossy local strings (aka safelocalstr) get sent as UTF-8 as well
    - because we must preserve UTF-8 bytestring in places such as
      filenames, metadata can't be roundtripped without help

    (Note: "UTF-8b" often refers to decoding a mix of valid UTF-8 and
    arbitrary bytes into an internal Unicode format that can be
    re-encoded back into the original. Here we are exposing the
    internal surrogate encoding as a UTF-8 string.)
    '''

    if isinstance(s, localstr):
        # assume that the original UTF-8 sequence would never contain
        # invalid characters in U+DCxx range
        return s._utf8
    elif isinstance(s, safelocalstr):
        # already verified that s is non-lossy in legacy encoding, which
        # shouldn't contain characters in U+DCxx range
        return fromlocal(s)
    elif isasciistr(s):
        return s
    if b"\xed" not in s:
        # no byte that could begin an existing U+DCxx escape; if the whole
        # string is valid UTF-8, it can pass through unchanged
        try:
            s.decode('utf-8', _utf8strict)
            return s
        except UnicodeDecodeError:
            pass

    # slow path: walk the string one UTF-8 character at a time, mapping
    # every invalid byte into the U+DCxx surrogate range
    s = pycompat.bytestr(s)
    r = b""
    pos = 0
    l = len(s)
    while pos < l:
        try:
            c = getutf8char(s, pos)
            if b"\xed\xb0\x80" <= c <= b"\xed\xb3\xbf":
                # have to re-escape existing U+DCxx characters
                c = unichr(0xDC00 + ord(s[pos])).encode('utf-8', _utf8strict)
                pos += 1
            else:
                pos += len(c)
        except UnicodeDecodeError:
            # invalid byte: escape it as a U+DCxx surrogate
            c = unichr(0xDC00 + ord(s[pos])).encode('utf-8', _utf8strict)
            pos += 1
        r += c
    return r
657
654
658
655
def fromutf8b(s):
    # type: (bytes) -> bytes
    '''Given a UTF-8b string, return a local, possibly-binary string.

    return the original binary string. This
    is a round-trip process for strings like filenames, but metadata
    that's was passed through tolocal will remain in UTF-8.

    >>> roundtrip = lambda x: fromutf8b(toutf8b(x)) == x
    >>> m = b"\\xc3\\xa9\\x99abcd"
    >>> toutf8b(m)
    '\\xc3\\xa9\\xed\\xb2\\x99abcd'
    >>> roundtrip(m)
    True
    >>> roundtrip(b"\\xc2\\xc2\\x80")
    True
    >>> roundtrip(b"\\xef\\xbf\\xbd")
    True
    >>> roundtrip(b"\\xef\\xef\\xbf\\xbd")
    True
    >>> roundtrip(b"\\xf1\\x80\\x80\\x80\\x80")
    True
    '''

    if isasciistr(s):
        return s
    # fast path - look for uDxxx prefixes in s
    if b"\xed" not in s:
        return s

    # We could do this with the unicode type but some Python builds
    # use UTF-16 internally (issue5031) which causes non-BMP code
    # points to be escaped. Instead, we use our handy getutf8char
    # helper again to walk the string without "decoding" it.

    buf = pycompat.bytestr(s)
    out = b""
    pos = 0
    end = len(buf)
    while pos < end:
        ch = getutf8char(buf, pos)
        pos += len(ch)
        if b"\xed\xb0\x80" <= ch <= b"\xed\xb3\xbf":
            # a U+DCxx surrogate escape: unescape it back to the raw byte
            ch = pycompat.bytechr(ord(ch.decode("utf-8", _utf8strict)) & 0xFF)
        out += ch
    return out
@@ -1,1084 +1,1084
1 # logcmdutil.py - utility for log-like commands
1 # logcmdutil.py - utility for log-like commands
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import itertools
10 import itertools
11 import os
11 import os
12 import posixpath
12 import posixpath
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 nullid,
16 nullid,
17 wdirid,
17 wdirid,
18 wdirrev,
18 wdirrev,
19 )
19 )
20
20
21 from . import (
21 from . import (
22 dagop,
22 dagop,
23 error,
23 error,
24 formatter,
24 formatter,
25 graphmod,
25 graphmod,
26 match as matchmod,
26 match as matchmod,
27 mdiff,
27 mdiff,
28 patch,
28 patch,
29 pathutil,
29 pathutil,
30 pycompat,
30 pycompat,
31 revset,
31 revset,
32 revsetlang,
32 revsetlang,
33 scmutil,
33 scmutil,
34 smartset,
34 smartset,
35 templatekw,
35 templatekw,
36 templater,
36 templater,
37 util,
37 util,
38 )
38 )
39 from .utils import (
39 from .utils import (
40 dateutil,
40 dateutil,
41 stringutil,
41 stringutil,
42 )
42 )
43
43
44
44
# Import typing names only for static analysis: pycompat.TYPE_CHECKING is
# False at runtime but True for type checkers, replacing the obscure
# always-false "if not globals():" trick used previously.
if pycompat.TYPE_CHECKING:
    from typing import (
        Any,
        Tuple,
    )

    # keep pyflakes from flagging the names as unused
    for t in (Any, Tuple):
        assert t
53
53
54
54
def getlimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get(b'limit')
    if not limit:
        # option absent or empty: no limit
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_(b'limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_(b'limit must be positive'))
    return limit
68
68
69
69
def diffordiffstat(
    ui,
    repo,
    diffopts,
    node1,
    node2,
    match,
    changes=None,
    stat=False,
    fp=None,
    graphwidth=0,
    prefix=b'',
    root=b'',
    listsubrepos=False,
    hunksfilterfn=None,
):
    '''show diff or diffstat.

    Writes the diff between node1 and node2 (restricted by ``match``) to
    ``fp`` when given, otherwise to ``ui``.  With ``stat=True`` a diffstat
    summary is written instead of the full patch.  ``prefix``/``root``
    rewrite the paths shown; ``listsubrepos`` forces recursion into all
    subrepositories; ``hunksfilterfn`` can prune hunks per file context.
    '''
    ctx1 = repo[node1]
    ctx2 = repo[node2]
    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = b''
    copysourcematch = None

    def compose(f, g):
        return lambda x: f(g(x))

    def pathfn(f):
        return posixpath.join(prefix, f)

    if relroot != b'':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        uirelroot = uipathfn(pathfn(relroot))
        relroot += b'/'
        # warn about match patterns that fall outside the relative root
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(
                    _(b'warning: %s not inside relative root %s\n')
                    % (uipathfn(pathfn(matchroot)), uirelroot)
                )

        relrootmatch = scmutil.match(ctx2, pats=[relroot], default=b'path')
        match = matchmod.intersectmatchers(match, relrootmatch)
        copysourcematch = relrootmatch

        checkroot = repo.ui.configbool(
            b'devel', b'all-warnings'
        ) or repo.ui.configbool(b'devel', b'check-relroot')

        def relrootpathfn(f):
            # strip the relative root from displayed paths, optionally
            # asserting (devel mode) that every file is actually under it
            if checkroot and not f.startswith(relroot):
                raise AssertionError(
                    b"file %s doesn't start with relroot %s" % (f, relroot)
                )
            return f[len(relroot) :]

        pathfn = compose(relrootpathfn, pathfn)

    if stat:
        diffopts = diffopts.copy(context=0, noprefix=False)
        width = 80
        if not ui.plain():
            width = ui.termwidth() - graphwidth
        # If an explicit --root was given, don't respect ui.relative-paths
        if not relroot:
            pathfn = compose(scmutil.getuipathfn(repo), pathfn)

    chunks = ctx2.diff(
        ctx1,
        match,
        changes,
        opts=diffopts,
        pathfn=pathfn,
        copysourcematch=copysourcematch,
        hunksfilterfn=hunksfilterfn,
    )

    if fp is not None or ui.canwritewithoutlabels():
        # plain output: no color labels needed
        out = fp or ui
        if stat:
            chunks = [patch.diffstat(util.iterlines(chunks), width=width)]
        for chunk in util.filechunkiter(util.chunkbuffer(chunks)):
            out.write(chunk)
    else:
        # labeled output path
        if stat:
            chunks = patch.diffstatui(util.iterlines(chunks), width=width)
        else:
            chunks = patch.difflabel(
                lambda chunks, **kwargs: chunks, chunks, opts=diffopts
            )
        if ui.canbatchlabeledwrites():

            def gen():
                for chunk, label in chunks:
                    yield ui.label(chunk, label=label)

            for chunk in util.filechunkiter(util.chunkbuffer(gen())):
                ui.write(chunk)
        else:
            for chunk, label in chunks:
                ui.write(chunk, label=label)

    # recurse into subrepositories touched by the two contexts
    for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
        tempnode2 = node2
        try:
            if node2 is not None:
                tempnode2 = ctx2.substate[subpath][1]
        except KeyError:
            # A subrepo that existed in node1 was deleted between node1 and
            # node2 (inclusive). Thus, ctx2's substate won't contain that
            # subpath. The best we can do is to ignore it.
            tempnode2 = None
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        if listsubrepos or match.exact(subpath) or any(submatch.files()):
            sub.diff(
                ui,
                diffopts,
                tempnode2,
                submatch,
                changes=changes,
                stat=stat,
                fp=fp,
                prefix=subprefix,
            )
198
198
199
199
class changesetdiffer(object):
    """Generate diff of changeset with pre-configured filtering functions"""

    def _makefilematcher(self, ctx):
        # default: include every file in the repository
        return scmutil.matchall(ctx.repo())

    def _makehunksfilter(self, ctx):
        # default: no hunk-level filtering
        return None

    def showdiff(self, ui, ctx, diffopts, graphwidth=0, stat=False):
        """Write ctx's diff (or diffstat) against its first parent to ui."""
        containing = ctx.repo()
        this = ctx.node()
        parent = ctx.p1().node()
        diffordiffstat(
            ui,
            containing,
            diffopts,
            parent,
            this,
            match=self._makefilematcher(ctx),
            stat=stat,
            graphwidth=graphwidth,
            hunksfilterfn=self._makehunksfilter(ctx),
        )
224
224
225
225
def changesetlabels(ctx):
    """Return the space-joined ui label string for a changeset line."""
    labels = [b'log.changeset', b'changeset.%s' % ctx.phasestr()]
    if ctx.obsolete():
        labels.append(b'changeset.obsolete')
    if ctx.isunstable():
        labels.append(b'changeset.unstable')
        labels.extend(
            b'instability.%s' % instability
            for instability in ctx.instabilities()
        )
    return b' '.join(labels)
235
235
236
236
237 class changesetprinter(object):
237 class changesetprinter(object):
238 '''show changeset information when templating not requested.'''
238 '''show changeset information when templating not requested.'''
239
239
240 def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
240 def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
241 self.ui = ui
241 self.ui = ui
242 self.repo = repo
242 self.repo = repo
243 self.buffered = buffered
243 self.buffered = buffered
244 self._differ = differ or changesetdiffer()
244 self._differ = differ or changesetdiffer()
245 self._diffopts = patch.diffallopts(ui, diffopts)
245 self._diffopts = patch.diffallopts(ui, diffopts)
246 self._includestat = diffopts and diffopts.get(b'stat')
246 self._includestat = diffopts and diffopts.get(b'stat')
247 self._includediff = diffopts and diffopts.get(b'patch')
247 self._includediff = diffopts and diffopts.get(b'patch')
248 self.header = {}
248 self.header = {}
249 self.hunk = {}
249 self.hunk = {}
250 self.lastheader = None
250 self.lastheader = None
251 self.footer = None
251 self.footer = None
252 self._columns = templatekw.getlogcolumns()
252 self._columns = templatekw.getlogcolumns()
253
253
254 def flush(self, ctx):
254 def flush(self, ctx):
255 rev = ctx.rev()
255 rev = ctx.rev()
256 if rev in self.header:
256 if rev in self.header:
257 h = self.header[rev]
257 h = self.header[rev]
258 if h != self.lastheader:
258 if h != self.lastheader:
259 self.lastheader = h
259 self.lastheader = h
260 self.ui.write(h)
260 self.ui.write(h)
261 del self.header[rev]
261 del self.header[rev]
262 if rev in self.hunk:
262 if rev in self.hunk:
263 self.ui.write(self.hunk[rev])
263 self.ui.write(self.hunk[rev])
264 del self.hunk[rev]
264 del self.hunk[rev]
265
265
266 def close(self):
266 def close(self):
267 if self.footer:
267 if self.footer:
268 self.ui.write(self.footer)
268 self.ui.write(self.footer)
269
269
270 def show(self, ctx, copies=None, **props):
270 def show(self, ctx, copies=None, **props):
271 props = pycompat.byteskwargs(props)
271 props = pycompat.byteskwargs(props)
272 if self.buffered:
272 if self.buffered:
273 self.ui.pushbuffer(labeled=True)
273 self.ui.pushbuffer(labeled=True)
274 self._show(ctx, copies, props)
274 self._show(ctx, copies, props)
275 self.hunk[ctx.rev()] = self.ui.popbuffer()
275 self.hunk[ctx.rev()] = self.ui.popbuffer()
276 else:
276 else:
277 self._show(ctx, copies, props)
277 self._show(ctx, copies, props)
278
278
    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision

        Writes the changeset header fields (node, branch, names, parents,
        user, date, ...) to self.ui with appropriate labels, the amount of
        detail depending on ui.quiet/verbose/debugflag, then delegates the
        patch/diffstat body to _showpatch().
        '''
        changenode = ctx.node()
        graphwidth = props.get(b'graphwidth', 0)

        if self.ui.quiet:
            # quiet mode: just the changeset identifier
            self.ui.write(
                b"%s\n" % scmutil.formatchangeid(ctx), label=b'log.node'
            )
            return

        columns = self._columns
        self.ui.write(
            columns[b'changeset'] % scmutil.formatchangeid(ctx),
            label=changesetlabels(ctx),
        )

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != b'default':
            self.ui.write(columns[b'branch'] % branch, label=b'log.branch')

        for nsname, ns in pycompat.iteritems(self.repo.names):
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == b'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name, label=b'log.%s' % ns.colorname)
        if self.ui.debugflag:
            self.ui.write(
                columns[b'phase'] % ctx.phasestr(), label=b'log.phase'
            )
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = b'log.parent changeset.%s' % pctx.phasestr()
            self.ui.write(
                columns[b'parent'] % scmutil.formatchangeid(pctx), label=label
            )

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            if mnode is None:
                # working-directory context has no manifest node yet
                mnode = wdirid
                mrev = wdirrev
            else:
                mrev = self.repo.manifestlog.rev(mnode)
            self.ui.write(
                columns[b'manifest']
                % scmutil.formatrevnode(self.ui, mrev, mnode),
                label=b'ui.debug log.manifest',
            )
        self.ui.write(columns[b'user'] % ctx.user(), label=b'log.user')
        self.ui.write(
            columns[b'date'] % dateutil.datestr(ctx.date()), label=b'log.date'
        )

        if ctx.isunstable():
            instabilities = ctx.instabilities()
            self.ui.write(
                columns[b'instability'] % b', '.join(instabilities),
                label=b'log.instability',
            )

        elif ctx.obsolete():
            self._showobsfate(ctx)

        # extension hook point; no-op in the base class
        self._exthook(ctx)

        if self.ui.debugflag:
            # debug mode: split the file list into modified/added/removed
            files = ctx.p1().status(ctx)
            for key, value in zip(
                [b'files', b'files+', b'files-'],
                [files.modified, files.added, files.removed],
            ):
                if value:
                    self.ui.write(
                        columns[key] % b" ".join(value),
                        label=b'ui.debug log.files',
                    )
        elif ctx.files() and self.ui.verbose:
            self.ui.write(
                columns[b'files'] % b" ".join(ctx.files()),
                label=b'ui.note log.files',
            )
        if copies and self.ui.verbose:
            copies = [b'%s (%s)' % c for c in copies]
            self.ui.write(
                columns[b'copies'] % b' '.join(copies),
                label=b'ui.note log.copies',
            )

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(
                    columns[b'extra'] % (key, stringutil.escapestr(value)),
                    label=b'ui.debug log.extra',
                )

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                # verbose: full description
                self.ui.write(
                    _(b"description:\n"), label=b'ui.note log.description'
                )
                self.ui.write(description, label=b'ui.note log.description')
                self.ui.write(b"\n\n")
            else:
                # default: first line only
                self.ui.write(
                    columns[b'summary'] % description.splitlines()[0],
                    label=b'log.summary',
                )
        self.ui.write(b"\n")

        self._showpatch(ctx, graphwidth)
398
398
399 def _showobsfate(self, ctx):
399 def _showobsfate(self, ctx):
400 # TODO: do not depend on templater
400 # TODO: do not depend on templater
401 tres = formatter.templateresources(self.repo.ui, self.repo)
401 tres = formatter.templateresources(self.repo.ui, self.repo)
402 t = formatter.maketemplater(
402 t = formatter.maketemplater(
403 self.repo.ui,
403 self.repo.ui,
404 b'{join(obsfate, "\n")}',
404 b'{join(obsfate, "\n")}',
405 defaults=templatekw.keywords,
405 defaults=templatekw.keywords,
406 resources=tres,
406 resources=tres,
407 )
407 )
408 obsfate = t.renderdefault({b'ctx': ctx}).splitlines()
408 obsfate = t.renderdefault({b'ctx': ctx}).splitlines()
409
409
410 if obsfate:
410 if obsfate:
411 for obsfateline in obsfate:
411 for obsfateline in obsfate:
412 self.ui.write(
412 self.ui.write(
413 self._columns[b'obsolete'] % obsfateline,
413 self._columns[b'obsolete'] % obsfateline,
414 label=b'log.obsfate',
414 label=b'log.obsfate',
415 )
415 )
416
416
417 def _exthook(self, ctx):
417 def _exthook(self, ctx):
418 '''empty method used by extension as a hook point
418 '''empty method used by extension as a hook point
419 '''
419 '''
420
420
421 def _showpatch(self, ctx, graphwidth=0):
421 def _showpatch(self, ctx, graphwidth=0):
422 if self._includestat:
422 if self._includestat:
423 self._differ.showdiff(
423 self._differ.showdiff(
424 self.ui, ctx, self._diffopts, graphwidth, stat=True
424 self.ui, ctx, self._diffopts, graphwidth, stat=True
425 )
425 )
426 if self._includestat and self._includediff:
426 if self._includestat and self._includediff:
427 self.ui.write(b"\n")
427 self.ui.write(b"\n")
428 if self._includediff:
428 if self._includediff:
429 self._differ.showdiff(
429 self._differ.showdiff(
430 self.ui, ctx, self._diffopts, graphwidth, stat=False
430 self.ui, ctx, self._diffopts, graphwidth, stat=False
431 )
431 )
432 if self._includestat or self._includediff:
432 if self._includestat or self._includediff:
433 self.ui.write(b"\n")
433 self.ui.write(b"\n")
434
434
435
435
class changesetformatter(changesetprinter):
    """Format changeset information by generic formatter"""

    def __init__(
        self, ui, repo, fm, differ=None, diffopts=None, buffered=False
    ):
        changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
        self._diffopts = patch.difffeatureopts(ui, diffopts, git=True)
        self._fm = fm

    def close(self):
        # finalize the underlying formatter
        self._fm.end()

    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''
        fm = self._fm
        fm.startitem()
        fm.context(ctx=ctx)
        fm.data(rev=scmutil.intrev(ctx), node=fm.hexfunc(scmutil.binnode(ctx)))

        hints = fm.datahint()
        if self.ui.quiet and not hints:
            # quiet mode with no explicit field request: rev/node only
            return

        fm.data(
            branch=ctx.branch(),
            phase=ctx.phasestr(),
            user=ctx.user(),
            date=fm.formatdate(ctx.date()),
            desc=ctx.description(),
            bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'),
            tags=fm.formatlist(ctx.tags(), name=b'tag'),
            parents=fm.formatlist(
                [fm.hexfunc(p.node()) for p in ctx.parents()], name=b'node'
            ),
        )

        if self.ui.debugflag or b'manifest' in hints:
            fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid))
        if self.ui.debugflag or b'extra' in hints:
            fm.data(extra=fm.formatdict(ctx.extra()))

        wantstatus = self.ui.debugflag or any(
            key in hints for key in (b'modified', b'added', b'removed')
        )
        if wantstatus:
            st = ctx.p1().status(ctx)
            fm.data(
                modified=fm.formatlist(st.modified, name=b'file'),
                added=fm.formatlist(st.added, name=b'file'),
                removed=fm.formatlist(st.removed, name=b'file'),
            )

        verbose = not self.ui.debugflag and self.ui.verbose
        if verbose or b'files' in hints:
            fm.data(files=fm.formatlist(ctx.files(), name=b'file'))
        if (verbose and copies) or b'copies' in hints:
            fm.data(
                copies=fm.formatdict(copies or {}, key=b'name', value=b'source')
            )

        if self._includestat or b'diffstat' in hints:
            self.ui.pushbuffer()
            self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True)
            fm.data(diffstat=self.ui.popbuffer())
        if self._includediff or b'diff' in hints:
            self.ui.pushbuffer()
            self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False)
            fm.data(diff=self.ui.popbuffer())
507
507
508
508
class changesettemplater(changesetprinter):
    '''format changeset information.

    Note: there are a variety of convenience functions to build a
    changesettemplater for common cases. See functions such as:
    maketemplater, changesetdisplayer, buildcommittemplate, or other
    functions that use changesettemplater.
    '''

    # Arguments before "buffered" used to be positional. Consider not
    # adding/removing arguments before "buffered" to not break callers.
    def __init__(
        self, ui, repo, tmplspec, differ=None, diffopts=None, buffered=False
    ):
        changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
        # tres is shared with _graphnodeformatter()
        self._tresources = tres = formatter.templateresources(ui, repo)
        self.t = formatter.loadtemplater(
            ui,
            tmplspec,
            defaults=templatekw.keywords,
            resources=tres,
            cache=templatekw.defaulttempl,
        )
        self._counter = itertools.count()

        self._tref = tmplspec.ref
        self._parts = {
            b'header': b'',
            b'footer': b'',
            tmplspec.ref: tmplspec.ref,
            b'docheader': b'',
            b'docfooter': b'',
            b'separator': b'',
        }
        if tmplspec.mapfile:
            # find correct templates for current mode, for backward
            # compatibility with 'log -v/-q/--debug' using a mapfile
            modes = [
                (True, b''),
                (self.ui.verbose, b'_verbose'),
                (self.ui.quiet, b'_quiet'),
                (self.ui.debugflag, b'_debug'),
            ]
            for active, postfix in modes:
                for partname in self._parts:
                    candidate = partname + postfix
                    if active and candidate in self.t:
                        self._parts[partname] = candidate
        else:
            partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
            self._parts.update(
                formatter.templatepartsmap(tmplspec, self.t, partnames)
            )

        if self._parts[b'docheader']:
            self.ui.write(self.t.render(self._parts[b'docheader'], {}))

    def close(self):
        if self._parts[b'docfooter']:
            # append the document footer to whatever footer is pending
            self.footer = (self.footer or b"") + self.t.render(
                self._parts[b'docfooter'], {}
            )
        return super(changesettemplater, self).close()

    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props[b'ctx'] = ctx
        props[b'index'] = index = next(self._counter)
        props[b'revcache'] = {b'copies': copies}
        graphwidth = props.get(b'graphwidth', 0)

        # write separator, which wouldn't work well with the header part below
        # since there's inherently a conflict between header (across items) and
        # separator (per item)
        if index > 0 and self._parts[b'separator']:
            self.ui.write(self.t.render(self._parts[b'separator'], {}))

        # write header
        if self._parts[b'header']:
            rendered = self.t.render(self._parts[b'header'], props)
            if self.buffered:
                self.header[ctx.rev()] = rendered
            elif self.lastheader != rendered:
                self.lastheader = rendered
                self.ui.write(rendered)

        # write changeset metadata, then patch if requested
        self.ui.write(self.t.render(self._parts[self._tref], props))
        self._showpatch(ctx, graphwidth)

        if self._parts[b'footer'] and not self.footer:
            self.footer = self.t.render(self._parts[b'footer'], props)
605
605
606
606
def templatespec(tmpl, mapfile):
    """Wrap a literal template or a mapfile into a formatter templatespec."""
    if pycompat.ispy3:
        assert not isinstance(tmpl, str), b'tmpl must not be a str'
    if mapfile:
        return formatter.templatespec(b'changeset', tmpl, mapfile)
    return formatter.templatespec(b'', tmpl, None)
614
614
615
615
def _lookuptemplate(ui, tmpl, style):
    """Find the template matching the given template spec or style

    See formatter.lookuptemplate() for details.
    """
    # ui settings only apply when neither option is given; an explicit
    # template is stronger than a style
    if not tmpl and not style:
        tmpl = ui.config(b'ui', b'logtemplate')
        if tmpl:
            return templatespec(templater.unquotestring(tmpl), None)
        style = util.expandpath(ui.config(b'ui', b'style'))

    if not tmpl and style:
        mapfile = style
        if not os.path.split(mapfile)[0]:
            # bare name: resolve it against the shipped template maps
            mapname = templater.templatepath(
                b'map-cmdline.' + mapfile
            ) or templater.templatepath(mapfile)
            if mapname:
                mapfile = mapname
        return templatespec(None, mapfile)

    return formatter.lookuptemplate(ui, b'changeset', tmpl)
641
641
642
642
def maketemplater(ui, repo, tmpl, buffered=False):
    """Create a changesettemplater from a literal template 'tmpl'
    byte-string."""
    return changesettemplater(
        ui, repo, templatespec(tmpl, None), buffered=buffered
    )
648
648
649
649
def changesetdisplayer(ui, repo, opts, differ=None, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changesetprinter() is done.
    """
    spec = _lookuptemplate(ui, opts.get(b'template'), opts.get(b'style'))
    postargs = (differ, opts, buffered)

    # machine-readable formats have slightly different keyword set than
    # plain templates, which are handled by changesetformatter.
    # note that {b'pickle', b'debug'} can also be added to the list if needed.
    if spec.ref in {b'cbor', b'json'}:
        fm = ui.formatter(b'log', opts)
        return changesetformatter(ui, repo, fm, *postargs)

    if spec.ref or spec.tmpl or spec.mapfile:
        return changesettemplater(ui, repo, spec, *postargs)
    return changesetprinter(ui, repo, *postargs)
675
675
676
676
def _makematcher(repo, revs, pats, opts):
    """Build matcher and expanded patterns from log options

    If --follow, revs are the revisions to follow from.

    Returns (match, pats, slowpath) where
    - match: a matcher built from the given pats and -I/-X opts
    - pats: patterns used (globs are expanded on Windows)
    - slowpath: True if patterns aren't as simple as scanning filelogs
    """
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or (not match.always() and opts.get(b'removed'))
    if not slowpath:
        follow = opts.get(b'follow') or opts.get(b'follow_first')
        startctxs = []
        if follow and opts.get(b'rev'):
            startctxs = [repo[r] for r in revs]
        for path in match.files():
            if follow and startctxs:
                # No idea if the path was a directory at that revision, so
                # take the slow path.
                if any(path not in c for c in startctxs):
                    slowpath = True
                    continue
            elif follow and path not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(path)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(
                        _(b'cannot follow file not in parent revision: "%s"')
                        % path
                    )
            filelog = repo.file(path)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _(b'cannot follow nonexistent file: "%s"') % path
                    )
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        if not any(p == b'.' or p in repo.store for p in match.files()):
            slowpath = False

    return match, pats, slowpath
742
742
743
743
def _fileancestors(repo, revs, match, followfirst):
    """Return (revset, filematcher) following file ancestry from revs."""
    fctxs = []
    for rev in revs:
        ctx = repo[rev]
        for f in ctx.walk(match):
            fctxs.append(ctx[f].introfilectx())

    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated as a side effect
    # of the graph traversal.
    fcache = {}

    def filematcher(ctx):
        return scmutil.matchfiles(repo, fcache.get(ctx.rev(), []))

    def revgen():
        ancestors = dagop.filectxancestors(fctxs, followfirst=followfirst)
        for rev, cs in ancestors:
            fcache[rev] = [c.path() for c in cs]
            yield rev

    return smartset.generatorset(revgen(), iterasc=False), filematcher
766
766
767
767
def _makenofollowfilematcher(repo, pats, opts):
    """Hook point for extensions to override the filematcher for non-follow
    cases; the default implementation provides no matcher."""
    return None
771
771
772
772
# Map a log command option to (scalar revset template, list combinator).
# A None combinator means the option carries a single value; otherwise each
# value is formatted with the scalar template (when present) and the results
# are joined via the list template. Consumed by _makerevset().
_opt2logrevset = {
    b'no_merges': (b'not merge()', None),
    b'only_merges': (b'merge()', None),
    b'_matchfiles': (None, b'_matchfiles(%ps)'),
    b'date': (b'date(%s)', None),
    b'branch': (b'branch(%s)', b'%lr'),
    b'_patslog': (b'filelog(%s)', b'%lr'),
    b'keyword': (b'keyword(%s)', b'%lr'),
    b'prune': (b'ancestors(%s)', b'not %lr'),
    b'user': (b'user(%s)', b'%lr'),
}
784
784
785
785
def _makerevset(repo, match, pats, slowpath, opts):
    """Return a revset string built from log options and file patterns"""
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get(b'follow') or opts.get(b'follow_first')

    # branch and only_branch are really aliases and must be handled at
    # the same time
    branches = opts.get(b'branch', []) + opts.get(b'only_branch', [])
    opts[b'branch'] = [repo.lookupbranch(b) for b in branches]

    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X b" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = [b'r:', b'd:relpath']
        matchargs.extend(b'p:' + p for p in pats)
        matchargs.extend(b'i:' + p for p in opts.get(b'include', []))
        matchargs.extend(b'x:' + p for p in opts.get(b'exclude', []))
        opts[b'_matchfiles'] = matchargs
    elif not follow:
        opts[b'_patslog'] = list(pats)

    clauses = []
    for op, val in sorted(pycompat.iteritems(opts)):
        if not val or op not in _opt2logrevset:
            continue
        revop, listop = _opt2logrevset[op]
        if revop and b'%' not in revop:
            # fixed expression with no value interpolation
            clauses.append(revop)
        elif not listop:
            clauses.append(revsetlang.formatspec(revop, val))
        else:
            if revop:
                val = [revsetlang.formatspec(revop, v) for v in val]
            clauses.append(revsetlang.formatspec(listop, val))

    if clauses:
        return b'(' + b' and '.join(clauses) + b')'
    return None
838
838
839
839
def _initialrevs(repo, opts):
    """Return the initial set of revisions to be filtered or followed"""
    follow = opts.get(b'follow') or opts.get(b'follow_first')
    if opts.get(b'rev'):
        return scmutil.revrange(repo, opts[b'rev'])
    if follow:
        if repo.dirstate.p1() == nullid:
            # nothing to follow from an empty working-directory parent
            return smartset.baseset()
        return repo.revs(b'.')
    revs = smartset.spanset(repo)
    revs.reverse()
    return revs
853
853
854
854
def getrevs(repo, pats, opts):
    # type: (Any, Any, Any) -> Tuple[smartset.BaseSet, Optional[changesetdiffer]]
    """Return (revs, differ) where revs is a smartset

    differ is a changesetdiffer with pre-configured file matcher, or None
    when there are no revisions to show.
    """
    follow = opts.get(b'follow') or opts.get(b'follow_first')
    followfirst = opts.get(b'follow_first')
    limit = getlimit(opts)
    revs = _initialrevs(repo, opts)
    if not revs:
        # no differ is built on this path, hence the Optional return type
        return smartset.baseset(), None
    match, pats, slowpath = _makematcher(repo, revs, pats, opts)
    filematcher = None
    if follow:
        if slowpath or match.always():
            revs = dagop.revancestors(repo, revs, followfirst=followfirst)
        else:
            revs, filematcher = _fileancestors(repo, revs, match, followfirst)
        revs.reverse()
    if filematcher is None:
        filematcher = _makenofollowfilematcher(repo, pats, opts)
    if filematcher is None:
        # fall back to diffing the files selected by the command line
        def filematcher(ctx):
            return match

    expr = _makerevset(repo, match, pats, slowpath, opts)
    if opts.get(b'graph'):
        # User-specified revs might be unsorted, but don't sort before
        # _makerevset because it might depend on the order of revs
        if repo.ui.configbool(b'experimental', b'log.topo'):
            if not revs.istopo():
                revs = dagop.toposort(revs, repo.changelog.parentrevs)
                # TODO: try to iterate the set lazily
                revs = revset.baseset(list(revs), istopo=True)
        elif not (revs.isdescending() or revs.istopo()):
            revs.sort(reverse=True)
    if expr:
        matcher = revset.match(None, expr)
        revs = matcher(repo, revs)
    if limit is not None:
        revs = revs.slice(0, limit)

    differ = changesetdiffer()
    differ._makefilematcher = filematcher
    return revs, differ
902
902
903
903
904 def _parselinerangeopt(repo, opts):
904 def _parselinerangeopt(repo, opts):
905 """Parse --line-range log option and return a list of tuples (filename,
905 """Parse --line-range log option and return a list of tuples (filename,
906 (fromline, toline)).
906 (fromline, toline)).
907 """
907 """
908 linerangebyfname = []
908 linerangebyfname = []
909 for pat in opts.get(b'line_range', []):
909 for pat in opts.get(b'line_range', []):
910 try:
910 try:
911 pat, linerange = pat.rsplit(b',', 1)
911 pat, linerange = pat.rsplit(b',', 1)
912 except ValueError:
912 except ValueError:
913 raise error.Abort(_(b'malformatted line-range pattern %s') % pat)
913 raise error.Abort(_(b'malformatted line-range pattern %s') % pat)
914 try:
914 try:
915 fromline, toline = map(int, linerange.split(b':'))
915 fromline, toline = map(int, linerange.split(b':'))
916 except ValueError:
916 except ValueError:
917 raise error.Abort(_(b"invalid line range for %s") % pat)
917 raise error.Abort(_(b"invalid line range for %s") % pat)
918 msg = _(b"line range pattern '%s' must match exactly one file") % pat
918 msg = _(b"line range pattern '%s' must match exactly one file") % pat
919 fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
919 fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
920 linerangebyfname.append(
920 linerangebyfname.append(
921 (fname, util.processlinerange(fromline, toline))
921 (fname, util.processlinerange(fromline, toline))
922 )
922 )
923 return linerangebyfname
923 return linerangebyfname
924
924
925
925
926 def getlinerangerevs(repo, userrevs, opts):
926 def getlinerangerevs(repo, userrevs, opts):
927 """Return (revs, differ).
927 """Return (revs, differ).
928
928
929 "revs" are revisions obtained by processing "line-range" log options and
929 "revs" are revisions obtained by processing "line-range" log options and
930 walking block ancestors of each specified file/line-range.
930 walking block ancestors of each specified file/line-range.
931
931
932 "differ" is a changesetdiffer with pre-configured file matcher and hunks
932 "differ" is a changesetdiffer with pre-configured file matcher and hunks
933 filter.
933 filter.
934 """
934 """
935 wctx = repo[None]
935 wctx = repo[None]
936
936
937 # Two-levels map of "rev -> file ctx -> [line range]".
937 # Two-levels map of "rev -> file ctx -> [line range]".
938 linerangesbyrev = {}
938 linerangesbyrev = {}
939 for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
939 for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
940 if fname not in wctx:
940 if fname not in wctx:
941 raise error.Abort(
941 raise error.Abort(
942 _(b'cannot follow file not in parent revision: "%s"') % fname
942 _(b'cannot follow file not in parent revision: "%s"') % fname
943 )
943 )
944 fctx = wctx.filectx(fname)
944 fctx = wctx.filectx(fname)
945 for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
945 for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
946 rev = fctx.introrev()
946 rev = fctx.introrev()
947 if rev not in userrevs:
947 if rev not in userrevs:
948 continue
948 continue
949 linerangesbyrev.setdefault(rev, {}).setdefault(
949 linerangesbyrev.setdefault(rev, {}).setdefault(
950 fctx.path(), []
950 fctx.path(), []
951 ).append(linerange)
951 ).append(linerange)
952
952
953 def nofilterhunksfn(fctx, hunks):
953 def nofilterhunksfn(fctx, hunks):
954 return hunks
954 return hunks
955
955
956 def hunksfilter(ctx):
956 def hunksfilter(ctx):
957 fctxlineranges = linerangesbyrev.get(ctx.rev())
957 fctxlineranges = linerangesbyrev.get(ctx.rev())
958 if fctxlineranges is None:
958 if fctxlineranges is None:
959 return nofilterhunksfn
959 return nofilterhunksfn
960
960
961 def filterfn(fctx, hunks):
961 def filterfn(fctx, hunks):
962 lineranges = fctxlineranges.get(fctx.path())
962 lineranges = fctxlineranges.get(fctx.path())
963 if lineranges is not None:
963 if lineranges is not None:
964 for hr, lines in hunks:
964 for hr, lines in hunks:
965 if hr is None: # binary
965 if hr is None: # binary
966 yield hr, lines
966 yield hr, lines
967 continue
967 continue
968 if any(mdiff.hunkinrange(hr[2:], lr) for lr in lineranges):
968 if any(mdiff.hunkinrange(hr[2:], lr) for lr in lineranges):
969 yield hr, lines
969 yield hr, lines
970 else:
970 else:
971 for hunk in hunks:
971 for hunk in hunks:
972 yield hunk
972 yield hunk
973
973
974 return filterfn
974 return filterfn
975
975
976 def filematcher(ctx):
976 def filematcher(ctx):
977 files = list(linerangesbyrev.get(ctx.rev(), []))
977 files = list(linerangesbyrev.get(ctx.rev(), []))
978 return scmutil.matchfiles(repo, files)
978 return scmutil.matchfiles(repo, files)
979
979
980 revs = sorted(linerangesbyrev, reverse=True)
980 revs = sorted(linerangesbyrev, reverse=True)
981
981
982 differ = changesetdiffer()
982 differ = changesetdiffer()
983 differ._makefilematcher = filematcher
983 differ._makefilematcher = filematcher
984 differ._makehunksfilter = hunksfilter
984 differ._makehunksfilter = hunksfilter
985 return smartset.baseset(revs), differ
985 return smartset.baseset(revs), differ
986
986
987
987
988 def _graphnodeformatter(ui, displayer):
988 def _graphnodeformatter(ui, displayer):
989 spec = ui.config(b'ui', b'graphnodetemplate')
989 spec = ui.config(b'ui', b'graphnodetemplate')
990 if not spec:
990 if not spec:
991 return templatekw.getgraphnode # fast path for "{graphnode}"
991 return templatekw.getgraphnode # fast path for "{graphnode}"
992
992
993 spec = templater.unquotestring(spec)
993 spec = templater.unquotestring(spec)
994 if isinstance(displayer, changesettemplater):
994 if isinstance(displayer, changesettemplater):
995 # reuse cache of slow templates
995 # reuse cache of slow templates
996 tres = displayer._tresources
996 tres = displayer._tresources
997 else:
997 else:
998 tres = formatter.templateresources(ui)
998 tres = formatter.templateresources(ui)
999 templ = formatter.maketemplater(
999 templ = formatter.maketemplater(
1000 ui, spec, defaults=templatekw.keywords, resources=tres
1000 ui, spec, defaults=templatekw.keywords, resources=tres
1001 )
1001 )
1002
1002
1003 def formatnode(repo, ctx):
1003 def formatnode(repo, ctx):
1004 props = {b'ctx': ctx, b'repo': repo}
1004 props = {b'ctx': ctx, b'repo': repo}
1005 return templ.renderdefault(props)
1005 return templ.renderdefault(props)
1006
1006
1007 return formatnode
1007 return formatnode
1008
1008
1009
1009
1010 def displaygraph(ui, repo, dag, displayer, edgefn, getcopies=None, props=None):
1010 def displaygraph(ui, repo, dag, displayer, edgefn, getcopies=None, props=None):
1011 props = props or {}
1011 props = props or {}
1012 formatnode = _graphnodeformatter(ui, displayer)
1012 formatnode = _graphnodeformatter(ui, displayer)
1013 state = graphmod.asciistate()
1013 state = graphmod.asciistate()
1014 styles = state[b'styles']
1014 styles = state[b'styles']
1015
1015
1016 # only set graph styling if HGPLAIN is not set.
1016 # only set graph styling if HGPLAIN is not set.
1017 if ui.plain(b'graph'):
1017 if ui.plain(b'graph'):
1018 # set all edge styles to |, the default pre-3.8 behaviour
1018 # set all edge styles to |, the default pre-3.8 behaviour
1019 styles.update(dict.fromkeys(styles, b'|'))
1019 styles.update(dict.fromkeys(styles, b'|'))
1020 else:
1020 else:
1021 edgetypes = {
1021 edgetypes = {
1022 b'parent': graphmod.PARENT,
1022 b'parent': graphmod.PARENT,
1023 b'grandparent': graphmod.GRANDPARENT,
1023 b'grandparent': graphmod.GRANDPARENT,
1024 b'missing': graphmod.MISSINGPARENT,
1024 b'missing': graphmod.MISSINGPARENT,
1025 }
1025 }
1026 for name, key in edgetypes.items():
1026 for name, key in edgetypes.items():
1027 # experimental config: experimental.graphstyle.*
1027 # experimental config: experimental.graphstyle.*
1028 styles[key] = ui.config(
1028 styles[key] = ui.config(
1029 b'experimental', b'graphstyle.%s' % name, styles[key]
1029 b'experimental', b'graphstyle.%s' % name, styles[key]
1030 )
1030 )
1031 if not styles[key]:
1031 if not styles[key]:
1032 styles[key] = None
1032 styles[key] = None
1033
1033
1034 # experimental config: experimental.graphshorten
1034 # experimental config: experimental.graphshorten
1035 state[b'graphshorten'] = ui.configbool(b'experimental', b'graphshorten')
1035 state[b'graphshorten'] = ui.configbool(b'experimental', b'graphshorten')
1036
1036
1037 for rev, type, ctx, parents in dag:
1037 for rev, type, ctx, parents in dag:
1038 char = formatnode(repo, ctx)
1038 char = formatnode(repo, ctx)
1039 copies = getcopies(ctx) if getcopies else None
1039 copies = getcopies(ctx) if getcopies else None
1040 edges = edgefn(type, char, state, rev, parents)
1040 edges = edgefn(type, char, state, rev, parents)
1041 firstedge = next(edges)
1041 firstedge = next(edges)
1042 width = firstedge[2]
1042 width = firstedge[2]
1043 displayer.show(
1043 displayer.show(
1044 ctx, copies=copies, graphwidth=width, **pycompat.strkwargs(props)
1044 ctx, copies=copies, graphwidth=width, **pycompat.strkwargs(props)
1045 )
1045 )
1046 lines = displayer.hunk.pop(rev).split(b'\n')
1046 lines = displayer.hunk.pop(rev).split(b'\n')
1047 if not lines[-1]:
1047 if not lines[-1]:
1048 del lines[-1]
1048 del lines[-1]
1049 displayer.flush(ctx)
1049 displayer.flush(ctx)
1050 for type, char, width, coldata in itertools.chain([firstedge], edges):
1050 for type, char, width, coldata in itertools.chain([firstedge], edges):
1051 graphmod.ascii(ui, state, type, char, lines, coldata)
1051 graphmod.ascii(ui, state, type, char, lines, coldata)
1052 lines = []
1052 lines = []
1053 displayer.close()
1053 displayer.close()
1054
1054
1055
1055
1056 def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
1056 def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
1057 revdag = graphmod.dagwalker(repo, revs)
1057 revdag = graphmod.dagwalker(repo, revs)
1058 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
1058 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
1059
1059
1060
1060
1061 def displayrevs(ui, repo, revs, displayer, getcopies):
1061 def displayrevs(ui, repo, revs, displayer, getcopies):
1062 for rev in revs:
1062 for rev in revs:
1063 ctx = repo[rev]
1063 ctx = repo[rev]
1064 copies = getcopies(ctx) if getcopies else None
1064 copies = getcopies(ctx) if getcopies else None
1065 displayer.show(ctx, copies=copies)
1065 displayer.show(ctx, copies=copies)
1066 displayer.flush(ctx)
1066 displayer.flush(ctx)
1067 displayer.close()
1067 displayer.close()
1068
1068
1069
1069
1070 def checkunsupportedgraphflags(pats, opts):
1070 def checkunsupportedgraphflags(pats, opts):
1071 for op in [b"newest_first"]:
1071 for op in [b"newest_first"]:
1072 if op in opts and opts[op]:
1072 if op in opts and opts[op]:
1073 raise error.Abort(
1073 raise error.Abort(
1074 _(b"-G/--graph option is incompatible with --%s")
1074 _(b"-G/--graph option is incompatible with --%s")
1075 % op.replace(b"_", b"-")
1075 % op.replace(b"_", b"-")
1076 )
1076 )
1077
1077
1078
1078
1079 def graphrevs(repo, nodes, opts):
1079 def graphrevs(repo, nodes, opts):
1080 limit = getlimit(opts)
1080 limit = getlimit(opts)
1081 nodes.reverse()
1081 nodes.reverse()
1082 if limit is not None:
1082 if limit is not None:
1083 nodes = nodes[:limit]
1083 nodes = nodes[:limit]
1084 return graphmod.nodes(repo, nodes)
1084 return graphmod.nodes(repo, nodes)
@@ -1,517 +1,517
1 # mail.py - mail sending bits for mercurial
1 # mail.py - mail sending bits for mercurial
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import email
10 import email
11 import email.charset
11 import email.charset
12 import email.generator
12 import email.generator
13 import email.header
13 import email.header
14 import email.message
14 import email.message
15 import email.parser
15 import email.parser
16 import io
16 import io
17 import os
17 import os
18 import smtplib
18 import smtplib
19 import socket
19 import socket
20 import time
20 import time
21
21
22 from .i18n import _
22 from .i18n import _
23 from .pycompat import (
23 from .pycompat import (
24 getattr,
24 getattr,
25 open,
25 open,
26 )
26 )
27 from . import (
27 from . import (
28 encoding,
28 encoding,
29 error,
29 error,
30 pycompat,
30 pycompat,
31 sslutil,
31 sslutil,
32 util,
32 util,
33 )
33 )
34 from .utils import (
34 from .utils import (
35 procutil,
35 procutil,
36 stringutil,
36 stringutil,
37 )
37 )
38
38
39 if not globals(): # hide this from non-pytype users
39 if pycompat.TYPE_CHECKING:
40 from typing import Any, List, Tuple, Union
40 from typing import Any, List, Tuple, Union
41
41
42 # keep pyflakes happy
42 # keep pyflakes happy
43 assert all((Any, List, Tuple, Union))
43 assert all((Any, List, Tuple, Union))
44
44
45
45
46 class STARTTLS(smtplib.SMTP):
46 class STARTTLS(smtplib.SMTP):
47 '''Derived class to verify the peer certificate for STARTTLS.
47 '''Derived class to verify the peer certificate for STARTTLS.
48
48
49 This class allows to pass any keyword arguments to SSL socket creation.
49 This class allows to pass any keyword arguments to SSL socket creation.
50 '''
50 '''
51
51
52 def __init__(self, ui, host=None, **kwargs):
52 def __init__(self, ui, host=None, **kwargs):
53 smtplib.SMTP.__init__(self, **kwargs)
53 smtplib.SMTP.__init__(self, **kwargs)
54 self._ui = ui
54 self._ui = ui
55 self._host = host
55 self._host = host
56
56
57 def starttls(self, keyfile=None, certfile=None):
57 def starttls(self, keyfile=None, certfile=None):
58 if not self.has_extn("starttls"):
58 if not self.has_extn("starttls"):
59 msg = b"STARTTLS extension not supported by server"
59 msg = b"STARTTLS extension not supported by server"
60 raise smtplib.SMTPException(msg)
60 raise smtplib.SMTPException(msg)
61 (resp, reply) = self.docmd("STARTTLS")
61 (resp, reply) = self.docmd("STARTTLS")
62 if resp == 220:
62 if resp == 220:
63 self.sock = sslutil.wrapsocket(
63 self.sock = sslutil.wrapsocket(
64 self.sock,
64 self.sock,
65 keyfile,
65 keyfile,
66 certfile,
66 certfile,
67 ui=self._ui,
67 ui=self._ui,
68 serverhostname=self._host,
68 serverhostname=self._host,
69 )
69 )
70 self.file = self.sock.makefile("rb")
70 self.file = self.sock.makefile("rb")
71 self.helo_resp = None
71 self.helo_resp = None
72 self.ehlo_resp = None
72 self.ehlo_resp = None
73 self.esmtp_features = {}
73 self.esmtp_features = {}
74 self.does_esmtp = 0
74 self.does_esmtp = 0
75 return (resp, reply)
75 return (resp, reply)
76
76
77
77
78 class SMTPS(smtplib.SMTP):
78 class SMTPS(smtplib.SMTP):
79 '''Derived class to verify the peer certificate for SMTPS.
79 '''Derived class to verify the peer certificate for SMTPS.
80
80
81 This class allows to pass any keyword arguments to SSL socket creation.
81 This class allows to pass any keyword arguments to SSL socket creation.
82 '''
82 '''
83
83
84 def __init__(self, ui, keyfile=None, certfile=None, host=None, **kwargs):
84 def __init__(self, ui, keyfile=None, certfile=None, host=None, **kwargs):
85 self.keyfile = keyfile
85 self.keyfile = keyfile
86 self.certfile = certfile
86 self.certfile = certfile
87 smtplib.SMTP.__init__(self, **kwargs)
87 smtplib.SMTP.__init__(self, **kwargs)
88 self._host = host
88 self._host = host
89 self.default_port = smtplib.SMTP_SSL_PORT
89 self.default_port = smtplib.SMTP_SSL_PORT
90 self._ui = ui
90 self._ui = ui
91
91
92 def _get_socket(self, host, port, timeout):
92 def _get_socket(self, host, port, timeout):
93 if self.debuglevel > 0:
93 if self.debuglevel > 0:
94 self._ui.debug(b'connect: %r\n' % ((host, port),))
94 self._ui.debug(b'connect: %r\n' % ((host, port),))
95 new_socket = socket.create_connection((host, port), timeout)
95 new_socket = socket.create_connection((host, port), timeout)
96 new_socket = sslutil.wrapsocket(
96 new_socket = sslutil.wrapsocket(
97 new_socket,
97 new_socket,
98 self.keyfile,
98 self.keyfile,
99 self.certfile,
99 self.certfile,
100 ui=self._ui,
100 ui=self._ui,
101 serverhostname=self._host,
101 serverhostname=self._host,
102 )
102 )
103 self.file = new_socket.makefile('rb')
103 self.file = new_socket.makefile('rb')
104 return new_socket
104 return new_socket
105
105
106
106
107 def _pyhastls():
107 def _pyhastls():
108 # type: () -> bool
108 # type: () -> bool
109 """Returns true iff Python has TLS support, false otherwise."""
109 """Returns true iff Python has TLS support, false otherwise."""
110 try:
110 try:
111 import ssl
111 import ssl
112
112
113 getattr(ssl, 'HAS_TLS', False)
113 getattr(ssl, 'HAS_TLS', False)
114 return True
114 return True
115 except ImportError:
115 except ImportError:
116 return False
116 return False
117
117
118
118
119 def _smtp(ui):
119 def _smtp(ui):
120 '''build an smtp connection and return a function to send mail'''
120 '''build an smtp connection and return a function to send mail'''
121 local_hostname = ui.config(b'smtp', b'local_hostname')
121 local_hostname = ui.config(b'smtp', b'local_hostname')
122 tls = ui.config(b'smtp', b'tls')
122 tls = ui.config(b'smtp', b'tls')
123 # backward compatible: when tls = true, we use starttls.
123 # backward compatible: when tls = true, we use starttls.
124 starttls = tls == b'starttls' or stringutil.parsebool(tls)
124 starttls = tls == b'starttls' or stringutil.parsebool(tls)
125 smtps = tls == b'smtps'
125 smtps = tls == b'smtps'
126 if (starttls or smtps) and not _pyhastls():
126 if (starttls or smtps) and not _pyhastls():
127 raise error.Abort(_(b"can't use TLS: Python SSL support not installed"))
127 raise error.Abort(_(b"can't use TLS: Python SSL support not installed"))
128 mailhost = ui.config(b'smtp', b'host')
128 mailhost = ui.config(b'smtp', b'host')
129 if not mailhost:
129 if not mailhost:
130 raise error.Abort(_(b'smtp.host not configured - cannot send mail'))
130 raise error.Abort(_(b'smtp.host not configured - cannot send mail'))
131 if smtps:
131 if smtps:
132 ui.note(_(b'(using smtps)\n'))
132 ui.note(_(b'(using smtps)\n'))
133 s = SMTPS(ui, local_hostname=local_hostname, host=mailhost)
133 s = SMTPS(ui, local_hostname=local_hostname, host=mailhost)
134 elif starttls:
134 elif starttls:
135 s = STARTTLS(ui, local_hostname=local_hostname, host=mailhost)
135 s = STARTTLS(ui, local_hostname=local_hostname, host=mailhost)
136 else:
136 else:
137 s = smtplib.SMTP(local_hostname=local_hostname)
137 s = smtplib.SMTP(local_hostname=local_hostname)
138 if smtps:
138 if smtps:
139 defaultport = 465
139 defaultport = 465
140 else:
140 else:
141 defaultport = 25
141 defaultport = 25
142 mailport = util.getport(ui.config(b'smtp', b'port', defaultport))
142 mailport = util.getport(ui.config(b'smtp', b'port', defaultport))
143 ui.note(_(b'sending mail: smtp host %s, port %d\n') % (mailhost, mailport))
143 ui.note(_(b'sending mail: smtp host %s, port %d\n') % (mailhost, mailport))
144 s.connect(host=mailhost, port=mailport)
144 s.connect(host=mailhost, port=mailport)
145 if starttls:
145 if starttls:
146 ui.note(_(b'(using starttls)\n'))
146 ui.note(_(b'(using starttls)\n'))
147 s.ehlo()
147 s.ehlo()
148 s.starttls()
148 s.starttls()
149 s.ehlo()
149 s.ehlo()
150 if starttls or smtps:
150 if starttls or smtps:
151 ui.note(_(b'(verifying remote certificate)\n'))
151 ui.note(_(b'(verifying remote certificate)\n'))
152 sslutil.validatesocket(s.sock)
152 sslutil.validatesocket(s.sock)
153 username = ui.config(b'smtp', b'username')
153 username = ui.config(b'smtp', b'username')
154 password = ui.config(b'smtp', b'password')
154 password = ui.config(b'smtp', b'password')
155 if username:
155 if username:
156 if password:
156 if password:
157 password = encoding.strfromlocal(password)
157 password = encoding.strfromlocal(password)
158 else:
158 else:
159 password = ui.getpass()
159 password = ui.getpass()
160 if username and password:
160 if username and password:
161 ui.note(_(b'(authenticating to mail server as %s)\n') % username)
161 ui.note(_(b'(authenticating to mail server as %s)\n') % username)
162 username = encoding.strfromlocal(username)
162 username = encoding.strfromlocal(username)
163 try:
163 try:
164 s.login(username, password)
164 s.login(username, password)
165 except smtplib.SMTPException as inst:
165 except smtplib.SMTPException as inst:
166 raise error.Abort(inst)
166 raise error.Abort(inst)
167
167
168 def send(sender, recipients, msg):
168 def send(sender, recipients, msg):
169 try:
169 try:
170 return s.sendmail(sender, recipients, msg)
170 return s.sendmail(sender, recipients, msg)
171 except smtplib.SMTPRecipientsRefused as inst:
171 except smtplib.SMTPRecipientsRefused as inst:
172 recipients = [r[1] for r in inst.recipients.values()]
172 recipients = [r[1] for r in inst.recipients.values()]
173 raise error.Abort(b'\n' + b'\n'.join(recipients))
173 raise error.Abort(b'\n' + b'\n'.join(recipients))
174 except smtplib.SMTPException as inst:
174 except smtplib.SMTPException as inst:
175 raise error.Abort(inst)
175 raise error.Abort(inst)
176
176
177 return send
177 return send
178
178
179
179
180 def _sendmail(ui, sender, recipients, msg):
180 def _sendmail(ui, sender, recipients, msg):
181 '''send mail using sendmail.'''
181 '''send mail using sendmail.'''
182 program = ui.config(b'email', b'method')
182 program = ui.config(b'email', b'method')
183
183
184 def stremail(x):
184 def stremail(x):
185 return procutil.shellquote(stringutil.email(encoding.strtolocal(x)))
185 return procutil.shellquote(stringutil.email(encoding.strtolocal(x)))
186
186
187 cmdline = b'%s -f %s %s' % (
187 cmdline = b'%s -f %s %s' % (
188 program,
188 program,
189 stremail(sender),
189 stremail(sender),
190 b' '.join(map(stremail, recipients)),
190 b' '.join(map(stremail, recipients)),
191 )
191 )
192 ui.note(_(b'sending mail: %s\n') % cmdline)
192 ui.note(_(b'sending mail: %s\n') % cmdline)
193 fp = procutil.popen(cmdline, b'wb')
193 fp = procutil.popen(cmdline, b'wb')
194 fp.write(util.tonativeeol(msg))
194 fp.write(util.tonativeeol(msg))
195 ret = fp.close()
195 ret = fp.close()
196 if ret:
196 if ret:
197 raise error.Abort(
197 raise error.Abort(
198 b'%s %s'
198 b'%s %s'
199 % (
199 % (
200 os.path.basename(program.split(None, 1)[0]),
200 os.path.basename(program.split(None, 1)[0]),
201 procutil.explainexit(ret),
201 procutil.explainexit(ret),
202 )
202 )
203 )
203 )
204
204
205
205
206 def _mbox(mbox, sender, recipients, msg):
206 def _mbox(mbox, sender, recipients, msg):
207 '''write mails to mbox'''
207 '''write mails to mbox'''
208 fp = open(mbox, b'ab+')
208 fp = open(mbox, b'ab+')
209 # Should be time.asctime(), but Windows prints 2-characters day
209 # Should be time.asctime(), but Windows prints 2-characters day
210 # of month instead of one. Make them print the same thing.
210 # of month instead of one. Make them print the same thing.
211 date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
211 date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
212 fp.write(
212 fp.write(
213 b'From %s %s\n'
213 b'From %s %s\n'
214 % (encoding.strtolocal(sender), encoding.strtolocal(date))
214 % (encoding.strtolocal(sender), encoding.strtolocal(date))
215 )
215 )
216 fp.write(msg)
216 fp.write(msg)
217 fp.write(b'\n\n')
217 fp.write(b'\n\n')
218 fp.close()
218 fp.close()
219
219
220
220
221 def connect(ui, mbox=None):
221 def connect(ui, mbox=None):
222 '''make a mail connection. return a function to send mail.
222 '''make a mail connection. return a function to send mail.
223 call as sendmail(sender, list-of-recipients, msg).'''
223 call as sendmail(sender, list-of-recipients, msg).'''
224 if mbox:
224 if mbox:
225 open(mbox, b'wb').close()
225 open(mbox, b'wb').close()
226 return lambda s, r, m: _mbox(mbox, s, r, m)
226 return lambda s, r, m: _mbox(mbox, s, r, m)
227 if ui.config(b'email', b'method') == b'smtp':
227 if ui.config(b'email', b'method') == b'smtp':
228 return _smtp(ui)
228 return _smtp(ui)
229 return lambda s, r, m: _sendmail(ui, s, r, m)
229 return lambda s, r, m: _sendmail(ui, s, r, m)
230
230
231
231
232 def sendmail(ui, sender, recipients, msg, mbox=None):
232 def sendmail(ui, sender, recipients, msg, mbox=None):
233 send = connect(ui, mbox=mbox)
233 send = connect(ui, mbox=mbox)
234 return send(sender, recipients, msg)
234 return send(sender, recipients, msg)
235
235
236
236
237 def validateconfig(ui):
237 def validateconfig(ui):
238 '''determine if we have enough config data to try sending email.'''
238 '''determine if we have enough config data to try sending email.'''
239 method = ui.config(b'email', b'method')
239 method = ui.config(b'email', b'method')
240 if method == b'smtp':
240 if method == b'smtp':
241 if not ui.config(b'smtp', b'host'):
241 if not ui.config(b'smtp', b'host'):
242 raise error.Abort(
242 raise error.Abort(
243 _(
243 _(
244 b'smtp specified as email transport, '
244 b'smtp specified as email transport, '
245 b'but no smtp host configured'
245 b'but no smtp host configured'
246 )
246 )
247 )
247 )
248 else:
248 else:
249 if not procutil.findexe(method):
249 if not procutil.findexe(method):
250 raise error.Abort(
250 raise error.Abort(
251 _(b'%r specified as email transport, but not in PATH') % method
251 _(b'%r specified as email transport, but not in PATH') % method
252 )
252 )
253
253
254
254
255 def codec2iana(cs):
255 def codec2iana(cs):
256 # type: (str) -> str
256 # type: (str) -> str
257 ''''''
257 ''''''
258 cs = email.charset.Charset(cs).input_charset.lower()
258 cs = email.charset.Charset(cs).input_charset.lower()
259
259
260 # "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1"
260 # "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1"
261 if cs.startswith("iso") and not cs.startswith("iso-"):
261 if cs.startswith("iso") and not cs.startswith("iso-"):
262 return "iso-" + cs[3:]
262 return "iso-" + cs[3:]
263 return cs
263 return cs
264
264
265
265
266 def mimetextpatch(s, subtype='plain', display=False):
266 def mimetextpatch(s, subtype='plain', display=False):
267 # type: (bytes, str, bool) -> email.message.Message
267 # type: (bytes, str, bool) -> email.message.Message
268 '''Return MIME message suitable for a patch.
268 '''Return MIME message suitable for a patch.
269 Charset will be detected by first trying to decode as us-ascii, then utf-8,
269 Charset will be detected by first trying to decode as us-ascii, then utf-8,
270 and finally the global encodings. If all those fail, fall back to
270 and finally the global encodings. If all those fail, fall back to
271 ISO-8859-1, an encoding with that allows all byte sequences.
271 ISO-8859-1, an encoding with that allows all byte sequences.
272 Transfer encodings will be used if necessary.'''
272 Transfer encodings will be used if necessary.'''
273
273
274 cs = [
274 cs = [
275 'us-ascii',
275 'us-ascii',
276 'utf-8',
276 'utf-8',
277 pycompat.sysstr(encoding.encoding),
277 pycompat.sysstr(encoding.encoding),
278 pycompat.sysstr(encoding.fallbackencoding),
278 pycompat.sysstr(encoding.fallbackencoding),
279 ]
279 ]
280 if display:
280 if display:
281 cs = ['us-ascii']
281 cs = ['us-ascii']
282 for charset in cs:
282 for charset in cs:
283 try:
283 try:
284 s.decode(charset)
284 s.decode(charset)
285 return mimetextqp(s, subtype, codec2iana(charset))
285 return mimetextqp(s, subtype, codec2iana(charset))
286 except UnicodeDecodeError:
286 except UnicodeDecodeError:
287 pass
287 pass
288
288
289 return mimetextqp(s, subtype, "iso-8859-1")
289 return mimetextqp(s, subtype, "iso-8859-1")
290
290
291
291
292 def mimetextqp(body, subtype, charset):
292 def mimetextqp(body, subtype, charset):
293 # type: (bytes, str, str) -> email.message.Message
293 # type: (bytes, str, str) -> email.message.Message
294 '''Return MIME message.
294 '''Return MIME message.
295 Quoted-printable transfer encoding will be used if necessary.
295 Quoted-printable transfer encoding will be used if necessary.
296 '''
296 '''
297 cs = email.charset.Charset(charset)
297 cs = email.charset.Charset(charset)
298 msg = email.message.Message()
298 msg = email.message.Message()
299 msg.set_type('text/' + subtype)
299 msg.set_type('text/' + subtype)
300
300
301 for line in body.splitlines():
301 for line in body.splitlines():
302 if len(line) > 950:
302 if len(line) > 950:
303 cs.body_encoding = email.charset.QP
303 cs.body_encoding = email.charset.QP
304 break
304 break
305
305
306 # On Python 2, this simply assigns a value. Python 3 inspects
306 # On Python 2, this simply assigns a value. Python 3 inspects
307 # body and does different things depending on whether it has
307 # body and does different things depending on whether it has
308 # encode() or decode() attributes. We can get the old behavior
308 # encode() or decode() attributes. We can get the old behavior
309 # if we pass a str and charset is None and we call set_charset().
309 # if we pass a str and charset is None and we call set_charset().
310 # But we may get into trouble later due to Python attempting to
310 # But we may get into trouble later due to Python attempting to
311 # encode/decode using the registered charset (or attempting to
311 # encode/decode using the registered charset (or attempting to
312 # use ascii in the absence of a charset).
312 # use ascii in the absence of a charset).
313 msg.set_payload(body, cs)
313 msg.set_payload(body, cs)
314
314
315 return msg
315 return msg
316
316
317
317
def _charsets(ui):
    # type: (Any) -> List[str]
    '''Obtains charsets to send mail parts not containing patches.'''
    # start from the user-configured charsets, lowercased native strings
    result = [
        pycompat.sysstr(c.lower())
        for c in ui.configlist(b'email', b'charsets')
    ]
    # append fallback charsets, deduplicating while keeping order
    for fallback in (
        pycompat.sysstr(encoding.fallbackencoding.lower()),
        pycompat.sysstr(encoding.encoding.lower()),
        'utf-8',
    ):
        if fallback not in result:
            result.append(fallback)
    # drop any ascii variants from the candidate list
    return [c for c in result if not c.endswith('ascii')]
334
334
335
335
def _encode(ui, s, charsets):
    # type: (Any, bytes, List[str]) -> Tuple[bytes, str]
    '''Returns (converted) string, charset tuple.
    Finds out best charset by cycling through sendcharsets in descending
    order. Tries both encoding and fallbackencoding for input. Only as
    last resort send as is in fake ascii.
    Caveat: Do not use for mail parts containing patches!'''
    sendcharsets = charsets or _charsets(ui)
    if not isinstance(s, bytes):
        # We have unicode data, which we need to try and encode to
        # some reasonable-ish encoding. Try the encodings the user
        # wants, and fall back to garbage-in-ascii.
        for ocs in sendcharsets:
            try:
                return s.encode(ocs), ocs
            except UnicodeEncodeError:
                # this charset cannot represent s; try the next one
                pass
            except LookupError:
                ui.warn(
                    _(b'ignoring invalid sendcharset: %s\n')
                    % pycompat.sysbytes(ocs)
                )
        else:
            # The loop above never breaks, so this else-clause runs
            # whenever no charset succeeded in returning early.
            # Everything failed, ascii-armor what we've got and send it.
            return s.encode('ascii', 'backslashreplace'), 'us-ascii'
    # We have a bytes of unknown encoding. We'll try and guess a valid
    # encoding, falling back to pretending we had ascii even though we
    # know that's wrong.
    try:
        s.decode('ascii')
    except UnicodeDecodeError:
        # not pure ascii: guess the input encoding, then re-encode into
        # the first output charset that can hold the decoded text
        for ics in (encoding.encoding, encoding.fallbackencoding):
            ics = pycompat.sysstr(ics)
            try:
                u = s.decode(ics)
            except UnicodeDecodeError:
                continue
            for ocs in sendcharsets:
                try:
                    return u.encode(ocs), ocs
                except UnicodeEncodeError:
                    pass
                except LookupError:
                    ui.warn(
                        _(b'ignoring invalid sendcharset: %s\n')
                        % pycompat.sysbytes(ocs)
                    )
    # if ascii, or all conversion attempts fail, send (broken) ascii
    return s, 'us-ascii'
385
385
386
386
def headencode(ui, s, charsets=None, display=False):
    # type: (Any, Union[bytes, str], List[str], bool) -> str
    '''Returns RFC-2047 compliant header from given string.'''
    if display:
        # display-only output: no RFC-2047 encoding needed
        return encoding.strfromlocal(s)
    # split into words?
    encoded, charset = _encode(ui, s, charsets)
    return email.header.Header(encoded, charset).encode()
395
395
396
396
def _addressencode(ui, name, addr, charsets=None):
    # type: (Any, str, str, List[str]) -> str
    """Encode one (name, addr) pair into an address header string.

    The display name goes through RFC-2047 header encoding; the domain of
    the address is IDNA-encoded. Raises error.Abort for non-ascii local
    parts or otherwise undecodable addresses.
    """
    addr = encoding.strtolocal(addr)
    name = headencode(ui, name, charsets)
    try:
        acc, dom = addr.split(b'@')
        acc.decode('ascii')  # the local part must be plain ascii
        dom = dom.decode(pycompat.sysstr(encoding.encoding)).encode('idna')
        addr = b'%s@%s' % (acc, dom)
    except UnicodeDecodeError:
        raise error.Abort(_(b'invalid email address: %s') % addr)
    except ValueError:
        # split() found no single '@': not a full address, so accept it
        # as-is when it is plain ascii
        try:
            # too strict?
            addr.decode('ascii')
        except UnicodeDecodeError:
            raise error.Abort(_(b'invalid local address: %s') % addr)
    return email.utils.formataddr((name, encoding.strfromlocal(addr)))
415
415
416
416
def addressencode(ui, address, charsets=None, display=False):
    # type: (Any, bytes, List[str], bool) -> str
    '''Turns address into RFC-2047 compliant header.'''
    if not address or display:
        # nothing to encode, or caller wants the raw display form
        return encoding.strfromlocal(address or b'')
    name, addr = email.utils.parseaddr(encoding.strfromlocal(address))
    return _addressencode(ui, name, addr, charsets)
424
424
425
425
def addrlistencode(ui, addrs, charsets=None, display=False):
    # type: (Any, List[bytes], List[str], bool) -> List[str]
    '''Turns a list of addresses into a list of RFC-2047 compliant headers.
    A single element of input list may contain multiple addresses, but output
    always has one address per item'''
    for a in addrs:
        assert isinstance(a, bytes), '%r unexpectedly not a bytestr' % a
    straddrs = [encoding.strfromlocal(a) for a in addrs]
    if display:
        # display mode: strip whitespace, drop empty entries, no encoding
        return [s.strip() for s in straddrs if s.strip()]

    # getaddresses() splits multi-address entries into (name, addr) pairs
    return [
        _addressencode(ui, name, addr, charsets)
        for name, addr in email.utils.getaddresses(straddrs)
        if name or addr
    ]
444
444
445
445
def mimeencode(ui, s, charsets=None, display=False):
    # type: (Any, bytes, List[str], bool) -> email.message.Message
    '''creates mime text object, encodes it if needed, and sets
    charset and transfer-encoding accordingly.'''
    if display:
        # display mode skips charset conversion entirely
        charset = 'us-ascii'
    else:
        s, charset = _encode(ui, s, charsets)
    return mimetextqp(s, 'plain', charset)
454
454
455
455
# Version-dependent email plumbing: on Python 3 messages must be generated
# and parsed as bytes; on Python 2, str is already bytes so the plain
# Parser/Generator suffice.
if pycompat.ispy3:

    Generator = email.generator.BytesGenerator

    def parse(fp):
        # type: (Any) -> email.message.Message
        """Parse a message from a binary file-like object *fp*."""
        ep = email.parser.Parser()
        # disable the "universal newlines" mode, which isn't binary safe.
        # I have no idea if ascii/surrogateescape is correct, but that's
        # what the standard Python email parser does.
        fp = io.TextIOWrapper(
            fp, encoding='ascii', errors='surrogateescape', newline=chr(10)
        )
        try:
            return ep.parse(fp)
        finally:
            # detach() releases fp without closing the underlying stream,
            # which still belongs to the caller
            fp.detach()

    def parsebytes(data):
        # type: (bytes) -> email.message.Message
        """Parse a message from a bytes blob."""
        ep = email.parser.BytesParser()
        return ep.parsebytes(data)


else:

    Generator = email.generator.Generator

    def parse(fp):
        # type: (Any) -> email.message.Message
        """Parse a message from a file-like object *fp*."""
        ep = email.parser.Parser()
        return ep.parse(fp)

    def parsebytes(data):
        # type: (str) -> email.message.Message
        """Parse a message from a (py2) str blob."""
        ep = email.parser.Parser()
        return ep.parsestr(data)
493
493
494
494
def headdecode(s):
    # type: (Union[email.header.Header, bytes]) -> bytes
    '''Decodes RFC-2047 header'''

    def decodepart(part, charset):
        # decode one (part, charset) pair from decode_header() to unicode
        if charset is not None:
            try:
                return part.decode(charset)
            except (UnicodeDecodeError, LookupError):
                pass
        # On Python 3, decode_header() may return either bytes or unicode
        # depending on whether the header has =?<charset>? or not
        if isinstance(part, type(u'')):
            return part
        try:
            return part.decode('UTF-8')
        except UnicodeDecodeError:
            # last resort: latin-1 can decode any byte sequence
            return part.decode('ISO-8859-1')

    uparts = [
        decodepart(part, charset)
        for part, charset in email.header.decode_header(s)
    ]
    return encoding.unitolocal(u' '.join(uparts))
@@ -1,506 +1,512
1 # pycompat.py - portability shim for python 3
1 # pycompat.py - portability shim for python 3
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 """Mercurial portability shim for python 3.
6 """Mercurial portability shim for python 3.
7
7
8 This contains aliases to hide python version-specific details from the core.
8 This contains aliases to hide python version-specific details from the core.
9 """
9 """
10
10
11 from __future__ import absolute_import
11 from __future__ import absolute_import
12
12
13 import getopt
13 import getopt
14 import inspect
14 import inspect
15 import json
15 import json
16 import os
16 import os
17 import shlex
17 import shlex
18 import sys
18 import sys
19 import tempfile
19 import tempfile
20
20
# interpreter flags: running under Python 3? under PyPy?
ispy3 = sys.version_info[0] >= 3
ispypy = '__pypy__' in sys.builtin_module_names
TYPE_CHECKING = False

if not globals():  # hide this from non-pytype users
    # `globals()` is always truthy when this module actually runs (names
    # above are already bound), so this block never executes at runtime.
    # The pytype static analyzer still evaluates it, giving type checkers
    # the real typing.TYPE_CHECKING value without a runtime import.
    import typing

    TYPE_CHECKING = typing.TYPE_CHECKING
23
29
if not ispy3:
    # Python 2: import the pre-py3 module names, and use the vendored
    # concurrent.futures backport.
    import cookielib
    import cPickle as pickle
    import httplib
    import Queue as queue
    import SocketServer as socketserver
    import xmlrpclib

    from .thirdparty.concurrent import futures

    def future_set_exception_info(f, exc_info):
        """Set an (exception, traceback) pair on a future."""
        f.set_exception_info(*exc_info)


else:
    # Python 3: the renamed stdlib modules, aliased back to their py2
    # names so the rest of the codebase can use one spelling.
    import concurrent.futures as futures
    import http.cookiejar as cookielib
    import http.client as httplib
    import pickle
    import queue as queue
    import socketserver
    import xmlrpc.client as xmlrpclib

    def future_set_exception_info(f, exc_info):
        """Set an (exception, traceback) pair on a future.

        Python 3 futures have no set_exception_info(); only the exception
        object is passed and the traceback travels with it.
        """
        f.set_exception(exc_info[0])
49
55
50
56
def identity(a):
    """Return *a* unchanged (no-op conversion function)."""
    return a
53
59
54
60
55 def _rapply(f, xs):
61 def _rapply(f, xs):
56 if xs is None:
62 if xs is None:
57 # assume None means non-value of optional data
63 # assume None means non-value of optional data
58 return xs
64 return xs
59 if isinstance(xs, (list, set, tuple)):
65 if isinstance(xs, (list, set, tuple)):
60 return type(xs)(_rapply(f, x) for x in xs)
66 return type(xs)(_rapply(f, x) for x in xs)
61 if isinstance(xs, dict):
67 if isinstance(xs, dict):
62 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
68 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
63 return f(xs)
69 return f(xs)
64
70
65
71
def rapply(f, xs):
    """Apply function recursively to every item preserving the data structure

    >>> def f(x):
    ...     return 'f(%s)' % x
    >>> rapply(f, None) is None
    True
    >>> rapply(f, 'a')
    'f(a)'
    >>> rapply(f, {'a'}) == {'f(a)'}
    True
    >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
    ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]

    >>> xs = [object()]
    >>> rapply(identity, xs) is xs
    True
    """
    # identity makes the walk a no-op, so skip it (fast path mainly for py2)
    return xs if f is identity else _rapply(f, xs)
88
94
89
95
90 if ispy3:
96 if ispy3:
91 import builtins
97 import builtins
92 import codecs
98 import codecs
93 import functools
99 import functools
94 import io
100 import io
95 import struct
101 import struct
96
102
97 if os.name == r'nt' and sys.version_info >= (3, 6):
103 if os.name == r'nt' and sys.version_info >= (3, 6):
98 # MBCS (or ANSI) filesystem encoding must be used as before.
104 # MBCS (or ANSI) filesystem encoding must be used as before.
99 # Otherwise non-ASCII filenames in existing repositories would be
105 # Otherwise non-ASCII filenames in existing repositories would be
100 # corrupted.
106 # corrupted.
101 # This must be set once prior to any fsencode/fsdecode calls.
107 # This must be set once prior to any fsencode/fsdecode calls.
102 sys._enablelegacywindowsfsencoding() # pytype: disable=module-attr
108 sys._enablelegacywindowsfsencoding() # pytype: disable=module-attr
103
109
104 fsencode = os.fsencode
110 fsencode = os.fsencode
105 fsdecode = os.fsdecode
111 fsdecode = os.fsdecode
106 oscurdir = os.curdir.encode('ascii')
112 oscurdir = os.curdir.encode('ascii')
107 oslinesep = os.linesep.encode('ascii')
113 oslinesep = os.linesep.encode('ascii')
108 osname = os.name.encode('ascii')
114 osname = os.name.encode('ascii')
109 ospathsep = os.pathsep.encode('ascii')
115 ospathsep = os.pathsep.encode('ascii')
110 ospardir = os.pardir.encode('ascii')
116 ospardir = os.pardir.encode('ascii')
111 ossep = os.sep.encode('ascii')
117 ossep = os.sep.encode('ascii')
112 osaltsep = os.altsep
118 osaltsep = os.altsep
113 if osaltsep:
119 if osaltsep:
114 osaltsep = osaltsep.encode('ascii')
120 osaltsep = osaltsep.encode('ascii')
115
121
116 sysplatform = sys.platform.encode('ascii')
122 sysplatform = sys.platform.encode('ascii')
117 sysexecutable = sys.executable
123 sysexecutable = sys.executable
118 if sysexecutable:
124 if sysexecutable:
119 sysexecutable = os.fsencode(sysexecutable)
125 sysexecutable = os.fsencode(sysexecutable)
120 bytesio = io.BytesIO
126 bytesio = io.BytesIO
121 # TODO deprecate stringio name, as it is a lie on Python 3.
127 # TODO deprecate stringio name, as it is a lie on Python 3.
122 stringio = bytesio
128 stringio = bytesio
123
129
124 def maplist(*args):
130 def maplist(*args):
125 return list(map(*args))
131 return list(map(*args))
126
132
127 def rangelist(*args):
133 def rangelist(*args):
128 return list(range(*args))
134 return list(range(*args))
129
135
130 def ziplist(*args):
136 def ziplist(*args):
131 return list(zip(*args))
137 return list(zip(*args))
132
138
133 rawinput = input
139 rawinput = input
134 getargspec = inspect.getfullargspec
140 getargspec = inspect.getfullargspec
135
141
136 long = int
142 long = int
137
143
138 # TODO: .buffer might not exist if std streams were replaced; we'll need
144 # TODO: .buffer might not exist if std streams were replaced; we'll need
139 # a silly wrapper to make a bytes stream backed by a unicode one.
145 # a silly wrapper to make a bytes stream backed by a unicode one.
140 stdin = sys.stdin.buffer
146 stdin = sys.stdin.buffer
141 stdout = sys.stdout.buffer
147 stdout = sys.stdout.buffer
142 stderr = sys.stderr.buffer
148 stderr = sys.stderr.buffer
143
149
144 # Since Python 3 converts argv to wchar_t type by Py_DecodeLocale() on Unix,
150 # Since Python 3 converts argv to wchar_t type by Py_DecodeLocale() on Unix,
145 # we can use os.fsencode() to get back bytes argv.
151 # we can use os.fsencode() to get back bytes argv.
146 #
152 #
147 # https://hg.python.org/cpython/file/v3.5.1/Programs/python.c#l55
153 # https://hg.python.org/cpython/file/v3.5.1/Programs/python.c#l55
148 #
154 #
149 # On Windows, the native argv is unicode and is converted to MBCS bytes
155 # On Windows, the native argv is unicode and is converted to MBCS bytes
150 # since we do enable the legacy filesystem encoding.
156 # since we do enable the legacy filesystem encoding.
151 if getattr(sys, 'argv', None) is not None:
157 if getattr(sys, 'argv', None) is not None:
152 sysargv = list(map(os.fsencode, sys.argv))
158 sysargv = list(map(os.fsencode, sys.argv))
153
159
154 bytechr = struct.Struct('>B').pack
160 bytechr = struct.Struct('>B').pack
155 byterepr = b'%r'.__mod__
161 byterepr = b'%r'.__mod__
156
162
157 class bytestr(bytes):
163 class bytestr(bytes):
158 """A bytes which mostly acts as a Python 2 str
164 """A bytes which mostly acts as a Python 2 str
159
165
160 >>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1)
166 >>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1)
161 ('', 'foo', 'ascii', '1')
167 ('', 'foo', 'ascii', '1')
162 >>> s = bytestr(b'foo')
168 >>> s = bytestr(b'foo')
163 >>> assert s is bytestr(s)
169 >>> assert s is bytestr(s)
164
170
165 __bytes__() should be called if provided:
171 __bytes__() should be called if provided:
166
172
167 >>> class bytesable(object):
173 >>> class bytesable(object):
168 ... def __bytes__(self):
174 ... def __bytes__(self):
169 ... return b'bytes'
175 ... return b'bytes'
170 >>> bytestr(bytesable())
176 >>> bytestr(bytesable())
171 'bytes'
177 'bytes'
172
178
173 There's no implicit conversion from non-ascii str as its encoding is
179 There's no implicit conversion from non-ascii str as its encoding is
174 unknown:
180 unknown:
175
181
176 >>> bytestr(chr(0x80)) # doctest: +ELLIPSIS
182 >>> bytestr(chr(0x80)) # doctest: +ELLIPSIS
177 Traceback (most recent call last):
183 Traceback (most recent call last):
178 ...
184 ...
179 UnicodeEncodeError: ...
185 UnicodeEncodeError: ...
180
186
181 Comparison between bytestr and bytes should work:
187 Comparison between bytestr and bytes should work:
182
188
183 >>> assert bytestr(b'foo') == b'foo'
189 >>> assert bytestr(b'foo') == b'foo'
184 >>> assert b'foo' == bytestr(b'foo')
190 >>> assert b'foo' == bytestr(b'foo')
185 >>> assert b'f' in bytestr(b'foo')
191 >>> assert b'f' in bytestr(b'foo')
186 >>> assert bytestr(b'f') in b'foo'
192 >>> assert bytestr(b'f') in b'foo'
187
193
188 Sliced elements should be bytes, not integer:
194 Sliced elements should be bytes, not integer:
189
195
190 >>> s[1], s[:2]
196 >>> s[1], s[:2]
191 (b'o', b'fo')
197 (b'o', b'fo')
192 >>> list(s), list(reversed(s))
198 >>> list(s), list(reversed(s))
193 ([b'f', b'o', b'o'], [b'o', b'o', b'f'])
199 ([b'f', b'o', b'o'], [b'o', b'o', b'f'])
194
200
195 As bytestr type isn't propagated across operations, you need to cast
201 As bytestr type isn't propagated across operations, you need to cast
196 bytes to bytestr explicitly:
202 bytes to bytestr explicitly:
197
203
198 >>> s = bytestr(b'foo').upper()
204 >>> s = bytestr(b'foo').upper()
199 >>> t = bytestr(s)
205 >>> t = bytestr(s)
200 >>> s[0], t[0]
206 >>> s[0], t[0]
201 (70, b'F')
207 (70, b'F')
202
208
203 Be careful to not pass a bytestr object to a function which expects
209 Be careful to not pass a bytestr object to a function which expects
204 bytearray-like behavior.
210 bytearray-like behavior.
205
211
206 >>> t = bytes(t) # cast to bytes
212 >>> t = bytes(t) # cast to bytes
207 >>> assert type(t) is bytes
213 >>> assert type(t) is bytes
208 """
214 """
209
215
210 def __new__(cls, s=b''):
216 def __new__(cls, s=b''):
211 if isinstance(s, bytestr):
217 if isinstance(s, bytestr):
212 return s
218 return s
213 if not isinstance(
219 if not isinstance(
214 s, (bytes, bytearray)
220 s, (bytes, bytearray)
215 ) and not hasattr( # hasattr-py3-only
221 ) and not hasattr( # hasattr-py3-only
216 s, u'__bytes__'
222 s, u'__bytes__'
217 ):
223 ):
218 s = str(s).encode('ascii')
224 s = str(s).encode('ascii')
219 return bytes.__new__(cls, s)
225 return bytes.__new__(cls, s)
220
226
221 def __getitem__(self, key):
227 def __getitem__(self, key):
222 s = bytes.__getitem__(self, key)
228 s = bytes.__getitem__(self, key)
223 if not isinstance(s, bytes):
229 if not isinstance(s, bytes):
224 s = bytechr(s)
230 s = bytechr(s)
225 return s
231 return s
226
232
227 def __iter__(self):
233 def __iter__(self):
228 return iterbytestr(bytes.__iter__(self))
234 return iterbytestr(bytes.__iter__(self))
229
235
230 def __repr__(self):
236 def __repr__(self):
231 return bytes.__repr__(self)[1:] # drop b''
237 return bytes.__repr__(self)[1:] # drop b''
232
238
233 def iterbytestr(s):
239 def iterbytestr(s):
234 """Iterate bytes as if it were a str object of Python 2"""
240 """Iterate bytes as if it were a str object of Python 2"""
235 return map(bytechr, s)
241 return map(bytechr, s)
236
242
237 def maybebytestr(s):
243 def maybebytestr(s):
238 """Promote bytes to bytestr"""
244 """Promote bytes to bytestr"""
239 if isinstance(s, bytes):
245 if isinstance(s, bytes):
240 return bytestr(s)
246 return bytestr(s)
241 return s
247 return s
242
248
243 def sysbytes(s):
249 def sysbytes(s):
244 """Convert an internal str (e.g. keyword, __doc__) back to bytes
250 """Convert an internal str (e.g. keyword, __doc__) back to bytes
245
251
246 This never raises UnicodeEncodeError, but only ASCII characters
252 This never raises UnicodeEncodeError, but only ASCII characters
247 can be round-trip by sysstr(sysbytes(s)).
253 can be round-trip by sysstr(sysbytes(s)).
248 """
254 """
249 return s.encode('utf-8')
255 return s.encode('utf-8')
250
256
251 def sysstr(s):
257 def sysstr(s):
252 """Return a keyword str to be passed to Python functions such as
258 """Return a keyword str to be passed to Python functions such as
253 getattr() and str.encode()
259 getattr() and str.encode()
254
260
255 This never raises UnicodeDecodeError. Non-ascii characters are
261 This never raises UnicodeDecodeError. Non-ascii characters are
256 considered invalid and mapped to arbitrary but unique code points
262 considered invalid and mapped to arbitrary but unique code points
257 such that 'sysstr(a) != sysstr(b)' for all 'a != b'.
263 such that 'sysstr(a) != sysstr(b)' for all 'a != b'.
258 """
264 """
259 if isinstance(s, builtins.str):
265 if isinstance(s, builtins.str):
260 return s
266 return s
261 return s.decode('latin-1')
267 return s.decode('latin-1')
262
268
263 def strurl(url):
269 def strurl(url):
264 """Converts a bytes url back to str"""
270 """Converts a bytes url back to str"""
265 if isinstance(url, bytes):
271 if isinstance(url, bytes):
266 return url.decode('ascii')
272 return url.decode('ascii')
267 return url
273 return url
268
274
269 def bytesurl(url):
275 def bytesurl(url):
270 """Converts a str url to bytes by encoding in ascii"""
276 """Converts a str url to bytes by encoding in ascii"""
271 if isinstance(url, str):
277 if isinstance(url, str):
272 return url.encode('ascii')
278 return url.encode('ascii')
273 return url
279 return url
274
280
275 def raisewithtb(exc, tb):
281 def raisewithtb(exc, tb):
276 """Raise exception with the given traceback"""
282 """Raise exception with the given traceback"""
277 raise exc.with_traceback(tb)
283 raise exc.with_traceback(tb)
278
284
279 def getdoc(obj):
285 def getdoc(obj):
280 """Get docstring as bytes; may be None so gettext() won't confuse it
286 """Get docstring as bytes; may be None so gettext() won't confuse it
281 with _('')"""
287 with _('')"""
282 doc = getattr(obj, '__doc__', None)
288 doc = getattr(obj, '__doc__', None)
283 if doc is None:
289 if doc is None:
284 return doc
290 return doc
285 return sysbytes(doc)
291 return sysbytes(doc)
286
292
287 def _wrapattrfunc(f):
293 def _wrapattrfunc(f):
288 @functools.wraps(f)
294 @functools.wraps(f)
289 def w(object, name, *args):
295 def w(object, name, *args):
290 return f(object, sysstr(name), *args)
296 return f(object, sysstr(name), *args)
291
297
292 return w
298 return w
293
299
294 # these wrappers are automagically imported by hgloader
300 # these wrappers are automagically imported by hgloader
295 delattr = _wrapattrfunc(builtins.delattr)
301 delattr = _wrapattrfunc(builtins.delattr)
296 getattr = _wrapattrfunc(builtins.getattr)
302 getattr = _wrapattrfunc(builtins.getattr)
297 hasattr = _wrapattrfunc(builtins.hasattr)
303 hasattr = _wrapattrfunc(builtins.hasattr)
298 setattr = _wrapattrfunc(builtins.setattr)
304 setattr = _wrapattrfunc(builtins.setattr)
299 xrange = builtins.range
305 xrange = builtins.range
300 unicode = str
306 unicode = str
301
307
302 def open(name, mode=b'r', buffering=-1, encoding=None):
308 def open(name, mode=b'r', buffering=-1, encoding=None):
303 return builtins.open(name, sysstr(mode), buffering, encoding)
309 return builtins.open(name, sysstr(mode), buffering, encoding)
304
310
305 safehasattr = _wrapattrfunc(builtins.hasattr)
311 safehasattr = _wrapattrfunc(builtins.hasattr)
306
312
307 def _getoptbwrapper(orig, args, shortlist, namelist):
313 def _getoptbwrapper(orig, args, shortlist, namelist):
308 """
314 """
309 Takes bytes arguments, converts them to unicode, pass them to
315 Takes bytes arguments, converts them to unicode, pass them to
310 getopt.getopt(), convert the returned values back to bytes and then
316 getopt.getopt(), convert the returned values back to bytes and then
311 return them for Python 3 compatibility as getopt.getopt() don't accepts
317 return them for Python 3 compatibility as getopt.getopt() don't accepts
312 bytes on Python 3.
318 bytes on Python 3.
313 """
319 """
314 args = [a.decode('latin-1') for a in args]
320 args = [a.decode('latin-1') for a in args]
315 shortlist = shortlist.decode('latin-1')
321 shortlist = shortlist.decode('latin-1')
316 namelist = [a.decode('latin-1') for a in namelist]
322 namelist = [a.decode('latin-1') for a in namelist]
317 opts, args = orig(args, shortlist, namelist)
323 opts, args = orig(args, shortlist, namelist)
318 opts = [(a[0].encode('latin-1'), a[1].encode('latin-1')) for a in opts]
324 opts = [(a[0].encode('latin-1'), a[1].encode('latin-1')) for a in opts]
319 args = [a.encode('latin-1') for a in args]
325 args = [a.encode('latin-1') for a in args]
320 return opts, args
326 return opts, args
321
327
322 def strkwargs(dic):
328 def strkwargs(dic):
323 """
329 """
324 Converts the keys of a python dictonary to str i.e. unicodes so that
330 Converts the keys of a python dictonary to str i.e. unicodes so that
325 they can be passed as keyword arguments as dictonaries with bytes keys
331 they can be passed as keyword arguments as dictonaries with bytes keys
326 can't be passed as keyword arguments to functions on Python 3.
332 can't be passed as keyword arguments to functions on Python 3.
327 """
333 """
328 dic = dict((k.decode('latin-1'), v) for k, v in dic.items())
334 dic = dict((k.decode('latin-1'), v) for k, v in dic.items())
329 return dic
335 return dic
330
336
331 def byteskwargs(dic):
337 def byteskwargs(dic):
332 """
338 """
333 Converts keys of python dictonaries to bytes as they were converted to
339 Converts keys of python dictonaries to bytes as they were converted to
334 str to pass that dictonary as a keyword argument on Python 3.
340 str to pass that dictonary as a keyword argument on Python 3.
335 """
341 """
336 dic = dict((k.encode('latin-1'), v) for k, v in dic.items())
342 dic = dict((k.encode('latin-1'), v) for k, v in dic.items())
337 return dic
343 return dic
338
344
# TODO: handle shlex.shlex().
def shlexsplit(s, comments=False, posix=True):
    """Split the bytes string *s* the way shlex.split() would.

    shlex.split() only accepts str on Python 3, so *s* is decoded with
    latin-1 first, and every resulting token is encoded back to bytes.
    """
    tokens = shlex.split(s.decode('latin-1'), comments, posix)
    return [token.encode('latin-1') for token in tokens]
348
354
# Python 3 dictionaries iterate items/values natively; keep the
# Python 2 style accessor names as thin wrappers.
def iteritems(mapping):
    """Return an iterable over the (key, value) pairs of *mapping*."""
    return mapping.items()


def itervalues(mapping):
    """Return an iterable over the values of *mapping*."""
    return mapping.values()
351
357
# Python 3.5's json.load and json.loads require str. We polyfill its
# code for detecting encoding from bytes.
if sys.version_info[0:2] < (3, 6):

    def _detect_encoding(b):
        """Guess the JSON encoding of the bytes string *b*.

        Mirrors the BOM/zero-byte sniffing that json.detect_encoding()
        performs on Python >= 3.6 — keep the logic byte-for-byte in
        sync with CPython.
        """
        bstartswith = b.startswith
        # Explicit byte-order marks win outright.
        if bstartswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)):
            return 'utf-32'
        if bstartswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)):
            return 'utf-16'
        if bstartswith(codecs.BOM_UTF8):
            return 'utf-8-sig'

        # No BOM: infer the width/endianness from where the zero bytes
        # fall in the first (ASCII) JSON character.
        if len(b) >= 4:
            if not b[0]:
                # 00 00 -- -- - utf-32-be
                # 00 XX -- -- - utf-16-be
                return 'utf-16-be' if b[1] else 'utf-32-be'
            if not b[1]:
                # XX 00 00 00 - utf-32-le
                # XX 00 00 XX - utf-16-le
                # XX 00 XX -- - utf-16-le
                return 'utf-16-le' if b[2] or b[3] else 'utf-32-le'
        elif len(b) == 2:
            if not b[0]:
                # 00 XX - utf-16-be
                return 'utf-16-be'
            if not b[1]:
                # XX 00 - utf-16-le
                return 'utf-16-le'
        # default
        return 'utf-8'

    def json_loads(s, *args, **kwargs):
        """json.loads() that also accepts bytes/bytearray on Python 3.5."""
        if isinstance(s, (bytes, bytearray)):
            s = s.decode(_detect_encoding(s), 'surrogatepass')

        return json.loads(s, *args, **kwargs)

else:
    # Python >= 3.6 handles bytes input itself.
    json_loads = json.loads
393
399
394 else:
400 else:
395 import cStringIO
401 import cStringIO
396
402
397 xrange = xrange
403 xrange = xrange
398 unicode = unicode
404 unicode = unicode
399 bytechr = chr
405 bytechr = chr
400 byterepr = repr
406 byterepr = repr
401 bytestr = str
407 bytestr = str
402 iterbytestr = iter
408 iterbytestr = iter
403 maybebytestr = identity
409 maybebytestr = identity
404 sysbytes = identity
410 sysbytes = identity
405 sysstr = identity
411 sysstr = identity
406 strurl = identity
412 strurl = identity
407 bytesurl = identity
413 bytesurl = identity
408 open = open
414 open = open
409 delattr = delattr
415 delattr = delattr
410 getattr = getattr
416 getattr = getattr
411 hasattr = hasattr
417 hasattr = hasattr
412 setattr = setattr
418 setattr = setattr
413
419
414 # this can't be parsed on Python 3
420 # this can't be parsed on Python 3
415 exec(b'def raisewithtb(exc, tb):\n raise exc, None, tb\n')
421 exec(b'def raisewithtb(exc, tb):\n raise exc, None, tb\n')
416
422
417 def fsencode(filename):
423 def fsencode(filename):
418 """
424 """
419 Partial backport from os.py in Python 3, which only accepts bytes.
425 Partial backport from os.py in Python 3, which only accepts bytes.
420 In Python 2, our paths should only ever be bytes, a unicode path
426 In Python 2, our paths should only ever be bytes, a unicode path
421 indicates a bug.
427 indicates a bug.
422 """
428 """
423 if isinstance(filename, str):
429 if isinstance(filename, str):
424 return filename
430 return filename
425 else:
431 else:
426 raise TypeError("expect str, not %s" % type(filename).__name__)
432 raise TypeError("expect str, not %s" % type(filename).__name__)
427
433
428 # In Python 2, fsdecode() has a very chance to receive bytes. So it's
434 # In Python 2, fsdecode() has a very chance to receive bytes. So it's
429 # better not to touch Python 2 part as it's already working fine.
435 # better not to touch Python 2 part as it's already working fine.
430 fsdecode = identity
436 fsdecode = identity
431
437
432 def getdoc(obj):
438 def getdoc(obj):
433 return getattr(obj, '__doc__', None)
439 return getattr(obj, '__doc__', None)
434
440
435 _notset = object()
441 _notset = object()
436
442
437 def safehasattr(thing, attr):
443 def safehasattr(thing, attr):
438 return getattr(thing, attr, _notset) is not _notset
444 return getattr(thing, attr, _notset) is not _notset
439
445
440 def _getoptbwrapper(orig, args, shortlist, namelist):
446 def _getoptbwrapper(orig, args, shortlist, namelist):
441 return orig(args, shortlist, namelist)
447 return orig(args, shortlist, namelist)
442
448
443 strkwargs = identity
449 strkwargs = identity
444 byteskwargs = identity
450 byteskwargs = identity
445
451
446 oscurdir = os.curdir
452 oscurdir = os.curdir
447 oslinesep = os.linesep
453 oslinesep = os.linesep
448 osname = os.name
454 osname = os.name
449 ospathsep = os.pathsep
455 ospathsep = os.pathsep
450 ospardir = os.pardir
456 ospardir = os.pardir
451 ossep = os.sep
457 ossep = os.sep
452 osaltsep = os.altsep
458 osaltsep = os.altsep
453 long = long
459 long = long
454 stdin = sys.stdin
460 stdin = sys.stdin
455 stdout = sys.stdout
461 stdout = sys.stdout
456 stderr = sys.stderr
462 stderr = sys.stderr
457 if getattr(sys, 'argv', None) is not None:
463 if getattr(sys, 'argv', None) is not None:
458 sysargv = sys.argv
464 sysargv = sys.argv
459 sysplatform = sys.platform
465 sysplatform = sys.platform
460 sysexecutable = sys.executable
466 sysexecutable = sys.executable
461 shlexsplit = shlex.split
467 shlexsplit = shlex.split
462 bytesio = cStringIO.StringIO
468 bytesio = cStringIO.StringIO
463 stringio = bytesio
469 stringio = bytesio
464 maplist = map
470 maplist = map
465 rangelist = range
471 rangelist = range
466 ziplist = zip
472 ziplist = zip
467 rawinput = raw_input
473 rawinput = raw_input
468 getargspec = inspect.getargspec
474 getargspec = inspect.getargspec
469 iteritems = lambda x: x.iteritems()
475 iteritems = lambda x: x.iteritems()
470 itervalues = lambda x: x.itervalues()
476 itervalues = lambda x: x.itervalues()
471 json_loads = json.loads
477 json_loads = json.loads
472
478
# Host-environment flags, derived from the sysplatform/osname aliases
# defined in the version-specific branches above.
isjython = sysplatform.startswith(b'java')

isdarwin = sysplatform.startswith(b'darwin')
islinux = sysplatform.startswith(b'linux')
isposix = osname == b'posix'
iswindows = osname == b'nt'
479
485
480
486
def getoptb(args, shortlist, namelist):
    """getopt.getopt() wrapper that accepts bytes arguments on Python 3."""
    return _getoptbwrapper(getopt.getopt, args, shortlist, namelist)
483
489
484
490
def gnugetoptb(args, shortlist, namelist):
    """getopt.gnu_getopt() wrapper that accepts bytes arguments on Python 3."""
    return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist)
487
493
488
494
def mkdtemp(suffix=b'', prefix=b'tmp', dir=None):
    """Create a temporary directory, accepting bytes arguments.

    Thin wrapper over tempfile.mkdtemp() so callers can keep passing
    bytes for suffix/prefix/dir.
    """
    return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
491
497
492
498
# text=True is not supported; use util.from/tonativeeol() instead
def mkstemp(suffix=b'', prefix=b'tmp', dir=None):
    """Create a temporary file, accepting bytes arguments.

    Returns the (fd, path) pair produced by tempfile.mkstemp().
    """
    return tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
496
502
497
503
# mode must include 'b'ytes as encoding= is not supported
def namedtempfile(
    mode=b'w+b', bufsize=-1, suffix=b'', prefix=b'tmp', dir=None, delete=True
):
    """tempfile.NamedTemporaryFile() wrapper accepting a bytes *mode*.

    *mode* is converted to a native str and must contain 'b': only
    binary modes are supported because encoding= is not exposed.
    bufsize is passed positionally on purpose — the keyword is named
    differently on Python 2 (bufsize) and Python 3 (buffering).
    """
    mode = sysstr(mode)
    assert 'b' in mode
    return tempfile.NamedTemporaryFile(
        mode, bufsize, suffix=suffix, prefix=prefix, dir=dir, delete=delete
    )
@@ -1,280 +1,281
1 # state.py - writing and reading state files in Mercurial
1 # state.py - writing and reading state files in Mercurial
2 #
2 #
3 # Copyright 2018 Pulkit Goyal <pulkitmgoyal@gmail.com>
3 # Copyright 2018 Pulkit Goyal <pulkitmgoyal@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """
8 """
9 This file contains class to wrap the state for commands and other
9 This file contains class to wrap the state for commands and other
10 related logic.
10 related logic.
11
11
12 All the data related to the command state is stored as dictionary in the object.
12 All the data related to the command state is stored as dictionary in the object.
13 The class has methods using which the data can be stored to disk in a file under
13 The class has methods using which the data can be stored to disk in a file under
14 .hg/ directory.
14 .hg/ directory.
15
15
16 We store the data on disk in cbor, for which we use the CBOR format to encode
16 We store the data on disk in cbor, for which we use the CBOR format to encode
17 the data.
17 the data.
18 """
18 """
19
19
20 from __future__ import absolute_import
20 from __future__ import absolute_import
21
21
22 from .i18n import _
22 from .i18n import _
23
23
24 from . import (
24 from . import (
25 error,
25 error,
26 pycompat,
26 util,
27 util,
27 )
28 )
28 from .utils import cborutil
29 from .utils import cborutil
29
30
if pycompat.TYPE_CHECKING:
    from typing import (
        Any,
        Dict,
    )

    # The typing names are only referenced from "# type:" comments;
    # asserting them marks them as used (presumably to silence
    # unused-import linters — confirm against the project lint setup).
    for t in (Any, Dict):
        assert t
38
39
39
40
class cmdstate(object):
    """State-file wrapper for multi-step commands such as `rebase`,
    `graft`, `histedit` and `shelve`; extensions may use it as well.

    All state data is kept as key/value pairs in a dictionary.  The
    object can serialize that dictionary into a file under the .hg/
    directory and repopulate itself by reading the file back.

    CBOR is the on-disk serialization format.
    """

    def __init__(self, repo, fname):
        """*repo* is the repository object; *fname* is the name of the
        file (relative to the .hg directory) holding the data.
        """
        self._repo = repo
        self.fname = fname

    def read(self):
        # type: () -> Dict[bytes, Any]
        """read the existing state file and return a dict of data stored"""
        return self._read()

    def save(self, version, data):
        """Serialize *data* into .hg/<fname>, prefixed by *version*.

        The payload after the version line is CBOR-encoded.
        """
        if not isinstance(version, int):
            raise error.ProgrammingError(
                b"version of state file should be an integer"
            )

        with self._repo.vfs(self.fname, b'wb', atomictemp=True) as fp:
            fp.write(b'%d\n' % version)
            for chunk in cborutil.streamencode(data):
                fp.write(chunk)

    def _read(self):
        """Decode the state file back into the dictionary that was
        originally handed to save()."""
        with self._repo.vfs(self.fname, b'rb') as fp:
            try:
                # first line is the integer version written by save()
                int(fp.readline())
            except ValueError:
                raise error.CorruptedState(
                    b"unknown version of state file found"
                )
            return cborutil.decodeall(fp.read())[0]

    def delete(self):
        """Remove the state file; a missing file is not an error."""
        util.unlinkpath(self._repo.vfs.join(self.fname), ignoremissing=True)

    def exists(self):
        """Return True if the state file is present on disk."""
        return self._repo.vfs.exists(self.fname)
101
102
102
103
class _statecheck(object):
    """Bookkeeping for multi-step operations (graft, histedit, bisect,
    update, ...): decides whether such an operation is currently
    unfinished and produces the matching status and hint messages.

    New multi-step commands — including ones added by extensions —
    register an instance of this class via addunfinished().
    """

    def __init__(
        self,
        opname,
        fname,
        clearable,
        allowcommit,
        reportonly,
        continueflag,
        stopflag,
        cmdmsg,
        cmdhint,
        statushint,
        abortfunc,
        continuefunc,
    ):
        self._opname = opname
        self._fname = fname
        self._clearable = clearable
        self._allowcommit = allowcommit
        self._reportonly = reportonly
        self._continueflag = continueflag
        self._stopflag = stopflag
        self._cmdmsg = cmdmsg
        self._cmdhint = cmdhint
        self._statushint = statushint
        self.abortfunc = abortfunc
        self.continuefunc = continuefunc

    def statusmsg(self):
        """Hint displayed by `hg status --verbose` for this operation."""
        if self._statushint:
            return self._statushint
        # default continue/abort hint, plus --stop when supported
        hint = _(
            b'To continue: hg %s --continue\n'
            b'To abort: hg %s --abort'
        ) % (self._opname, self._opname)
        if self._stopflag:
            hint += _(b'\nTo stop: hg %s --stop') % (self._opname)
        return hint

    def hint(self):
        """Hint shown when the operation is found interrupted."""
        if self._cmdhint:
            return self._cmdhint
        return _(b"use 'hg %s --continue' or 'hg %s --abort'") % (
            self._opname,
            self._opname,
        )

    def msg(self):
        """Status message describing the in-progress operation."""
        return self._cmdmsg or _(b'%s in progress') % (self._opname)

    def continuemsg(self):
        """Command line the user should run to continue the operation."""
        return _(b'hg %s --continue') % (self._opname)

    def isunfinished(self, repo):
        """Return whether this multi-step operation is in progress."""
        if self._opname == b'merge':
            # merge has no state file; it is detected by two parents
            return len(repo[None].parents()) > 1
        return repo.vfs.exists(self._fname)
185
186
186
187
# A list of statecheck objects for multistep operations like graft.
# Populated by addunfinished(); consulted by getrepostate() below.
_unfinishedstates = []
189
190
190
191
def addunfinished(
    opname,
    fname,
    clearable=False,
    allowcommit=False,
    reportonly=False,
    continueflag=False,
    stopflag=False,
    cmdmsg=b"",
    cmdhint=b"",
    statushint=b"",
    abortfunc=None,
    continuefunc=None,
):
    """Register a command or operation with unfinishedstates.

    opname       name of the command or operation.
    fname        file name (under .hg/) holding its state; None for merge.
    clearable    whether an interrupted state can be cleared by running
                 `hg update -C .`, which deletes the state file.
    allowcommit  whether committing is allowed while interrupted.
    reportonly   for operations such as bisect that only need to be
                 detected by 'hg status --verbose'.
    continueflag whether the command supports a --continue option.
    stopflag     whether the command supports a --stop flag.
    cmdmsg       overrides the standard "abort: cmdname in progress"
                 status message.
    cmdhint      overrides the standard "To continue: hg cmdname
                 --continue / To abort: hg cmdname --abort" hint.
    statushint   overrides the standard status hint of the same shape.
    abortfunc    callable that aborts an unfinished state.
    continuefunc callable that finishes an interrupted operation.
    """
    statecheckobj = _statecheck(
        opname,
        fname,
        clearable,
        allowcommit,
        reportonly,
        continueflag,
        stopflag,
        cmdmsg,
        cmdhint,
        statushint,
        abortfunc,
        continuefunc,
    )
    # merge is appended at the end; every other operation is inserted
    # at the front of the list
    index = len(_unfinishedstates) if opname == b'merge' else 0
    _unfinishedstates.insert(index, statecheckobj)
250
251
251
252
# Built-in registrations for core operations detected by this module.
addunfinished(
    b'update',
    fname=b'updatestate',
    clearable=True,
    cmdmsg=_(b'last update was interrupted'),
    cmdhint=_(b"use 'hg update' to get a consistent checkout"),
    statushint=_(b"To continue: hg update ."),
)
addunfinished(
    b'bisect',
    fname=b'bisect.state',
    allowcommit=True,
    reportonly=True,
    statushint=_(
        b'To mark the changeset good: hg bisect --good\n'
        b'To mark the changeset bad: hg bisect --bad\n'
        b'To abort: hg bisect --reset\n'
    ),
)
271
272
272
273
def getrepostate(repo):
    """Return (opname, statushint) for the first unfinished operation.

    Operations listed in the experimental commands.status.skipstates
    configuration are ignored.  Returns None when nothing is in
    progress.
    """
    # experimental config: commands.status.skipstates
    skipped = set(repo.ui.configlist(b'commands', b'status.skipstates'))
    for state in _unfinishedstates:
        if state._opname not in skipped and state.isunfinished(repo):
            return (state._opname, state.statusmsg())
General Comments 0
You need to be logged in to leave comments. Login now