py3: use str instead of pycompat.unicode...

Gregory Szorc
r49789:06de08b3 default
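The change itself is mechanical: on Python 3, mercurial.pycompat aliases unicode to the builtin str, so the isinstance() checks in both hunks below test the same type either way; the commit simply drops the indirection. A minimal sketch of the equivalence, assuming Python 3 (the alias line stands in for what pycompat provides, not the full module):

# Minimal sketch, assuming Python 3: pycompat effectively does
# `unicode = str`, so both isinstance() forms are the same check.
unicode = str  # stand-in for mercurial.pycompat.unicode on Python 3

s = u'caf\xe9'
assert isinstance(s, unicode) and isinstance(s, str)  # identical result
assert s.encode("utf-8") == b'caf\xc3\xa9'  # the branch recode() then takes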
@@ -1,597 +1,597 @@ hgext/convert/common.py
# common.py - common code for the convert extension
#
# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import base64
import datetime
import errno
import os
import pickle
import re
import shlex
import subprocess

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial import (
    encoding,
    error,
    phases,
    pycompat,
    util,
)
from mercurial.utils import procutil

propertycache = util.propertycache


def _encodeornone(d):
    if d is None:
        return
    return d.encode('latin1')


class _shlexpy3proxy(object):
    def __init__(self, l):
        self._l = l

    def __iter__(self):
        return (_encodeornone(v) for v in self._l)

    def get_token(self):
        return _encodeornone(self._l.get_token())

    @property
    def infile(self):
        return self._l.infile or b'<unknown>'

    @property
    def lineno(self):
        return self._l.lineno


def shlexer(data=None, filepath=None, wordchars=None, whitespace=None):
    if data is None:
        if pycompat.ispy3:
            data = open(filepath, b'r', encoding='latin1')
        else:
            data = open(filepath, b'r')
    else:
        if filepath is not None:
            raise error.ProgrammingError(
                b'shlexer only accepts data or filepath, not both'
            )
        if pycompat.ispy3:
            data = data.decode('latin1')
    l = shlex.shlex(data, infile=filepath, posix=True)
    if whitespace is not None:
        l.whitespace_split = True
        if pycompat.ispy3:
            l.whitespace += whitespace.decode('latin1')
        else:
            l.whitespace += whitespace
    if wordchars is not None:
        if pycompat.ispy3:
            l.wordchars += wordchars.decode('latin1')
        else:
            l.wordchars += wordchars
    if pycompat.ispy3:
        return _shlexpy3proxy(l)
    return l


if pycompat.ispy3:
    base64_encodebytes = base64.encodebytes
    base64_decodebytes = base64.decodebytes
else:
    base64_encodebytes = base64.encodestring
    base64_decodebytes = base64.decodestring


def encodeargs(args):
    def encodearg(s):
        lines = base64_encodebytes(s)
        lines = [l.splitlines()[0] for l in pycompat.iterbytestr(lines)]
        return b''.join(lines)

    s = pickle.dumps(args)
    return encodearg(s)


def decodeargs(s):
    s = base64_decodebytes(s)
    return pickle.loads(s)


class MissingTool(Exception):
    pass


def checktool(exe, name=None, abort=True):
    name = name or exe
    if not procutil.findexe(exe):
        if abort:
            exc = error.Abort
        else:
            exc = MissingTool
        raise exc(_(b'cannot find required "%s" tool') % name)


class NoRepo(Exception):
    pass


SKIPREV = b'SKIP'


class commit(object):
    def __init__(
        self,
        author,
        date,
        desc,
        parents,
        branch=None,
        rev=None,
        extra=None,
        sortkey=None,
        saverev=True,
        phase=phases.draft,
        optparents=None,
        ctx=None,
    ):
        self.author = author or b'unknown'
        self.date = date or b'0 0'
        self.desc = desc
        self.parents = parents  # will be converted and used as parents
        self.optparents = optparents or []  # will be used if already converted
        self.branch = branch
        self.rev = rev
        self.extra = extra or {}
        self.sortkey = sortkey
        self.saverev = saverev
        self.phase = phase
        self.ctx = ctx  # for hg to hg conversions


class converter_source(object):
    """Conversion source interface"""

    def __init__(self, ui, repotype, path=None, revs=None):
        """Initialize conversion source (or raise NoRepo("message")
        exception if path is not a valid repository)"""
        self.ui = ui
        self.path = path
        self.revs = revs
        self.repotype = repotype

        self.encoding = b'utf-8'

    def checkhexformat(self, revstr, mapname=b'splicemap'):
        """fails if revstr is not a 40-byte hex string. Mercurial and Git
        both use such a format for their revision numbering
        """
        if not re.match(br'[0-9a-fA-F]{40,40}$', revstr):
            raise error.Abort(
                _(b'%s entry %s is not a valid revision identifier')
                % (mapname, revstr)
            )

    def before(self):
        pass

    def after(self):
        pass

    def targetfilebelongstosource(self, targetfilename):
        """Returns true if the given targetfile belongs to the source repo. This
        is useful when only a subdirectory of the target belongs to the source
        repo."""
        # For normal full repo converts, this is always True.
        return True

    def setrevmap(self, revmap):
        """set the map of already-converted revisions"""

    def getheads(self):
        """Return a list of this repository's heads"""
        raise NotImplementedError

    def getfile(self, name, rev):
        """Return a pair (data, mode) where data is the file content
        as a string and mode one of '', 'x' or 'l'. rev is the
        identifier returned by a previous call to getchanges().
        Data is None if the file is missing/deleted in rev.
        """
        raise NotImplementedError

    def getchanges(self, version, full):
        """Returns a tuple of (files, copies, cleanp2).

        files is a sorted list of (filename, id) tuples for all files
        changed between version and its first parent returned by
        getcommit(). If full, all files in that revision are returned.
        id is the source revision id of the file.

        copies is a dictionary of dest: source

        cleanp2 is the set of filenames that are clean against p2.
        (Files that are clean against p1 are already not in files (unless
        full). This makes it possible to handle p2 clean files similarly.)
        """
        raise NotImplementedError

    def getcommit(self, version):
        """Return the commit object for version"""
        raise NotImplementedError

    def numcommits(self):
        """Return the number of commits in this source.

        If unknown, return None.
        """
        return None

    def gettags(self):
        """Return the tags as a dictionary of name: revision

        Tag names must be UTF-8 strings.
        """
        raise NotImplementedError

    def recode(self, s, encoding=None):
        if not encoding:
            encoding = self.encoding or b'utf-8'

-        if isinstance(s, pycompat.unicode):
+        if isinstance(s, str):
            return s.encode("utf-8")
        try:
            return s.decode(pycompat.sysstr(encoding)).encode("utf-8")
        except UnicodeError:
            try:
                return s.decode("latin-1").encode("utf-8")
            except UnicodeError:
                return s.decode(pycompat.sysstr(encoding), "replace").encode(
                    "utf-8"
                )

    def getchangedfiles(self, rev, i):
        """Return the files changed by rev compared to parent[i].

        i is an index selecting one of the parents of rev. The return
        value should be the list of files that are different in rev and
        this parent.

        If rev has no parents, i is None.

        This function is only needed to support --filemap
        """
        raise NotImplementedError

    def converted(self, rev, sinkrev):
        '''Notify the source that a revision has been converted.'''

    def hasnativeorder(self):
        """Return true if this source has a meaningful, native revision
        order. For instance, Mercurial revisions are stored sequentially
        while there is no such global ordering with Darcs.
        """
        return False

    def hasnativeclose(self):
        """Return true if this source has the ability to close branches."""
        return False

    def lookuprev(self, rev):
        """If rev is a meaningful revision reference in source, return
        the referenced identifier in the same format used by getcommit().
        Return None otherwise.
        """
        return None

    def getbookmarks(self):
        """Return the bookmarks as a dictionary of name: revision

        Bookmark names are to be UTF-8 strings.
        """
        return {}

    def checkrevformat(self, revstr, mapname=b'splicemap'):
        """revstr is a string that describes a revision in the given
        source control system. Return true if revstr has correct
        format.
        """
        return True


class converter_sink(object):
    """Conversion sink (target) interface"""

    def __init__(self, ui, repotype, path):
        """Initialize conversion sink (or raise NoRepo("message")
        exception if path is not a valid repository)

        created is a list of paths to remove if a fatal error occurs
        later"""
        self.ui = ui
        self.path = path
        self.created = []
        self.repotype = repotype

    def revmapfile(self):
        """Path to a file that will contain lines
        source_rev_id sink_rev_id
        mapping equivalent revision identifiers for each system."""
        raise NotImplementedError

    def authorfile(self):
        """Path to a file that will contain lines
        srcauthor=dstauthor
        mapping equivalent author identifiers for each system."""
        return None

    def putcommit(
        self, files, copies, parents, commit, source, revmap, full, cleanp2
    ):
        """Create a revision with all changed files listed in 'files'
        and having listed parents. 'commit' is a commit object
        containing at a minimum the author, date, and message for this
        changeset. 'files' is a list of (path, version) tuples,
        'copies' is a dictionary mapping destinations to sources,
        'source' is the source repository, and 'revmap' is a mapfile
        of source revisions to converted revisions. Only getfile() and
        lookuprev() should be called on 'source'. 'full' means that 'files'
        is complete and all other files should be removed.
        'cleanp2' is a set of the filenames that are unchanged from p2
        (only in the common merge case where there are two parents).

        Note that the sink repository is not told to update itself to
        a particular revision (or even what that revision would be)
        before it receives the file data.
        """
        raise NotImplementedError

    def puttags(self, tags):
        """Put tags into sink.

        tags: {tagname: sink_rev_id, ...} where tagname is a UTF-8 string.
        Return a pair (tag_revision, tag_parent_revision), or (None, None)
        if nothing was changed.
        """
        raise NotImplementedError

    def setbranch(self, branch, pbranches):
        """Set the current branch name. Called before the first putcommit
        on the branch.
        branch: branch name for subsequent commits
        pbranches: (converted parent revision, parent branch) tuples"""

    def setfilemapmode(self, active):
        """Tell the destination that we're using a filemap

        Some converter_sources (svn in particular) can claim that a file
        was changed in a revision, even if there was no change. This method
        tells the destination that we're using a filemap and that it should
        filter empty revisions.
        """

    def before(self):
        pass

    def after(self):
        pass

    def putbookmarks(self, bookmarks):
        """Put bookmarks into sink.

        bookmarks: {bookmarkname: sink_rev_id, ...}
        where bookmarkname is a UTF-8 string.
        """

    def hascommitfrommap(self, rev):
        """Return False if a rev mentioned in a filemap is known to not be
        present."""
        raise NotImplementedError

    def hascommitforsplicemap(self, rev):
        """This method is for the special needs for splicemap handling and not
        for general use. Returns True if the sink contains rev, aborts on some
        special cases."""
        raise NotImplementedError


class commandline(object):
    def __init__(self, ui, command):
        self.ui = ui
        self.command = command

    def prerun(self):
        pass

    def postrun(self):
        pass

    def _cmdline(self, cmd, *args, **kwargs):
        kwargs = pycompat.byteskwargs(kwargs)
        cmdline = [self.command, cmd] + list(args)
        for k, v in kwargs.items():
            if len(k) == 1:
                cmdline.append(b'-' + k)
            else:
                cmdline.append(b'--' + k.replace(b'_', b'-'))
            try:
                if len(k) == 1:
                    cmdline.append(b'' + v)
                else:
                    cmdline[-1] += b'=' + v
            except TypeError:
                pass
        cmdline = [procutil.shellquote(arg) for arg in cmdline]
        if not self.ui.debugflag:
            cmdline += [b'2>', pycompat.bytestr(os.devnull)]
        cmdline = b' '.join(cmdline)
        return cmdline

    def _run(self, cmd, *args, **kwargs):
        def popen(cmdline):
            p = subprocess.Popen(
                procutil.tonativestr(cmdline),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
            )
            return p

        return self._dorun(popen, cmd, *args, **kwargs)

    def _run2(self, cmd, *args, **kwargs):
        return self._dorun(procutil.popen2, cmd, *args, **kwargs)

    def _run3(self, cmd, *args, **kwargs):
        return self._dorun(procutil.popen3, cmd, *args, **kwargs)

    def _dorun(self, openfunc, cmd, *args, **kwargs):
        cmdline = self._cmdline(cmd, *args, **kwargs)
        self.ui.debug(b'running: %s\n' % (cmdline,))
        self.prerun()
        try:
            return openfunc(cmdline)
        finally:
            self.postrun()

    def run(self, cmd, *args, **kwargs):
        p = self._run(cmd, *args, **kwargs)
        output = p.communicate()[0]
        self.ui.debug(output)
        return output, p.returncode

    def runlines(self, cmd, *args, **kwargs):
        p = self._run(cmd, *args, **kwargs)
        output = p.stdout.readlines()
        p.wait()
        self.ui.debug(b''.join(output))
        return output, p.returncode

    def checkexit(self, status, output=b''):
        if status:
            if output:
                self.ui.warn(_(b'%s error:\n') % self.command)
                self.ui.warn(output)
            msg = procutil.explainexit(status)
            raise error.Abort(b'%s %s' % (self.command, msg))

    def run0(self, cmd, *args, **kwargs):
        output, status = self.run(cmd, *args, **kwargs)
        self.checkexit(status, output)
        return output

    def runlines0(self, cmd, *args, **kwargs):
        output, status = self.runlines(cmd, *args, **kwargs)
        self.checkexit(status, b''.join(output))
        return output

    @propertycache
    def argmax(self):
        # POSIX requires at least 4096 bytes for ARG_MAX
        argmax = 4096
        try:
            argmax = os.sysconf("SC_ARG_MAX")
        except (AttributeError, ValueError):
            pass

        # Windows shells impose their own limits on command line length,
        # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes
        # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for
        # details about cmd.exe limitations.

        # Since ARG_MAX is for command line _and_ environment, lower our limit
        # (and make happy Windows shells while doing this).
        return argmax // 2 - 1

    def _limit_arglist(self, arglist, cmd, *args, **kwargs):
        cmdlen = len(self._cmdline(cmd, *args, **kwargs))
        limit = self.argmax - cmdlen
        numbytes = 0
        fl = []
        for fn in arglist:
            b = len(fn) + 3
            if numbytes + b < limit or len(fl) == 0:
                fl.append(fn)
                numbytes += b
            else:
                yield fl
                fl = [fn]
                numbytes = b
        if fl:
            yield fl

    def xargs(self, arglist, cmd, *args, **kwargs):
        for l in self._limit_arglist(arglist, cmd, *args, **kwargs):
            self.run0(cmd, *(list(args) + l), **kwargs)


class mapfile(dict):
    def __init__(self, ui, path):
        super(mapfile, self).__init__()
        self.ui = ui
        self.path = path
        self.fp = None
        self.order = []
        self._read()

    def _read(self):
        if not self.path:
            return
        try:
            fp = open(self.path, b'rb')
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        for i, line in enumerate(util.iterfile(fp)):
            line = line.splitlines()[0].rstrip()
            if not line:
                # Ignore blank lines
                continue
            try:
                key, value = line.rsplit(b' ', 1)
            except ValueError:
                raise error.Abort(
                    _(b'syntax error in %s(%d): key/value pair expected')
                    % (self.path, i + 1)
                )
            if key not in self:
                self.order.append(key)
            super(mapfile, self).__setitem__(key, value)
        fp.close()

    def __setitem__(self, key, value):
        if self.fp is None:
            try:
                self.fp = open(self.path, b'ab')
            except IOError as err:
                raise error.Abort(
                    _(b'could not open map file %r: %s')
                    % (self.path, encoding.strtolocal(err.strerror))
                )
        self.fp.write(util.tonativeeol(b'%s %s\n' % (key, value)))
        self.fp.flush()
        super(mapfile, self).__setitem__(key, value)

    def close(self):
        if self.fp:
            self.fp.close()
            self.fp = None


def makedatetimestamp(t):
    """Like dateutil.makedate() but for time t instead of current time"""
    delta = datetime.datetime.utcfromtimestamp(
        t
    ) - datetime.datetime.fromtimestamp(t)
    tz = delta.days * 86400 + delta.seconds
    return t, tz
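
Between the two files, a usage sketch may help: parsesplicemap() in convcmd.py below feeds each splicemap line through common.shlexer() from the hunk above and gets byte-string tokens back on either Python version. A hedged example, assuming an environment where hgext.convert.common is importable:

# Hedged usage sketch of shlexer() above: tokenize one splicemap line,
# splitting on whitespace and commas; tokens come back as bytes.
from hgext.convert import common

lex = common.shlexer(data=b'childhash parenthash1,parenthash2', whitespace=b',')
print(list(lex))  # [b'childhash', b'parenthash1', b'parenthash2']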
@@ -1,667 +1,667 @@ hgext/convert/convcmd.py
# convcmd - convert extension commands definition
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import collections
import os
import shutil

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial import (
    encoding,
    error,
    hg,
    pycompat,
    scmutil,
    util,
)
from mercurial.utils import dateutil

from . import (
    bzr,
    common,
    cvs,
    darcs,
    filemap,
    git,
    gnuarch,
    hg as hgconvert,
    monotone,
    p4,
    subversion,
)

mapfile = common.mapfile
MissingTool = common.MissingTool
NoRepo = common.NoRepo
SKIPREV = common.SKIPREV

bzr_source = bzr.bzr_source
convert_cvs = cvs.convert_cvs
convert_git = git.convert_git
darcs_source = darcs.darcs_source
gnuarch_source = gnuarch.gnuarch_source
mercurial_sink = hgconvert.mercurial_sink
mercurial_source = hgconvert.mercurial_source
monotone_source = monotone.monotone_source
p4_source = p4.p4_source
svn_sink = subversion.svn_sink
svn_source = subversion.svn_source

orig_encoding = b'ascii'


def readauthormap(ui, authorfile, authors=None):
    if authors is None:
        authors = {}
    with open(authorfile, b'rb') as afile:
        for line in afile:

            line = line.strip()
            if not line or line.startswith(b'#'):
                continue

            try:
                srcauthor, dstauthor = line.split(b'=', 1)
            except ValueError:
                msg = _(b'ignoring bad line in author map file %s: %s\n')
                ui.warn(msg % (authorfile, line.rstrip()))
                continue

            srcauthor = srcauthor.strip()
            dstauthor = dstauthor.strip()
            if authors.get(srcauthor) in (None, dstauthor):
                msg = _(b'mapping author %s to %s\n')
                ui.debug(msg % (srcauthor, dstauthor))
                authors[srcauthor] = dstauthor
                continue

            m = _(b'overriding mapping for author %s, was %s, will be %s\n')
            ui.status(m % (srcauthor, authors[srcauthor], dstauthor))
    return authors


def recode(s):
-    if isinstance(s, pycompat.unicode):
+    if isinstance(s, str):
90 return s.encode(pycompat.sysstr(orig_encoding), 'replace')
90 return s.encode(pycompat.sysstr(orig_encoding), 'replace')
91 else:
91 else:
92 return s.decode('utf-8').encode(
92 return s.decode('utf-8').encode(
93 pycompat.sysstr(orig_encoding), 'replace'
93 pycompat.sysstr(orig_encoding), 'replace'
94 )
94 )
95
95
96
96
97 def mapbranch(branch, branchmap):
97 def mapbranch(branch, branchmap):
98 """
98 """
99 >>> bmap = {b'default': b'branch1'}
99 >>> bmap = {b'default': b'branch1'}
100 >>> for i in [b'', None]:
100 >>> for i in [b'', None]:
101 ... mapbranch(i, bmap)
101 ... mapbranch(i, bmap)
102 'branch1'
102 'branch1'
103 'branch1'
103 'branch1'
104 >>> bmap = {b'None': b'branch2'}
104 >>> bmap = {b'None': b'branch2'}
105 >>> for i in [b'', None]:
105 >>> for i in [b'', None]:
106 ... mapbranch(i, bmap)
106 ... mapbranch(i, bmap)
107 'branch2'
107 'branch2'
108 'branch2'
108 'branch2'
109 >>> bmap = {b'None': b'branch3', b'default': b'branch4'}
109 >>> bmap = {b'None': b'branch3', b'default': b'branch4'}
110 >>> for i in [b'None', b'', None, b'default', b'branch5']:
110 >>> for i in [b'None', b'', None, b'default', b'branch5']:
111 ... mapbranch(i, bmap)
111 ... mapbranch(i, bmap)
112 'branch3'
112 'branch3'
113 'branch4'
113 'branch4'
114 'branch4'
114 'branch4'
115 'branch4'
115 'branch4'
116 'branch5'
116 'branch5'
117 """
117 """
118 # If branch is None or empty, this commit is coming from the source
118 # If branch is None or empty, this commit is coming from the source
119 # repository's default branch and destined for the default branch in the
119 # repository's default branch and destined for the default branch in the
120 # destination repository. For such commits, using a literal "default"
120 # destination repository. For such commits, using a literal "default"
121 # in branchmap below allows the user to map "default" to an alternate
121 # in branchmap below allows the user to map "default" to an alternate
122 # default branch in the destination repository.
122 # default branch in the destination repository.
123 branch = branchmap.get(branch or b'default', branch)
123 branch = branchmap.get(branch or b'default', branch)
124 # At some point we used "None" literal to denote the default branch,
124 # At some point we used "None" literal to denote the default branch,
125 # attempt to use that for backward compatibility.
125 # attempt to use that for backward compatibility.
126 if not branch:
126 if not branch:
127 branch = branchmap.get(b'None', branch)
127 branch = branchmap.get(b'None', branch)
128 return branch
128 return branch
129
129
130
130
131 source_converters = [
131 source_converters = [
132 (b'cvs', convert_cvs, b'branchsort'),
132 (b'cvs', convert_cvs, b'branchsort'),
133 (b'git', convert_git, b'branchsort'),
133 (b'git', convert_git, b'branchsort'),
134 (b'svn', svn_source, b'branchsort'),
134 (b'svn', svn_source, b'branchsort'),
135 (b'hg', mercurial_source, b'sourcesort'),
135 (b'hg', mercurial_source, b'sourcesort'),
136 (b'darcs', darcs_source, b'branchsort'),
136 (b'darcs', darcs_source, b'branchsort'),
137 (b'mtn', monotone_source, b'branchsort'),
137 (b'mtn', monotone_source, b'branchsort'),
138 (b'gnuarch', gnuarch_source, b'branchsort'),
138 (b'gnuarch', gnuarch_source, b'branchsort'),
139 (b'bzr', bzr_source, b'branchsort'),
139 (b'bzr', bzr_source, b'branchsort'),
140 (b'p4', p4_source, b'branchsort'),
140 (b'p4', p4_source, b'branchsort'),
141 ]
141 ]
142
142
143 sink_converters = [
143 sink_converters = [
144 (b'hg', mercurial_sink),
144 (b'hg', mercurial_sink),
145 (b'svn', svn_sink),
145 (b'svn', svn_sink),
146 ]
146 ]
147
147
148
148
149 def convertsource(ui, path, type, revs):
149 def convertsource(ui, path, type, revs):
150 exceptions = []
150 exceptions = []
151 if type and type not in [s[0] for s in source_converters]:
151 if type and type not in [s[0] for s in source_converters]:
152 raise error.Abort(_(b'%s: invalid source repository type') % type)
152 raise error.Abort(_(b'%s: invalid source repository type') % type)
153 for name, source, sortmode in source_converters:
153 for name, source, sortmode in source_converters:
154 try:
154 try:
155 if not type or name == type:
155 if not type or name == type:
156 return source(ui, name, path, revs), sortmode
156 return source(ui, name, path, revs), sortmode
157 except (NoRepo, MissingTool) as inst:
157 except (NoRepo, MissingTool) as inst:
158 exceptions.append(inst)
158 exceptions.append(inst)
159 if not ui.quiet:
159 if not ui.quiet:
160 for inst in exceptions:
160 for inst in exceptions:
161 ui.write(b"%s\n" % pycompat.bytestr(inst.args[0]))
161 ui.write(b"%s\n" % pycompat.bytestr(inst.args[0]))
162 raise error.Abort(_(b'%s: missing or unsupported repository') % path)
162 raise error.Abort(_(b'%s: missing or unsupported repository') % path)
163
163
164
164
165 def convertsink(ui, path, type):
165 def convertsink(ui, path, type):
166 if type and type not in [s[0] for s in sink_converters]:
166 if type and type not in [s[0] for s in sink_converters]:
167 raise error.Abort(_(b'%s: invalid destination repository type') % type)
167 raise error.Abort(_(b'%s: invalid destination repository type') % type)
168 for name, sink in sink_converters:
168 for name, sink in sink_converters:
169 try:
169 try:
170 if not type or name == type:
170 if not type or name == type:
171 return sink(ui, name, path)
171 return sink(ui, name, path)
172 except NoRepo as inst:
172 except NoRepo as inst:
173 ui.note(_(b"convert: %s\n") % inst)
173 ui.note(_(b"convert: %s\n") % inst)
174 except MissingTool as inst:
174 except MissingTool as inst:
175 raise error.Abort(b'%s\n' % inst)
175 raise error.Abort(b'%s\n' % inst)
176 raise error.Abort(_(b'%s: unknown repository type') % path)
176 raise error.Abort(_(b'%s: unknown repository type') % path)
177
177
178
178
179 class progresssource(object):
179 class progresssource(object):
180 def __init__(self, ui, source, filecount):
180 def __init__(self, ui, source, filecount):
181 self.ui = ui
181 self.ui = ui
182 self.source = source
182 self.source = source
183 self.progress = ui.makeprogress(
183 self.progress = ui.makeprogress(
184 _(b'getting files'), unit=_(b'files'), total=filecount
184 _(b'getting files'), unit=_(b'files'), total=filecount
185 )
185 )
186
186
187 def getfile(self, file, rev):
187 def getfile(self, file, rev):
188 self.progress.increment(item=file)
188 self.progress.increment(item=file)
189 return self.source.getfile(file, rev)
189 return self.source.getfile(file, rev)
190
190
191 def targetfilebelongstosource(self, targetfilename):
191 def targetfilebelongstosource(self, targetfilename):
192 return self.source.targetfilebelongstosource(targetfilename)
192 return self.source.targetfilebelongstosource(targetfilename)
193
193
194 def lookuprev(self, rev):
194 def lookuprev(self, rev):
195 return self.source.lookuprev(rev)
195 return self.source.lookuprev(rev)
196
196
197 def close(self):
197 def close(self):
198 self.progress.complete()
198 self.progress.complete()
199
199
200
200
201 class converter(object):
201 class converter(object):
202 def __init__(self, ui, source, dest, revmapfile, opts):
202 def __init__(self, ui, source, dest, revmapfile, opts):
203
203
204 self.source = source
204 self.source = source
205 self.dest = dest
205 self.dest = dest
206 self.ui = ui
206 self.ui = ui
207 self.opts = opts
207 self.opts = opts
208 self.commitcache = {}
208 self.commitcache = {}
209 self.authors = {}
209 self.authors = {}
210 self.authorfile = None
210 self.authorfile = None
211
211
212 # Record converted revisions persistently: maps source revision
212 # Record converted revisions persistently: maps source revision
213 # ID to target revision ID (both strings). (This is how
213 # ID to target revision ID (both strings). (This is how
214 # incremental conversions work.)
214 # incremental conversions work.)
215 self.map = mapfile(ui, revmapfile)
215 self.map = mapfile(ui, revmapfile)
216
216
217 # Read first the dst author map if any
217 # Read first the dst author map if any
218 authorfile = self.dest.authorfile()
218 authorfile = self.dest.authorfile()
219 if authorfile and os.path.exists(authorfile):
219 if authorfile and os.path.exists(authorfile):
220 self.readauthormap(authorfile)
220 self.readauthormap(authorfile)
221 # Extend/Override with new author map if necessary
221 # Extend/Override with new author map if necessary
222 if opts.get(b'authormap'):
222 if opts.get(b'authormap'):
223 self.readauthormap(opts.get(b'authormap'))
223 self.readauthormap(opts.get(b'authormap'))
224 self.authorfile = self.dest.authorfile()
224 self.authorfile = self.dest.authorfile()
225
225
226 self.splicemap = self.parsesplicemap(opts.get(b'splicemap'))
226 self.splicemap = self.parsesplicemap(opts.get(b'splicemap'))
227 self.branchmap = mapfile(ui, opts.get(b'branchmap'))
227 self.branchmap = mapfile(ui, opts.get(b'branchmap'))
228
228
229 def parsesplicemap(self, path):
229 def parsesplicemap(self, path):
230 """check and validate the splicemap format and
230 """check and validate the splicemap format and
231 return a child/parents dictionary.
231 return a child/parents dictionary.
232 Format checking has two parts.
232 Format checking has two parts.
233 1. generic format which is same across all source types
233 1. generic format which is same across all source types
234 2. specific format checking which may be different for
234 2. specific format checking which may be different for
235 different source type. This logic is implemented in
235 different source type. This logic is implemented in
236 checkrevformat function in source files like
236 checkrevformat function in source files like
237 hg.py, subversion.py etc.
237 hg.py, subversion.py etc.
238 """
238 """
239
239
240 if not path:
240 if not path:
241 return {}
241 return {}
242 m = {}
242 m = {}
243 try:
243 try:
244 fp = open(path, b'rb')
244 fp = open(path, b'rb')
245 for i, line in enumerate(util.iterfile(fp)):
245 for i, line in enumerate(util.iterfile(fp)):
246 line = line.splitlines()[0].rstrip()
246 line = line.splitlines()[0].rstrip()
247 if not line:
247 if not line:
248 # Ignore blank lines
248 # Ignore blank lines
249 continue
249 continue
250 # split line
250 # split line
251 lex = common.shlexer(data=line, whitespace=b',')
251 lex = common.shlexer(data=line, whitespace=b',')
252 line = list(lex)
252 line = list(lex)
253 # check number of parents
253 # check number of parents
254 if not (2 <= len(line) <= 3):
254 if not (2 <= len(line) <= 3):
255 raise error.Abort(
255 raise error.Abort(
256 _(
256 _(
257 b'syntax error in %s(%d): child parent1'
257 b'syntax error in %s(%d): child parent1'
258 b'[,parent2] expected'
258 b'[,parent2] expected'
259 )
259 )
260 % (path, i + 1)
260 % (path, i + 1)
261 )
261 )
262 for part in line:
262 for part in line:
263 self.source.checkrevformat(part)
263 self.source.checkrevformat(part)
264 child, p1, p2 = line[0], line[1:2], line[2:]
264 child, p1, p2 = line[0], line[1:2], line[2:]
265 if p1 == p2:
265 if p1 == p2:
266 m[child] = p1
266 m[child] = p1
267 else:
267 else:
268 m[child] = p1 + p2
268 m[child] = p1 + p2
269 # if file does not exist or error reading, exit
269 # if file does not exist or error reading, exit
270 except IOError:
270 except IOError:
271 raise error.Abort(
271 raise error.Abort(
272 _(b'splicemap file not found or error reading %s:') % path
272 _(b'splicemap file not found or error reading %s:') % path
273 )
273 )
274 return m
274 return m
275
275
276 def walktree(self, heads):
276 def walktree(self, heads):
277 """Return a mapping that identifies the uncommitted parents of every
277 """Return a mapping that identifies the uncommitted parents of every
278 uncommitted changeset."""
278 uncommitted changeset."""
279 visit = list(heads)
279 visit = list(heads)
280 known = set()
280 known = set()
281 parents = {}
281 parents = {}
282 numcommits = self.source.numcommits()
282 numcommits = self.source.numcommits()
283 progress = self.ui.makeprogress(
283 progress = self.ui.makeprogress(
284 _(b'scanning'), unit=_(b'revisions'), total=numcommits
284 _(b'scanning'), unit=_(b'revisions'), total=numcommits
285 )
285 )
286 while visit:
286 while visit:
287 n = visit.pop(0)
287 n = visit.pop(0)
288 if n in known:
288 if n in known:
289 continue
289 continue
290 if n in self.map:
290 if n in self.map:
291 m = self.map[n]
291 m = self.map[n]
292 if m == SKIPREV or self.dest.hascommitfrommap(m):
292 if m == SKIPREV or self.dest.hascommitfrommap(m):
293 continue
293 continue
294 known.add(n)
294 known.add(n)
295 progress.update(len(known))
295 progress.update(len(known))
296 commit = self.cachecommit(n)
296 commit = self.cachecommit(n)
297 parents[n] = []
297 parents[n] = []
298 for p in commit.parents:
298 for p in commit.parents:
299 parents[n].append(p)
299 parents[n].append(p)
300 visit.append(p)
300 visit.append(p)
301 progress.complete()
301 progress.complete()
302
302
303 return parents
303 return parents
304
304
305 def mergesplicemap(self, parents, splicemap):
305 def mergesplicemap(self, parents, splicemap):
306 """A splicemap redefines child/parent relationships. Check the
306 """A splicemap redefines child/parent relationships. Check the
307 map contains valid revision identifiers and merge the new
307 map contains valid revision identifiers and merge the new
308 links in the source graph.
308 links in the source graph.
309 """
309 """
310 for c in sorted(splicemap):
310 for c in sorted(splicemap):
311 if c not in parents:
311 if c not in parents:
312 if not self.dest.hascommitforsplicemap(self.map.get(c, c)):
312 if not self.dest.hascommitforsplicemap(self.map.get(c, c)):
313 # Could be in source but not converted during this run
313 # Could be in source but not converted during this run
314 self.ui.warn(
314 self.ui.warn(
315 _(
315 _(
316 b'splice map revision %s is not being '
316 b'splice map revision %s is not being '
317 b'converted, ignoring\n'
317 b'converted, ignoring\n'
318 )
318 )
319 % c
319 % c
320 )
320 )
321 continue
321 continue
322 pc = []
322 pc = []
323 for p in splicemap[c]:
323 for p in splicemap[c]:
324 # We do not have to wait for nodes already in dest.
324 # We do not have to wait for nodes already in dest.
325 if self.dest.hascommitforsplicemap(self.map.get(p, p)):
325 if self.dest.hascommitforsplicemap(self.map.get(p, p)):
326 continue
326 continue
327 # Parent is not in dest and not being converted, not good
327 # Parent is not in dest and not being converted, not good
328 if p not in parents:
328 if p not in parents:
329 raise error.Abort(_(b'unknown splice map parent: %s') % p)
329 raise error.Abort(_(b'unknown splice map parent: %s') % p)
330 pc.append(p)
330 pc.append(p)
331 parents[c] = pc
331 parents[c] = pc
332
332
333 def toposort(self, parents, sortmode):
333 def toposort(self, parents, sortmode):
334 """Return an ordering such that every uncommitted changeset is
334 """Return an ordering such that every uncommitted changeset is
335 preceded by all its uncommitted ancestors."""
335 preceded by all its uncommitted ancestors."""
336
336
337 def mapchildren(parents):
337 def mapchildren(parents):
338 """Return a (children, roots) tuple where 'children' maps parent
338 """Return a (children, roots) tuple where 'children' maps parent
339 revision identifiers to children ones, and 'roots' is the list of
339 revision identifiers to children ones, and 'roots' is the list of
340 revisions without parents. 'parents' must be a mapping of revision
340 revisions without parents. 'parents' must be a mapping of revision
341 identifier to its parents ones.
341 identifier to its parents ones.
342 """
342 """
343 visit = collections.deque(sorted(parents))
343 visit = collections.deque(sorted(parents))
344 seen = set()
344 seen = set()
345 children = {}
345 children = {}
346 roots = []
346 roots = []
347
347
348 while visit:
348 while visit:
349 n = visit.popleft()
349 n = visit.popleft()
350 if n in seen:
350 if n in seen:
351 continue
351 continue
352 seen.add(n)
352 seen.add(n)
353 # Ensure that nodes without parents are present in the
353 # Ensure that nodes without parents are present in the
354 # 'children' mapping.
354 # 'children' mapping.
355 children.setdefault(n, [])
355 children.setdefault(n, [])
356 hasparent = False
356 hasparent = False
357 for p in parents[n]:
357 for p in parents[n]:
358 if p not in self.map:
358 if p not in self.map:
359 visit.append(p)
359 visit.append(p)
360 hasparent = True
360 hasparent = True
361 children.setdefault(p, []).append(n)
361 children.setdefault(p, []).append(n)
362 if not hasparent:
362 if not hasparent:
363 roots.append(n)
363 roots.append(n)
364
364
365 return children, roots
365 return children, roots
366
366
367 # Sort functions are supposed to take a list of revisions which
367 # Sort functions are supposed to take a list of revisions which
368 # can be converted immediately and pick one
368 # can be converted immediately and pick one
369
369
370 def makebranchsorter():
370 def makebranchsorter():
371 """If the previously converted revision has a child in the
371 """If the previously converted revision has a child in the
372 eligible revisions list, pick it. Return the list head
372 eligible revisions list, pick it. Return the list head
373 otherwise. Branch sort attempts to minimize branch
373 otherwise. Branch sort attempts to minimize branch
374 switching, which is harmful for Mercurial backend
374 switching, which is harmful for Mercurial backend
375 compression.
375 compression.
376 """
376 """
377 prev = [None]
377 prev = [None]
378
378
379 def picknext(nodes):
379 def picknext(nodes):
380 next = nodes[0]
380 next = nodes[0]
381 for n in nodes:
381 for n in nodes:
382 if prev[0] in parents[n]:
382 if prev[0] in parents[n]:
383 next = n
383 next = n
384 break
384 break
385 prev[0] = next
385 prev[0] = next
386 return next
386 return next
387
387
388 return picknext
388 return picknext
389
389
390 def makesourcesorter():
390 def makesourcesorter():
391 """Source specific sort."""
391 """Source specific sort."""
392 keyfn = lambda n: self.commitcache[n].sortkey
392 keyfn = lambda n: self.commitcache[n].sortkey
393
393
394 def picknext(nodes):
394 def picknext(nodes):
395 return sorted(nodes, key=keyfn)[0]
395 return sorted(nodes, key=keyfn)[0]
396
396
397 return picknext
397 return picknext
398
398
399 def makeclosesorter():
399 def makeclosesorter():
400 """Close order sort."""
400 """Close order sort."""
401 keyfn = lambda n: (
401 keyfn = lambda n: (
402 b'close' not in self.commitcache[n].extra,
402 b'close' not in self.commitcache[n].extra,
403 self.commitcache[n].sortkey,
403 self.commitcache[n].sortkey,
404 )
404 )
405
405
406 def picknext(nodes):
406 def picknext(nodes):
407 return sorted(nodes, key=keyfn)[0]
407 return sorted(nodes, key=keyfn)[0]
408
408
409 return picknext
409 return picknext
410
410
411 def makedatesorter():
411 def makedatesorter():
412 """Sort revisions by date."""
412 """Sort revisions by date."""
413 dates = {}
413 dates = {}
414
414
415 def getdate(n):
415 def getdate(n):
416 if n not in dates:
416 if n not in dates:
417 dates[n] = dateutil.parsedate(self.commitcache[n].date)
417 dates[n] = dateutil.parsedate(self.commitcache[n].date)
418 return dates[n]
418 return dates[n]
419
419
420 def picknext(nodes):
420 def picknext(nodes):
421 return min([(getdate(n), n) for n in nodes])[1]
421 return min([(getdate(n), n) for n in nodes])[1]
422
422
423 return picknext
423 return picknext
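
The dates dict memoizes dateutil.parsedate() because picknext() may ask about the same node on many rounds. The same caching with functools, using plain timestamps as a stand-in for parsed dates:

    import functools

    timestamps = {'a': 200, 'b': 100}  # stand-in for parsed commit dates

    @functools.lru_cache(maxsize=None)
    def getdate(n):
        return timestamps[n]

    picknext = lambda nodes: min((getdate(n), n) for n in nodes)[1]
    assert picknext(['a', 'b']) == 'b'  # the oldest revision is picked
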
424
424
425 if sortmode == b'branchsort':
425 if sortmode == b'branchsort':
426 picknext = makebranchsorter()
426 picknext = makebranchsorter()
427 elif sortmode == b'datesort':
427 elif sortmode == b'datesort':
428 picknext = makedatesorter()
428 picknext = makedatesorter()
429 elif sortmode == b'sourcesort':
429 elif sortmode == b'sourcesort':
430 picknext = makesourcesorter()
430 picknext = makesourcesorter()
431 elif sortmode == b'closesort':
431 elif sortmode == b'closesort':
432 picknext = makeclosesorter()
432 picknext = makeclosesorter()
433 else:
433 else:
434 raise error.Abort(_(b'unknown sort mode: %s') % sortmode)
434 raise error.Abort(_(b'unknown sort mode: %s') % sortmode)
435
435
436 children, actives = mapchildren(parents)
436 children, actives = mapchildren(parents)
437
437
438 s = []
438 s = []
439 pendings = {}
439 pendings = {}
440 while actives:
440 while actives:
441 n = picknext(actives)
441 n = picknext(actives)
442 actives.remove(n)
442 actives.remove(n)
443 s.append(n)
443 s.append(n)
444
444
445 # Update dependents list
445 # Update dependents list
446 for c in children.get(n, []):
446 for c in children.get(n, []):
447 if c not in pendings:
447 if c not in pendings:
448 pendings[c] = [p for p in parents[c] if p not in self.map]
448 pendings[c] = [p for p in parents[c] if p not in self.map]
449 try:
449 try:
450 pendings[c].remove(n)
450 pendings[c].remove(n)
451 except ValueError:
451 except ValueError:
452 raise error.Abort(
452 raise error.Abort(
453 _(b'cycle detected between %s and %s')
453 _(b'cycle detected between %s and %s')
454 % (recode(c), recode(n))
454 % (recode(c), recode(n))
455 )
455 )
456 if not pendings[c]:
456 if not pendings[c]:
457 # Parents are converted, node is eligible
457 # Parents are converted, node is eligible
458 actives.insert(0, c)
458 actives.insert(0, c)
459 pendings[c] = None
459 pendings[c] = None
460
460
461 if len(s) != len(parents):
461 if len(s) != len(parents):
462 raise error.Abort(_(b"not all revisions were sorted"))
462 raise error.Abort(_(b"not all revisions were sorted"))
463
463
464 return s
464 return s
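
The loop above is Kahn-style topological sorting: a child becomes eligible once its pending (unconverted) parents are exhausted, and any node left over at the end means a cycle. A condensed, self-contained sketch where picknext simply takes the list head:

    def toposort(parents):
        children, actives = {}, []
        for n in sorted(parents):
            children.setdefault(n, [])
            for p in parents[n]:
                children.setdefault(p, []).append(n)
            if not parents[n]:
                actives.append(n)
        s, pendings = [], {}
        while actives:
            n = actives.pop(0)              # stand-in for picknext(actives)
            s.append(n)
            for c in children[n]:
                if c not in pendings:
                    pendings[c] = list(parents[c])
                pendings[c].remove(n)
                if not pendings[c]:
                    actives.insert(0, c)    # all parents done: c is eligible
        if len(s) != len(parents):
            raise ValueError('cycle detected')
        return s

    assert toposort({'a': [], 'b': ['a'], 'c': ['a', 'b']}) == ['a', 'b', 'c']
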
465
465
466 def writeauthormap(self):
466 def writeauthormap(self):
467 authorfile = self.authorfile
467 authorfile = self.authorfile
468 if authorfile:
468 if authorfile:
469 self.ui.status(_(b'writing author map file %s\n') % authorfile)
469 self.ui.status(_(b'writing author map file %s\n') % authorfile)
470 ofile = open(authorfile, b'wb+')
470 ofile = open(authorfile, b'wb+')
471 for author in self.authors:
471 for author in self.authors:
472 ofile.write(
472 ofile.write(
473 util.tonativeeol(
473 util.tonativeeol(
474 b"%s=%s\n" % (author, self.authors[author])
474 b"%s=%s\n" % (author, self.authors[author])
475 )
475 )
476 )
476 )
477 ofile.close()
477 ofile.close()
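
The file written here uses the same one-mapping-per-line format that --authormap consumes, for example (illustrative entries):

    john=John Smith <john.smith@example.com>
    jdoe=Jane Doe <jane@example.com>
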
478
478
479 def readauthormap(self, authorfile):
479 def readauthormap(self, authorfile):
480 self.authors = readauthormap(self.ui, authorfile, self.authors)
480 self.authors = readauthormap(self.ui, authorfile, self.authors)
481
481
482 def cachecommit(self, rev):
482 def cachecommit(self, rev):
483 commit = self.source.getcommit(rev)
483 commit = self.source.getcommit(rev)
484 commit.author = self.authors.get(commit.author, commit.author)
484 commit.author = self.authors.get(commit.author, commit.author)
485 commit.branch = mapbranch(commit.branch, self.branchmap)
485 commit.branch = mapbranch(commit.branch, self.branchmap)
486 self.commitcache[rev] = commit
486 self.commitcache[rev] = commit
487 return commit
487 return commit
488
488
489 def copy(self, rev):
489 def copy(self, rev):
490 commit = self.commitcache[rev]
490 commit = self.commitcache[rev]
491 full = self.opts.get(b'full')
491 full = self.opts.get(b'full')
492 changes = self.source.getchanges(rev, full)
492 changes = self.source.getchanges(rev, full)
493 if isinstance(changes, bytes):
493 if isinstance(changes, bytes):
494 if changes == SKIPREV:
494 if changes == SKIPREV:
495 dest = SKIPREV
495 dest = SKIPREV
496 else:
496 else:
497 dest = self.map[changes]
497 dest = self.map[changes]
498 self.map[rev] = dest
498 self.map[rev] = dest
499 return
499 return
500 files, copies, cleanp2 = changes
500 files, copies, cleanp2 = changes
501 pbranches = []
501 pbranches = []
502 if commit.parents:
502 if commit.parents:
503 for prev in commit.parents:
503 for prev in commit.parents:
504 if prev not in self.commitcache:
504 if prev not in self.commitcache:
505 self.cachecommit(prev)
505 self.cachecommit(prev)
506 pbranches.append(
506 pbranches.append(
507 (self.map[prev], self.commitcache[prev].branch)
507 (self.map[prev], self.commitcache[prev].branch)
508 )
508 )
509 self.dest.setbranch(commit.branch, pbranches)
509 self.dest.setbranch(commit.branch, pbranches)
510 try:
510 try:
511 parents = self.splicemap[rev]
511 parents = self.splicemap[rev]
512 self.ui.status(
512 self.ui.status(
513 _(b'spliced in %s as parents of %s\n')
513 _(b'spliced in %s as parents of %s\n')
514 % (_(b' and ').join(parents), rev)
514 % (_(b' and ').join(parents), rev)
515 )
515 )
516 parents = [self.map.get(p, p) for p in parents]
516 parents = [self.map.get(p, p) for p in parents]
517 except KeyError:
517 except KeyError:
518 parents = [b[0] for b in pbranches]
518 parents = [b[0] for b in pbranches]
519 parents.extend(
519 parents.extend(
520 self.map[x] for x in commit.optparents if x in self.map
520 self.map[x] for x in commit.optparents if x in self.map
521 )
521 )
522 if len(pbranches) != 2:
522 if len(pbranches) != 2:
523 cleanp2 = set()
523 cleanp2 = set()
524 if len(parents) < 3:
524 if len(parents) < 3:
525 source = progresssource(self.ui, self.source, len(files))
525 source = progresssource(self.ui, self.source, len(files))
526 else:
526 else:
527 # For an octopus merge, we end up traversing the list of
527 # For an octopus merge, we end up traversing the list of
528 # changed files N-1 times. This tweak to the number of
528 # changed files N-1 times. This tweak to the number of
529 # files makes it so the progress bar doesn't overflow
529 # files makes it so the progress bar doesn't overflow
530 # itself.
530 # itself.
531 source = progresssource(
531 source = progresssource(
532 self.ui, self.source, len(files) * (len(parents) - 1)
532 self.ui, self.source, len(files) * (len(parents) - 1)
533 )
533 )
534 newnode = self.dest.putcommit(
534 newnode = self.dest.putcommit(
535 files, copies, parents, commit, source, self.map, full, cleanp2
535 files, copies, parents, commit, source, self.map, full, cleanp2
536 )
536 )
537 source.close()
537 source.close()
538 self.source.converted(rev, newnode)
538 self.source.converted(rev, newnode)
539 self.map[rev] = newnode
539 self.map[rev] = newnode
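
The progress arithmetic for octopus merges follows directly from the comment: the changed-file list is traversed once per parent beyond the first, so the total scales as len(files) * (len(parents) - 1). A quick check of that sizing rule:

    def progress_total(nfiles, nparents):
        # octopus merges walk the file list N-1 times
        return nfiles if nparents < 3 else nfiles * (nparents - 1)

    assert progress_total(10, 2) == 10  # ordinary merge: one pass
    assert progress_total(10, 4) == 30  # 4-parent octopus: three passes
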
540
540
541 def convert(self, sortmode):
541 def convert(self, sortmode):
542 try:
542 try:
543 self.source.before()
543 self.source.before()
544 self.dest.before()
544 self.dest.before()
545 self.source.setrevmap(self.map)
545 self.source.setrevmap(self.map)
546 self.ui.status(_(b"scanning source...\n"))
546 self.ui.status(_(b"scanning source...\n"))
547 heads = self.source.getheads()
547 heads = self.source.getheads()
548 parents = self.walktree(heads)
548 parents = self.walktree(heads)
549 self.mergesplicemap(parents, self.splicemap)
549 self.mergesplicemap(parents, self.splicemap)
550 self.ui.status(_(b"sorting...\n"))
550 self.ui.status(_(b"sorting...\n"))
551 t = self.toposort(parents, sortmode)
551 t = self.toposort(parents, sortmode)
552 num = len(t)
552 num = len(t)
553 c = None
553 c = None
554
554
555 self.ui.status(_(b"converting...\n"))
555 self.ui.status(_(b"converting...\n"))
556 progress = self.ui.makeprogress(
556 progress = self.ui.makeprogress(
557 _(b'converting'), unit=_(b'revisions'), total=len(t)
557 _(b'converting'), unit=_(b'revisions'), total=len(t)
558 )
558 )
559 for i, c in enumerate(t):
559 for i, c in enumerate(t):
560 num -= 1
560 num -= 1
561 desc = self.commitcache[c].desc
561 desc = self.commitcache[c].desc
562 if b"\n" in desc:
562 if b"\n" in desc:
563 desc = desc.splitlines()[0]
563 desc = desc.splitlines()[0]
564 # convert log message to local encoding without using
564 # convert log message to local encoding without using
565 # tolocal() because encoding.encoding, which convert()
565 # tolocal() because encoding.encoding, which convert()
566 # uses, is 'utf-8'
566 # uses, is 'utf-8'
567 self.ui.status(b"%d %s\n" % (num, recode(desc)))
567 self.ui.status(b"%d %s\n" % (num, recode(desc)))
568 self.ui.note(_(b"source: %s\n") % recode(c))
568 self.ui.note(_(b"source: %s\n") % recode(c))
569 progress.update(i)
569 progress.update(i)
570 self.copy(c)
570 self.copy(c)
571 progress.complete()
571 progress.complete()
572
572
573 if not self.ui.configbool(b'convert', b'skiptags'):
573 if not self.ui.configbool(b'convert', b'skiptags'):
574 tags = self.source.gettags()
574 tags = self.source.gettags()
575 ctags = {}
575 ctags = {}
576 for k in tags:
576 for k in tags:
577 v = tags[k]
577 v = tags[k]
578 if self.map.get(v, SKIPREV) != SKIPREV:
578 if self.map.get(v, SKIPREV) != SKIPREV:
579 ctags[k] = self.map[v]
579 ctags[k] = self.map[v]
580
580
581 if c and ctags:
581 if c and ctags:
582 nrev, tagsparent = self.dest.puttags(ctags)
582 nrev, tagsparent = self.dest.puttags(ctags)
583 if nrev and tagsparent:
583 if nrev and tagsparent:
584 # write another hash correspondence to override the
584 # write another hash correspondence to override the
585 # previous one so we don't end up with extra tag heads
585 # previous one so we don't end up with extra tag heads
586 tagsparents = [
586 tagsparents = [
587 e for e in self.map.items() if e[1] == tagsparent
587 e for e in self.map.items() if e[1] == tagsparent
588 ]
588 ]
589 if tagsparents:
589 if tagsparents:
590 self.map[tagsparents[0][0]] = nrev
590 self.map[tagsparents[0][0]] = nrev
591
591
592 bookmarks = self.source.getbookmarks()
592 bookmarks = self.source.getbookmarks()
593 cbookmarks = {}
593 cbookmarks = {}
594 for k in bookmarks:
594 for k in bookmarks:
595 v = bookmarks[k]
595 v = bookmarks[k]
596 if self.map.get(v, SKIPREV) != SKIPREV:
596 if self.map.get(v, SKIPREV) != SKIPREV:
597 cbookmarks[k] = self.map[v]
597 cbookmarks[k] = self.map[v]
598
598
599 if c and cbookmarks:
599 if c and cbookmarks:
600 self.dest.putbookmarks(cbookmarks)
600 self.dest.putbookmarks(cbookmarks)
601
601
602 self.writeauthormap()
602 self.writeauthormap()
603 finally:
603 finally:
604 self.cleanup()
604 self.cleanup()
605
605
606 def cleanup(self):
606 def cleanup(self):
607 try:
607 try:
608 self.dest.after()
608 self.dest.after()
609 finally:
609 finally:
610 self.source.after()
610 self.source.after()
611 self.map.close()
611 self.map.close()
612
612
613
613
614 def convert(ui, src, dest=None, revmapfile=None, **opts):
614 def convert(ui, src, dest=None, revmapfile=None, **opts):
615 opts = pycompat.byteskwargs(opts)
615 opts = pycompat.byteskwargs(opts)
616 global orig_encoding
616 global orig_encoding
617 orig_encoding = encoding.encoding
617 orig_encoding = encoding.encoding
618 encoding.encoding = b'UTF-8'
618 encoding.encoding = b'UTF-8'
619
619
620 # support --authors as an alias for --authormap
620 # support --authors as an alias for --authormap
621 if not opts.get(b'authormap'):
621 if not opts.get(b'authormap'):
622 opts[b'authormap'] = opts.get(b'authors')
622 opts[b'authormap'] = opts.get(b'authors')
623
623
624 if not dest:
624 if not dest:
625 dest = hg.defaultdest(src) + b"-hg"
625 dest = hg.defaultdest(src) + b"-hg"
626 ui.status(_(b"assuming destination %s\n") % dest)
626 ui.status(_(b"assuming destination %s\n") % dest)
627
627
628 destc = convertsink(ui, dest, opts.get(b'dest_type'))
628 destc = convertsink(ui, dest, opts.get(b'dest_type'))
629 destc = scmutil.wrapconvertsink(destc)
629 destc = scmutil.wrapconvertsink(destc)
630
630
631 try:
631 try:
632 srcc, defaultsort = convertsource(
632 srcc, defaultsort = convertsource(
633 ui, src, opts.get(b'source_type'), opts.get(b'rev')
633 ui, src, opts.get(b'source_type'), opts.get(b'rev')
634 )
634 )
635 except Exception:
635 except Exception:
636 for path in destc.created:
636 for path in destc.created:
637 shutil.rmtree(path, True)
637 shutil.rmtree(path, True)
638 raise
638 raise
639
639
640 sortmodes = (b'branchsort', b'datesort', b'sourcesort', b'closesort')
640 sortmodes = (b'branchsort', b'datesort', b'sourcesort', b'closesort')
641 sortmode = [m for m in sortmodes if opts.get(m)]
641 sortmode = [m for m in sortmodes if opts.get(m)]
642 if len(sortmode) > 1:
642 if len(sortmode) > 1:
643 raise error.Abort(_(b'more than one sort mode specified'))
643 raise error.Abort(_(b'more than one sort mode specified'))
644 if sortmode:
644 if sortmode:
645 sortmode = sortmode[0]
645 sortmode = sortmode[0]
646 else:
646 else:
647 sortmode = defaultsort
647 sortmode = defaultsort
648
648
649 if sortmode == b'sourcesort' and not srcc.hasnativeorder():
649 if sortmode == b'sourcesort' and not srcc.hasnativeorder():
650 raise error.Abort(
650 raise error.Abort(
651 _(b'--sourcesort is not supported by this data source')
651 _(b'--sourcesort is not supported by this data source')
652 )
652 )
653 if sortmode == b'closesort' and not srcc.hasnativeclose():
653 if sortmode == b'closesort' and not srcc.hasnativeclose():
654 raise error.Abort(
654 raise error.Abort(
655 _(b'--closesort is not supported by this data source')
655 _(b'--closesort is not supported by this data source')
656 )
656 )
657
657
658 fmap = opts.get(b'filemap')
658 fmap = opts.get(b'filemap')
659 if fmap:
659 if fmap:
660 srcc = filemap.filemap_source(ui, srcc, fmap)
660 srcc = filemap.filemap_source(ui, srcc, fmap)
661 destc.setfilemapmode(True)
661 destc.setfilemapmode(True)
662
662
663 if not revmapfile:
663 if not revmapfile:
664 revmapfile = destc.revmapfile()
664 revmapfile = destc.revmapfile()
665
665
666 c = converter(ui, srcc, destc, revmapfile, opts)
666 c = converter(ui, srcc, destc, revmapfile, opts)
667 c.convert(sortmode)
667 c.convert(sortmode)
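
The four sort flags are mutually exclusive and fall back to the source's default; a minimal reproduction of that validation (opts stands in for the parsed command-line options):

    sortmodes = ('branchsort', 'datesort', 'sourcesort', 'closesort')

    def pick_sortmode(opts, default='branchsort'):
        chosen = [m for m in sortmodes if opts.get(m)]
        if len(chosen) > 1:
            raise ValueError('more than one sort mode specified')
        return chosen[0] if chosen else default

    assert pick_sortmode({'datesort': True}) == 'datesort'
    assert pick_sortmode({}) == 'branchsort'
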
@@ -1,241 +1,241
1 # darcs.py - darcs support for the convert extension
1 # darcs.py - darcs support for the convert extension
2 #
2 #
3 # Copyright 2007-2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2007-2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import errno
8 import errno
9 import os
9 import os
10 import re
10 import re
11 import shutil
11 import shutil
12
12
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial import (
14 from mercurial import (
15 error,
15 error,
16 pycompat,
16 pycompat,
17 util,
17 util,
18 )
18 )
19 from mercurial.utils import dateutil
19 from mercurial.utils import dateutil
20 from . import common
20 from . import common
21
21
22 NoRepo = common.NoRepo
22 NoRepo = common.NoRepo
23
23
24 # The naming drift of ElementTree is fun!
24 # The naming drift of ElementTree is fun!
25
25
26 try:
26 try:
27 from xml.etree.cElementTree import ElementTree
27 from xml.etree.cElementTree import ElementTree
28 from xml.etree.cElementTree import XMLParser
28 from xml.etree.cElementTree import XMLParser
29 except ImportError:
29 except ImportError:
30 try:
30 try:
31 from xml.etree.ElementTree import ElementTree
31 from xml.etree.ElementTree import ElementTree
32 from xml.etree.ElementTree import XMLParser
32 from xml.etree.ElementTree import XMLParser
33 except ImportError:
33 except ImportError:
34 try:
34 try:
35 from elementtree.cElementTree import ElementTree
35 from elementtree.cElementTree import ElementTree
36 from elementtree.cElementTree import XMLParser
36 from elementtree.cElementTree import XMLParser
37 except ImportError:
37 except ImportError:
38 try:
38 try:
39 from elementtree.ElementTree import ElementTree
39 from elementtree.ElementTree import ElementTree
40 from elementtree.ElementTree import XMLParser
40 from elementtree.ElementTree import XMLParser
41 except ImportError:
41 except ImportError:
42 pass
42 pass
43
43
44
44
45 class darcs_source(common.converter_source, common.commandline):
45 class darcs_source(common.converter_source, common.commandline):
46 def __init__(self, ui, repotype, path, revs=None):
46 def __init__(self, ui, repotype, path, revs=None):
47 common.converter_source.__init__(self, ui, repotype, path, revs=revs)
47 common.converter_source.__init__(self, ui, repotype, path, revs=revs)
48 common.commandline.__init__(self, ui, b'darcs')
48 common.commandline.__init__(self, ui, b'darcs')
49
49
50 # check for _darcs, ElementTree so that we can easily skip
50 # check for _darcs, ElementTree so that we can easily skip
51 # test-convert-darcs if ElementTree is not around
51 # test-convert-darcs if ElementTree is not around
52 if not os.path.exists(os.path.join(path, b'_darcs')):
52 if not os.path.exists(os.path.join(path, b'_darcs')):
53 raise NoRepo(_(b"%s does not look like a darcs repository") % path)
53 raise NoRepo(_(b"%s does not look like a darcs repository") % path)
54
54
55 common.checktool(b'darcs')
55 common.checktool(b'darcs')
56 version = self.run0(b'--version').splitlines()[0].strip()
56 version = self.run0(b'--version').splitlines()[0].strip()
57 if version < b'2.1':
57 if version < b'2.1':
58 raise error.Abort(
58 raise error.Abort(
59 _(b'darcs version 2.1 or newer needed (found %r)') % version
59 _(b'darcs version 2.1 or newer needed (found %r)') % version
60 )
60 )
61
61
62 if b"ElementTree" not in globals():
62 if b"ElementTree" not in globals():
63 raise error.Abort(_(b"Python ElementTree module is not available"))
63 raise error.Abort(_(b"Python ElementTree module is not available"))
64
64
65 self.path = os.path.realpath(path)
65 self.path = os.path.realpath(path)
66
66
67 self.lastrev = None
67 self.lastrev = None
68 self.changes = {}
68 self.changes = {}
69 self.parents = {}
69 self.parents = {}
70 self.tags = {}
70 self.tags = {}
71
71
72 # Check darcs repository format
72 # Check darcs repository format
73 format = self.format()
73 format = self.format()
74 if format:
74 if format:
75 if format in (b'darcs-1.0', b'hashed'):
75 if format in (b'darcs-1.0', b'hashed'):
76 raise NoRepo(
76 raise NoRepo(
77 _(
77 _(
78 b"%s repository format is unsupported, "
78 b"%s repository format is unsupported, "
79 b"please upgrade"
79 b"please upgrade"
80 )
80 )
81 % format
81 % format
82 )
82 )
83 else:
83 else:
84 self.ui.warn(_(b'failed to detect repository format!'))
84 self.ui.warn(_(b'failed to detect repository format!'))
85
85
86 def before(self):
86 def before(self):
87 self.tmppath = pycompat.mkdtemp(
87 self.tmppath = pycompat.mkdtemp(
88 prefix=b'convert-' + os.path.basename(self.path) + b'-'
88 prefix=b'convert-' + os.path.basename(self.path) + b'-'
89 )
89 )
90 output, status = self.run(b'init', repodir=self.tmppath)
90 output, status = self.run(b'init', repodir=self.tmppath)
91 self.checkexit(status)
91 self.checkexit(status)
92
92
93 tree = self.xml(
93 tree = self.xml(
94 b'changes', xml_output=True, summary=True, repodir=self.path
94 b'changes', xml_output=True, summary=True, repodir=self.path
95 )
95 )
96 tagname = None
96 tagname = None
97 child = None
97 child = None
98 for elt in tree.findall(b'patch'):
98 for elt in tree.findall(b'patch'):
99 node = elt.get(b'hash')
99 node = elt.get(b'hash')
100 name = elt.findtext(b'name', b'')
100 name = elt.findtext(b'name', b'')
101 if name.startswith(b'TAG '):
101 if name.startswith(b'TAG '):
102 tagname = name[4:].strip()
102 tagname = name[4:].strip()
103 elif tagname is not None:
103 elif tagname is not None:
104 self.tags[tagname] = node
104 self.tags[tagname] = node
105 tagname = None
105 tagname = None
106 self.changes[node] = elt
106 self.changes[node] = elt
107 self.parents[child] = [node]
107 self.parents[child] = [node]
108 child = node
108 child = node
109 self.parents[child] = []
109 self.parents[child] = []
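
darcs changes emits patches newest first, so before() threads each patch to the previously seen (newer) one and attaches a pending 'TAG ' name to the patch beneath it; parents[None] ends up naming the head. A toy rerun over (hash, name) pairs (hypothetical data, not real darcs output):

    def link_patches(entries):
        # entries: (hash, name) pairs, newest first
        tags, parents, tagname, child = {}, {}, None, None
        for node, name in entries:
            if name.startswith('TAG '):
                tagname = name[4:].strip()
            elif tagname is not None:
                tags[tagname] = node
                tagname = None
            parents[child] = [node]
            child = node
        parents[child] = []
        return tags, parents

    tags, parents = link_patches(
        [('h2', 'TAG 1.0'), ('h1', 'fix'), ('h0', 'init')])
    assert tags == {'1.0': 'h1'}    # the tag names the patch below it
    assert parents[None] == ['h2']  # getheads() returns this list
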
110
110
111 def after(self):
111 def after(self):
112 self.ui.debug(b'cleaning up %s\n' % self.tmppath)
112 self.ui.debug(b'cleaning up %s\n' % self.tmppath)
113 shutil.rmtree(self.tmppath, ignore_errors=True)
113 shutil.rmtree(self.tmppath, ignore_errors=True)
114
114
115 def recode(self, s, encoding=None):
115 def recode(self, s, encoding=None):
116 if isinstance(s, pycompat.unicode):
116 if isinstance(s, str):
117 # XMLParser returns unicode objects for anything it can't
117 # XMLParser returns unicode objects for anything it can't
118 # encode into ASCII. We convert them back to bytes to get
118 # encode into ASCII. We convert them back to bytes to get
119 # recode's normal conversion behavior.
119 # recode's normal conversion behavior.
120 s = s.encode('latin-1')
120 s = s.encode('latin-1')
121 return super(darcs_source, self).recode(s, encoding)
121 return super(darcs_source, self).recode(s, encoding)
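
This works because latin-1 decoding is a lossless one-to-one mapping of bytes to the first 256 code points, so re-encoding recovers the original bytes exactly:

    raw = bytes(range(256))  # any byte sequence survives the round trip
    assert raw.decode('latin-1').encode('latin-1') == raw
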
122
122
123 def xml(self, cmd, **kwargs):
123 def xml(self, cmd, **kwargs):
124 # NOTE: darcs is currently encoding agnostic and will print
124 # NOTE: darcs is currently encoding agnostic and will print
125 # patch metadata byte-for-byte, even in the XML changelog.
125 # patch metadata byte-for-byte, even in the XML changelog.
126 etree = ElementTree()
126 etree = ElementTree()
127 # While we are decoding the XML as latin-1 to be as liberal as
127 # While we are decoding the XML as latin-1 to be as liberal as
128 # possible, etree will still raise an exception if any
128 # possible, etree will still raise an exception if any
129 # non-printable characters are in the XML changelog.
129 # non-printable characters are in the XML changelog.
130 parser = XMLParser(encoding=b'latin-1')
130 parser = XMLParser(encoding=b'latin-1')
131 p = self._run(cmd, **kwargs)
131 p = self._run(cmd, **kwargs)
132 etree.parse(p.stdout, parser=parser)
132 etree.parse(p.stdout, parser=parser)
133 p.wait()
133 p.wait()
134 self.checkexit(p.returncode)
134 self.checkexit(p.returncode)
135 return etree.getroot()
135 return etree.getroot()
136
136
137 def format(self):
137 def format(self):
138 output, status = self.run(b'show', b'repo', repodir=self.path)
138 output, status = self.run(b'show', b'repo', repodir=self.path)
139 self.checkexit(status)
139 self.checkexit(status)
140 m = re.search(br'^\s*Format:\s*(.*)$', output, re.MULTILINE)
140 m = re.search(br'^\s*Format:\s*(.*)$', output, re.MULTILINE)
141 if not m:
141 if not m:
142 return None
142 return None
143 return b','.join(sorted(f.strip() for f in m.group(1).split(b',')))
143 return b','.join(sorted(f.strip() for f in m.group(1).split(b',')))
144
144
145 def manifest(self):
145 def manifest(self):
146 man = []
146 man = []
147 output, status = self.run(
147 output, status = self.run(
148 b'show', b'files', no_directories=True, repodir=self.tmppath
148 b'show', b'files', no_directories=True, repodir=self.tmppath
149 )
149 )
150 self.checkexit(status)
150 self.checkexit(status)
151 for line in output.split(b'\n'):
151 for line in output.split(b'\n'):
152 path = line[2:]
152 path = line[2:]
153 if path:
153 if path:
154 man.append(path)
154 man.append(path)
155 return man
155 return man
156
156
157 def getheads(self):
157 def getheads(self):
158 return self.parents[None]
158 return self.parents[None]
159
159
160 def getcommit(self, rev):
160 def getcommit(self, rev):
161 elt = self.changes[rev]
161 elt = self.changes[rev]
162 dateformat = b'%a %b %d %H:%M:%S %Z %Y'
162 dateformat = b'%a %b %d %H:%M:%S %Z %Y'
163 date = dateutil.strdate(elt.get(b'local_date'), dateformat)
163 date = dateutil.strdate(elt.get(b'local_date'), dateformat)
164 desc = elt.findtext(b'name') + b'\n' + elt.findtext(b'comment', b'')
164 desc = elt.findtext(b'name') + b'\n' + elt.findtext(b'comment', b'')
165 # etree can return unicode objects for name, comment, and author,
165 # etree can return unicode objects for name, comment, and author,
166 # so recode() is used to ensure byte strings are emitted.
166 # so recode() is used to ensure byte strings are emitted.
167 newdateformat = b'%Y-%m-%d %H:%M:%S %1%2'
167 newdateformat = b'%Y-%m-%d %H:%M:%S %1%2'
168 return common.commit(
168 return common.commit(
169 author=self.recode(elt.get(b'author')),
169 author=self.recode(elt.get(b'author')),
170 date=dateutil.datestr(date, newdateformat),
170 date=dateutil.datestr(date, newdateformat),
171 desc=self.recode(desc).strip(),
171 desc=self.recode(desc).strip(),
172 parents=self.parents[rev],
172 parents=self.parents[rev],
173 )
173 )
174
174
175 def pull(self, rev):
175 def pull(self, rev):
176 output, status = self.run(
176 output, status = self.run(
177 b'pull',
177 b'pull',
178 self.path,
178 self.path,
179 all=True,
179 all=True,
180 match=b'hash %s' % rev,
180 match=b'hash %s' % rev,
181 no_test=True,
181 no_test=True,
182 no_posthook=True,
182 no_posthook=True,
183 external_merge=b'/bin/false',
183 external_merge=b'/bin/false',
184 repodir=self.tmppath,
184 repodir=self.tmppath,
185 )
185 )
186 if status:
186 if status:
187 if output.find(b'We have conflicts in') == -1:
187 if output.find(b'We have conflicts in') == -1:
188 self.checkexit(status, output)
188 self.checkexit(status, output)
189 output, status = self.run(b'revert', all=True, repodir=self.tmppath)
189 output, status = self.run(b'revert', all=True, repodir=self.tmppath)
190 self.checkexit(status, output)
190 self.checkexit(status, output)
191
191
192 def getchanges(self, rev, full):
192 def getchanges(self, rev, full):
193 if full:
193 if full:
194 raise error.Abort(_(b"convert from darcs does not support --full"))
194 raise error.Abort(_(b"convert from darcs does not support --full"))
195 copies = {}
195 copies = {}
196 changes = []
196 changes = []
197 man = None
197 man = None
198 for elt in self.changes[rev].find(b'summary').getchildren():
198 for elt in self.changes[rev].find(b'summary').getchildren():
199 if elt.tag in (b'add_directory', b'remove_directory'):
199 if elt.tag in (b'add_directory', b'remove_directory'):
200 continue
200 continue
201 if elt.tag == b'move':
201 if elt.tag == b'move':
202 if man is None:
202 if man is None:
203 man = self.manifest()
203 man = self.manifest()
204 source, dest = elt.get(b'from'), elt.get(b'to')
204 source, dest = elt.get(b'from'), elt.get(b'to')
205 if source in man:
205 if source in man:
206 # File move
206 # File move
207 changes.append((source, rev))
207 changes.append((source, rev))
208 changes.append((dest, rev))
208 changes.append((dest, rev))
209 copies[dest] = source
209 copies[dest] = source
210 else:
210 else:
211 # Directory move, deduce file moves from manifest
211 # Directory move, deduce file moves from manifest
212 source = source + b'/'
212 source = source + b'/'
213 for f in man:
213 for f in man:
214 if not f.startswith(source):
214 if not f.startswith(source):
215 continue
215 continue
216 fdest = dest + b'/' + f[len(source) :]
216 fdest = dest + b'/' + f[len(source) :]
217 changes.append((f, rev))
217 changes.append((f, rev))
218 changes.append((fdest, rev))
218 changes.append((fdest, rev))
219 copies[fdest] = f
219 copies[fdest] = f
220 else:
220 else:
221 changes.append((elt.text.strip(), rev))
221 changes.append((elt.text.strip(), rev))
222 self.pull(rev)
222 self.pull(rev)
223 self.lastrev = rev
223 self.lastrev = rev
224 return sorted(changes), copies, set()
224 return sorted(changes), copies, set()
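
Directory moves arrive as a single <move> element, so file-level renames are reconstructed by prefix-matching the manifest. The deduction in isolation (hypothetical file names):

    def deduce_dir_move(man, source, dest):
        copies, prefix = {}, source + '/'
        for f in man:
            if f.startswith(prefix):
                copies[dest + '/' + f[len(prefix):]] = f
        return copies

    man = ['lib/a.py', 'lib/sub/b.py', 'README']
    assert deduce_dir_move(man, 'lib', 'src') == {
        'src/a.py': 'lib/a.py',
        'src/sub/b.py': 'lib/sub/b.py',
    }
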
225
225
226 def getfile(self, name, rev):
226 def getfile(self, name, rev):
227 if rev != self.lastrev:
227 if rev != self.lastrev:
228 raise error.Abort(_(b'internal calling inconsistency'))
228 raise error.Abort(_(b'internal calling inconsistency'))
229 path = os.path.join(self.tmppath, name)
229 path = os.path.join(self.tmppath, name)
230 try:
230 try:
231 data = util.readfile(path)
231 data = util.readfile(path)
232 mode = os.lstat(path).st_mode
232 mode = os.lstat(path).st_mode
233 except IOError as inst:
233 except IOError as inst:
234 if inst.errno == errno.ENOENT:
234 if inst.errno == errno.ENOENT:
235 return None, None
235 return None, None
236 raise
236 raise
237 mode = (mode & 0o111) and b'x' or b''
237 mode = (mode & 0o111) and b'x' or b''
238 return data, mode
238 return data, mode
239
239
240 def gettags(self):
240 def gettags(self):
241 return self.tags
241 return self.tags
@@ -1,769 +1,769
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import contextlib
9 import contextlib
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import json
12 import json
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.pycompat import getattr
18 from mercurial.pycompat import getattr
19 from mercurial.node import hex
19 from mercurial.node import hex
20
20
21 from mercurial import (
21 from mercurial import (
22 encoding,
22 encoding,
23 error,
23 error,
24 httpconnection as httpconnectionmod,
24 httpconnection as httpconnectionmod,
25 pathutil,
25 pathutil,
26 pycompat,
26 pycompat,
27 url as urlmod,
27 url as urlmod,
28 util,
28 util,
29 vfs as vfsmod,
29 vfs as vfsmod,
30 worker,
30 worker,
31 )
31 )
32
32
33 from mercurial.utils import (
33 from mercurial.utils import (
34 stringutil,
34 stringutil,
35 urlutil,
35 urlutil,
36 )
36 )
37
37
38 from ..largefiles import lfutil
38 from ..largefiles import lfutil
39
39
40 # 64 bytes for SHA256
40 # 64 bytes for SHA256
41 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
41 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
42
42
43
43
44 class lfsvfs(vfsmod.vfs):
44 class lfsvfs(vfsmod.vfs):
45 def join(self, path):
45 def join(self, path):
46 """split the path at first two characters, like: XX/XXXXX..."""
46 """split the path at first two characters, like: XX/XXXXX..."""
47 if not _lfsre.match(path):
47 if not _lfsre.match(path):
48 raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
48 raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
49 return super(lfsvfs, self).join(path[0:2], path[2:])
49 return super(lfsvfs, self).join(path[0:2], path[2:])
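
That is, a 64-hex oid is sharded into a two-character directory plus the remainder, which keeps any one directory from accumulating every blob:

    import hashlib

    oid = hashlib.sha256(b'hello').hexdigest()  # 64 hex characters
    path = oid[0:2] + '/' + oid[2:]             # '2c/f24dba...'
    assert len(oid) == 64 and path.index('/') == 2
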
50
50
51 def walk(self, path=None, onerror=None):
51 def walk(self, path=None, onerror=None):
52 """Yield (dirpath, [], oids) tuple for blobs under path
52 """Yield (dirpath, [], oids) tuple for blobs under path
53
53
54 Oids only exist in the root of this vfs, so dirpath is always ''.
54 Oids only exist in the root of this vfs, so dirpath is always ''.
55 """
55 """
56 root = os.path.normpath(self.base)
56 root = os.path.normpath(self.base)
57 # when dirpath == root, dirpath[prefixlen:] becomes empty
57 # when dirpath == root, dirpath[prefixlen:] becomes empty
58 # because len(dirpath) < prefixlen.
58 # because len(dirpath) < prefixlen.
59 prefixlen = len(pathutil.normasprefix(root))
59 prefixlen = len(pathutil.normasprefix(root))
60 oids = []
60 oids = []
61
61
62 for dirpath, dirs, files in os.walk(
62 for dirpath, dirs, files in os.walk(
63 self.reljoin(self.base, path or b''), onerror=onerror
63 self.reljoin(self.base, path or b''), onerror=onerror
64 ):
64 ):
65 dirpath = dirpath[prefixlen:]
65 dirpath = dirpath[prefixlen:]
66
66
67 # Silently skip unexpected files and directories
67 # Silently skip unexpected files and directories
68 if len(dirpath) == 2:
68 if len(dirpath) == 2:
69 oids.extend(
69 oids.extend(
70 [dirpath + f for f in files if _lfsre.match(dirpath + f)]
70 [dirpath + f for f in files if _lfsre.match(dirpath + f)]
71 )
71 )
72
72
73 yield (b'', [], oids)
73 yield (b'', [], oids)
74
74
75
75
76 class nullvfs(lfsvfs):
76 class nullvfs(lfsvfs):
77 def __init__(self):
77 def __init__(self):
78 pass
78 pass
79
79
80 def exists(self, oid):
80 def exists(self, oid):
81 return False
81 return False
82
82
83 def read(self, oid):
83 def read(self, oid):
84 # store.read() calls into here if the blob doesn't exist in its
84 # store.read() calls into here if the blob doesn't exist in its
85 # self.vfs. Raise the same error as a normal vfs when asked to read a
85 # self.vfs. Raise the same error as a normal vfs when asked to read a
86 # file that doesn't exist. The only difference is the full file path
86 # file that doesn't exist. The only difference is the full file path
87 # isn't available in the error.
87 # isn't available in the error.
88 raise IOError(
88 raise IOError(
89 errno.ENOENT,
89 errno.ENOENT,
90 pycompat.sysstr(b'%s: No such file or directory' % oid),
90 pycompat.sysstr(b'%s: No such file or directory' % oid),
91 )
91 )
92
92
93 def walk(self, path=None, onerror=None):
93 def walk(self, path=None, onerror=None):
94 return (b'', [], [])
94 return (b'', [], [])
95
95
96 def write(self, oid, data):
96 def write(self, oid, data):
97 pass
97 pass
98
98
99
99
100 class lfsuploadfile(httpconnectionmod.httpsendfile):
100 class lfsuploadfile(httpconnectionmod.httpsendfile):
101 """a file-like object that supports keepalive."""
101 """a file-like object that supports keepalive."""
102
102
103 def __init__(self, ui, filename):
103 def __init__(self, ui, filename):
104 super(lfsuploadfile, self).__init__(ui, filename, b'rb')
104 super(lfsuploadfile, self).__init__(ui, filename, b'rb')
105 self.read = self._data.read
105 self.read = self._data.read
106
106
107 def _makeprogress(self):
107 def _makeprogress(self):
108 return None # progress is handled by the worker client
108 return None # progress is handled by the worker client
109
109
110
110
111 class local(object):
111 class local(object):
112 """Local blobstore for large file contents.
112 """Local blobstore for large file contents.
113
113
114 This blobstore is used both as a cache and as a staging area for large blobs
114 This blobstore is used both as a cache and as a staging area for large blobs
115 to be uploaded to the remote blobstore.
115 to be uploaded to the remote blobstore.
116 """
116 """
117
117
118 def __init__(self, repo):
118 def __init__(self, repo):
119 fullpath = repo.svfs.join(b'lfs/objects')
119 fullpath = repo.svfs.join(b'lfs/objects')
120 self.vfs = lfsvfs(fullpath)
120 self.vfs = lfsvfs(fullpath)
121
121
122 if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
122 if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
123 self.cachevfs = nullvfs()
123 self.cachevfs = nullvfs()
124 else:
124 else:
125 usercache = lfutil._usercachedir(repo.ui, b'lfs')
125 usercache = lfutil._usercachedir(repo.ui, b'lfs')
126 self.cachevfs = lfsvfs(usercache)
126 self.cachevfs = lfsvfs(usercache)
127 self.ui = repo.ui
127 self.ui = repo.ui
128
128
129 def open(self, oid):
129 def open(self, oid):
130 """Open a read-only file descriptor to the named blob, in either the
130 """Open a read-only file descriptor to the named blob, in either the
131 usercache or the local store."""
131 usercache or the local store."""
132 return open(self.path(oid), 'rb')
132 return open(self.path(oid), 'rb')
133
133
134 def path(self, oid):
134 def path(self, oid):
135 """Build the path for the given blob ``oid``.
135 """Build the path for the given blob ``oid``.
136
136
137 If the blob exists locally, the path may point to either the usercache
137 If the blob exists locally, the path may point to either the usercache
138 or the local store. If it doesn't, it will point to the local store.
138 or the local store. If it doesn't, it will point to the local store.
139 This is meant for situations where existing code that isn't LFS aware
139 This is meant for situations where existing code that isn't LFS aware
140 needs to open a blob. Generally, prefer the ``open`` method on this
140 needs to open a blob. Generally, prefer the ``open`` method on this
141 class.
141 class.
142 """
142 """
143 # The usercache is the most likely place to hold the file. Commit will
143 # The usercache is the most likely place to hold the file. Commit will
144 # write to both it and the local store, as will anything that downloads
144 # write to both it and the local store, as will anything that downloads
145 # the blobs. However, things like clone without an update won't
145 # the blobs. However, things like clone without an update won't
146 # populate the local store. For an init + push of a local clone,
146 # populate the local store. For an init + push of a local clone,
147 # the usercache is the only place it _could_ be. If not present, the
147 # the usercache is the only place it _could_ be. If not present, the
148 # missing file msg here will indicate the local repo, not the usercache.
148 # missing file msg here will indicate the local repo, not the usercache.
149 if self.cachevfs.exists(oid):
149 if self.cachevfs.exists(oid):
150 return self.cachevfs.join(oid)
150 return self.cachevfs.join(oid)
151
151
152 return self.vfs.join(oid)
152 return self.vfs.join(oid)
153
153
154 def download(self, oid, src, content_length):
154 def download(self, oid, src, content_length):
155 """Read the blob from the remote source in chunks, verify the content,
155 """Read the blob from the remote source in chunks, verify the content,
156 and write to this local blobstore."""
156 and write to this local blobstore."""
157 sha256 = hashlib.sha256()
157 sha256 = hashlib.sha256()
158 size = 0
158 size = 0
159
159
160 with self.vfs(oid, b'wb', atomictemp=True) as fp:
160 with self.vfs(oid, b'wb', atomictemp=True) as fp:
161 for chunk in util.filechunkiter(src, size=1048576):
161 for chunk in util.filechunkiter(src, size=1048576):
162 fp.write(chunk)
162 fp.write(chunk)
163 sha256.update(chunk)
163 sha256.update(chunk)
164 size += len(chunk)
164 size += len(chunk)
165
165
166 # If the server advertised a length longer than what we actually
166 # If the server advertised a length longer than what we actually
167 # received, then we should expect that the server crashed while
167 # received, then we should expect that the server crashed while
168 # producing the response (but the server has no way of telling us
168 # producing the response (but the server has no way of telling us
169 # that), and we really don't need to try to write the response to
169 # that), and we really don't need to try to write the response to
170 # the localstore, because it's not going to match the expected content.
170 # the localstore, because it's not going to match the expected content.
171 if content_length is not None and int(content_length) != size:
171 if content_length is not None and int(content_length) != size:
172 msg = (
172 msg = (
173 b"Response length (%s) does not match Content-Length "
173 b"Response length (%s) does not match Content-Length "
174 b"header (%d): likely server-side crash"
174 b"header (%d): likely server-side crash"
175 )
175 )
176 raise LfsRemoteError(_(msg) % (size, int(content_length)))
176 raise LfsRemoteError(_(msg) % (size, int(content_length)))
177
177
178 realoid = hex(sha256.digest())
178 realoid = hex(sha256.digest())
179 if realoid != oid:
179 if realoid != oid:
180 raise LfsCorruptionError(
180 raise LfsCorruptionError(
181 _(b'corrupt remote lfs object: %s') % oid
181 _(b'corrupt remote lfs object: %s') % oid
182 )
182 )
183
183
184 self._linktousercache(oid)
184 self._linktousercache(oid)
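
The verify-while-streaming pattern in isolation: hash and count each chunk as it is written, then compare the digest and size afterwards (io.BytesIO stands in for both the remote response and the vfs):

    import hashlib
    import io

    def download(oid, src, content_length=None, chunksize=1048576):
        sha256, size, out = hashlib.sha256(), 0, io.BytesIO()
        while True:
            chunk = src.read(chunksize)
            if not chunk:
                break
            out.write(chunk)
            sha256.update(chunk)
            size += len(chunk)
        if content_length is not None and int(content_length) != size:
            raise IOError('truncated response: got %d bytes' % size)
        if sha256.hexdigest() != oid:
            raise IOError('corrupt blob: %s' % oid)
        return out.getvalue()

    blob = b'hello'
    assert download(hashlib.sha256(blob).hexdigest(),
                    io.BytesIO(blob), len(blob)) == blob
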
185
185
186 def write(self, oid, data):
186 def write(self, oid, data):
187 """Write blob to local blobstore.
187 """Write blob to local blobstore.
188
188
189 This should only be called from the filelog during a commit or similar.
189 This should only be called from the filelog during a commit or similar.
190 As such, there is no need to verify the data. Imports from a remote
190 As such, there is no need to verify the data. Imports from a remote
191 store must use ``download()`` instead."""
191 store must use ``download()`` instead."""
192 with self.vfs(oid, b'wb', atomictemp=True) as fp:
192 with self.vfs(oid, b'wb', atomictemp=True) as fp:
193 fp.write(data)
193 fp.write(data)
194
194
195 self._linktousercache(oid)
195 self._linktousercache(oid)
196
196
197 def linkfromusercache(self, oid):
197 def linkfromusercache(self, oid):
198 """Link blobs found in the user cache into this store.
198 """Link blobs found in the user cache into this store.
199
199
200 The server module needs to do this when it lets the client know not to
200 The server module needs to do this when it lets the client know not to
201 upload the blob, to ensure it is always available in this store.
201 upload the blob, to ensure it is always available in this store.
202 Normally this is done implicitly when the client reads or writes the
202 Normally this is done implicitly when the client reads or writes the
203 blob, but that doesn't happen when the server tells the client that it
203 blob, but that doesn't happen when the server tells the client that it
204 already has the blob.
204 already has the blob.
205 """
205 """
206 if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
206 if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
207 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
207 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
208 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
208 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
209
209
210 def _linktousercache(self, oid):
210 def _linktousercache(self, oid):
211 # XXX: should we verify the content of the cache, and hardlink back to
211 # XXX: should we verify the content of the cache, and hardlink back to
212 # the local store on success, but truncate, write and link on failure?
212 # the local store on success, but truncate, write and link on failure?
213 if not self.cachevfs.exists(oid) and not isinstance(
213 if not self.cachevfs.exists(oid) and not isinstance(
214 self.cachevfs, nullvfs
214 self.cachevfs, nullvfs
215 ):
215 ):
216 self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
216 self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
217 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
217 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
218
218
219 def read(self, oid, verify=True):
219 def read(self, oid, verify=True):
220 """Read blob from local blobstore."""
220 """Read blob from local blobstore."""
221 if not self.vfs.exists(oid):
221 if not self.vfs.exists(oid):
222 blob = self._read(self.cachevfs, oid, verify)
222 blob = self._read(self.cachevfs, oid, verify)
223
223
224 # Even if revlog will verify the content, it needs to be verified
224 # Even if revlog will verify the content, it needs to be verified
225 # now before making the hardlink to avoid propagating corrupt blobs.
225 # now before making the hardlink to avoid propagating corrupt blobs.
226 # Don't abort if corruption is detected, because `hg verify` will
226 # Don't abort if corruption is detected, because `hg verify` will
227 # give more useful info about the corruption- simply don't add the
227 # give more useful info about the corruption- simply don't add the
228 # hardlink.
228 # hardlink.
229 if verify or hex(hashlib.sha256(blob).digest()) == oid:
229 if verify or hex(hashlib.sha256(blob).digest()) == oid:
230 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
230 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
231 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
231 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
232 else:
232 else:
233 self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
233 self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
234 blob = self._read(self.vfs, oid, verify)
234 blob = self._read(self.vfs, oid, verify)
235 return blob
235 return blob
236
236
237 def _read(self, vfs, oid, verify):
237 def _read(self, vfs, oid, verify):
238 """Read blob (after verifying) from the given store"""
238 """Read blob (after verifying) from the given store"""
239 blob = vfs.read(oid)
239 blob = vfs.read(oid)
240 if verify:
240 if verify:
241 _verify(oid, blob)
241 _verify(oid, blob)
242 return blob
242 return blob
243
243
244 def verify(self, oid):
244 def verify(self, oid):
245 """Indicate whether or not the hash of the underlying file matches its
245 """Indicate whether or not the hash of the underlying file matches its
246 name."""
246 name."""
247 sha256 = hashlib.sha256()
247 sha256 = hashlib.sha256()
248
248
249 with self.open(oid) as fp:
249 with self.open(oid) as fp:
250 for chunk in util.filechunkiter(fp, size=1048576):
250 for chunk in util.filechunkiter(fp, size=1048576):
251 sha256.update(chunk)
251 sha256.update(chunk)
252
252
253 return oid == hex(sha256.digest())
253 return oid == hex(sha256.digest())
254
254
255 def has(self, oid):
255 def has(self, oid):
256 """Returns True if the local blobstore contains the requested blob,
256 """Returns True if the local blobstore contains the requested blob,
257 False otherwise."""
257 False otherwise."""
258 return self.cachevfs.exists(oid) or self.vfs.exists(oid)
258 return self.cachevfs.exists(oid) or self.vfs.exists(oid)
259
259
260
260
261 def _urlerrorreason(urlerror):
261 def _urlerrorreason(urlerror):
262 """Create a friendly message for the given URLError to be used in an
262 """Create a friendly message for the given URLError to be used in an
263 LfsRemoteError message.
263 LfsRemoteError message.
264 """
264 """
265 inst = urlerror
265 inst = urlerror
266
266
267 if isinstance(urlerror.reason, Exception):
267 if isinstance(urlerror.reason, Exception):
268 inst = urlerror.reason
268 inst = urlerror.reason
269
269
270 if util.safehasattr(inst, b'reason'):
270 if util.safehasattr(inst, b'reason'):
271 try: # usually it is in the form (errno, strerror)
271 try: # usually it is in the form (errno, strerror)
272 reason = inst.reason.args[1]
272 reason = inst.reason.args[1]
273 except (AttributeError, IndexError):
273 except (AttributeError, IndexError):
274 # it might be anything, for example a string
274 # it might be anything, for example a string
275 reason = inst.reason
275 reason = inst.reason
276 if isinstance(reason, pycompat.unicode):
276 if isinstance(reason, str):
277 # SSLError of Python 2.7.9 contains a unicode
277 # SSLError of Python 2.7.9 contains a unicode
278 reason = encoding.unitolocal(reason)
278 reason = encoding.unitolocal(reason)
279 return reason
279 return reason
280 elif getattr(inst, "strerror", None):
280 elif getattr(inst, "strerror", None):
281 return encoding.strtolocal(inst.strerror)
281 return encoding.strtolocal(inst.strerror)
282 else:
282 else:
283 return stringutil.forcebytestr(urlerror)
283 return stringutil.forcebytestr(urlerror)
284
284
285
285
286 class lfsauthhandler(util.urlreq.basehandler):
286 class lfsauthhandler(util.urlreq.basehandler):
287 handler_order = 480 # Before HTTPDigestAuthHandler (== 490)
287 handler_order = 480 # Before HTTPDigestAuthHandler (== 490)
288
288
289 def http_error_401(self, req, fp, code, msg, headers):
289 def http_error_401(self, req, fp, code, msg, headers):
290 """Enforces that any authentication performed is HTTP Basic
290 """Enforces that any authentication performed is HTTP Basic
291 Authentication. No authentication is also acceptable.
291 Authentication. No authentication is also acceptable.
292 """
292 """
293 authreq = headers.get('www-authenticate', None)
293 authreq = headers.get('www-authenticate', None)
294 if authreq:
294 if authreq:
295 scheme = authreq.split()[0]
295 scheme = authreq.split()[0]
296
296
297 if scheme.lower() != 'basic':
297 if scheme.lower() != 'basic':
298 msg = _(b'the server must support Basic Authentication')
298 msg = _(b'the server must support Basic Authentication')
299 raise util.urlerr.httperror(
299 raise util.urlerr.httperror(
300 req.get_full_url(),
300 req.get_full_url(),
301 code,
301 code,
302 encoding.strfromlocal(msg),
302 encoding.strfromlocal(msg),
303 headers,
303 headers,
304 fp,
304 fp,
305 )
305 )
306 return None
306 return None
307
307
308
308
309 class _gitlfsremote(object):
309 class _gitlfsremote(object):
310 def __init__(self, repo, url):
310 def __init__(self, repo, url):
311 ui = repo.ui
311 ui = repo.ui
312 self.ui = ui
312 self.ui = ui
313 baseurl, authinfo = url.authinfo()
313 baseurl, authinfo = url.authinfo()
314 self.baseurl = baseurl.rstrip(b'/')
314 self.baseurl = baseurl.rstrip(b'/')
315 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
315 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
316 if not useragent:
316 if not useragent:
317 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
317 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
318 self.urlopener = urlmod.opener(ui, authinfo, useragent)
318 self.urlopener = urlmod.opener(ui, authinfo, useragent)
319 self.urlopener.add_handler(lfsauthhandler())
319 self.urlopener.add_handler(lfsauthhandler())
320 self.retry = ui.configint(b'lfs', b'retry')
320 self.retry = ui.configint(b'lfs', b'retry')
321
321
322 def writebatch(self, pointers, fromstore):
322 def writebatch(self, pointers, fromstore):
323 """Batch upload from local to remote blobstore."""
323 """Batch upload from local to remote blobstore."""
324 self._batch(_deduplicate(pointers), fromstore, b'upload')
324 self._batch(_deduplicate(pointers), fromstore, b'upload')
325
325
326 def readbatch(self, pointers, tostore):
326 def readbatch(self, pointers, tostore):
327 """Batch download from remote to local blostore."""
327 """Batch download from remote to local blostore."""
328 self._batch(_deduplicate(pointers), tostore, b'download')
328 self._batch(_deduplicate(pointers), tostore, b'download')
329
329
330 def _batchrequest(self, pointers, action):
330 def _batchrequest(self, pointers, action):
331 """Get metadata about objects pointed by pointers for given action
331 """Get metadata about objects pointed by pointers for given action
332
332
333 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
333 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
334 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
334 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
335 """
335 """
336 objects = [
336 objects = [
337 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
337 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
338 for p in pointers
338 for p in pointers
339 ]
339 ]
340 requestdata = pycompat.bytesurl(
340 requestdata = pycompat.bytesurl(
341 json.dumps(
341 json.dumps(
342 {
342 {
343 'objects': objects,
343 'objects': objects,
344 'operation': pycompat.strurl(action),
344 'operation': pycompat.strurl(action),
345 }
345 }
346 )
346 )
347 )
347 )
348 url = b'%s/objects/batch' % self.baseurl
348 url = b'%s/objects/batch' % self.baseurl
349 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
349 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
350 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
350 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
351 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
351 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
352 try:
352 try:
353 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
353 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
354 rawjson = rsp.read()
354 rawjson = rsp.read()
355 except util.urlerr.httperror as ex:
355 except util.urlerr.httperror as ex:
356 hints = {
356 hints = {
357 400: _(
357 400: _(
358 b'check that lfs serving is enabled on %s and "%s" is '
358 b'check that lfs serving is enabled on %s and "%s" is '
359 b'supported'
359 b'supported'
360 )
360 )
361 % (self.baseurl, action),
361 % (self.baseurl, action),
362 404: _(b'the "lfs.url" config may be used to override %s')
362 404: _(b'the "lfs.url" config may be used to override %s')
363 % self.baseurl,
363 % self.baseurl,
364 }
364 }
365 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
365 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
366 raise LfsRemoteError(
366 raise LfsRemoteError(
367 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
367 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
368 hint=hint,
368 hint=hint,
369 )
369 )
370 except util.urlerr.urlerror as ex:
370 except util.urlerr.urlerror as ex:
371 hint = (
371 hint = (
372 _(b'the "lfs.url" config may be used to override %s')
372 _(b'the "lfs.url" config may be used to override %s')
373 % self.baseurl
373 % self.baseurl
374 )
374 )
375 raise LfsRemoteError(
375 raise LfsRemoteError(
376 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
376 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
377 )
377 )
378 try:
378 try:
379 response = pycompat.json_loads(rawjson)
379 response = pycompat.json_loads(rawjson)
380 except ValueError:
380 except ValueError:
381 raise LfsRemoteError(
381 raise LfsRemoteError(
382 _(b'LFS server returns invalid JSON: %s')
382 _(b'LFS server returns invalid JSON: %s')
383 % rawjson.encode("utf-8")
383 % rawjson.encode("utf-8")
384 )
384 )
385
385
386 if self.ui.debugflag:
386 if self.ui.debugflag:
387 self.ui.debug(b'Status: %d\n' % rsp.status)
387 self.ui.debug(b'Status: %d\n' % rsp.status)
388 # lfs-test-server and hg serve return headers in different order
388 # lfs-test-server and hg serve return headers in different order
389 headers = pycompat.bytestr(rsp.info()).strip()
389 headers = pycompat.bytestr(rsp.info()).strip()
390 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
390 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
391
391
392 if 'objects' in response:
392 if 'objects' in response:
393 response['objects'] = sorted(
393 response['objects'] = sorted(
394 response['objects'], key=lambda p: p['oid']
394 response['objects'], key=lambda p: p['oid']
395 )
395 )
396 self.ui.debug(
396 self.ui.debug(
397 b'%s\n'
397 b'%s\n'
398 % pycompat.bytesurl(
398 % pycompat.bytesurl(
399 json.dumps(
399 json.dumps(
400 response,
400 response,
401 indent=2,
401 indent=2,
402 separators=('', ': '),
402 separators=('', ': '),
403 sort_keys=True,
403 sort_keys=True,
404 )
404 )
405 )
405 )
406 )
406 )
407
407
408 def encodestr(x):
408 def encodestr(x):
409 if isinstance(x, pycompat.unicode):
409 if isinstance(x, str):
410 return x.encode('utf-8')
410 return x.encode('utf-8')
411 return x
411 return x
412
412
413 return pycompat.rapply(encodestr, response)
413 return pycompat.rapply(encodestr, response)
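
A round trip against the batch endpoint, per the git-lfs document linked in the docstring, looks roughly like this (values are illustrative; shown as Python literals rather than raw JSON):

    request = {
        'operation': 'download',
        'objects': [{'oid': '31d97c58' + '0' * 56, 'size': 12}],
    }
    # a typical success response annotates each object with allowed actions
    response = {
        'objects': [{
            'oid': '31d97c58' + '0' * 56,
            'size': 12,
            'actions': {'download': {
                'href': 'https://lfs.example.org/objects/31d97c58',
                'header': {'Authorization': 'Basic <token>'},
            }},
        }],
    }
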
414
414
415 def _checkforservererror(self, pointers, responses, action):
415 def _checkforservererror(self, pointers, responses, action):
416 """Scans errors from objects
416 """Scans errors from objects
417
417
418 Raises LfsRemoteError if any objects have an error"""
418 Raises LfsRemoteError if any objects have an error"""
419 for response in responses:
419 for response in responses:
420 # The server should return 404 when objects cannot be found. Some
420 # The server should return 404 when objects cannot be found. Some
421 # server implementation (ex. lfs-test-server) does not set "error"
421 # server implementation (ex. lfs-test-server) does not set "error"
422 # but just removes "download" from "actions". Treat that case
422 # but just removes "download" from "actions". Treat that case
423 # as the same as 404 error.
423 # as the same as 404 error.
424 if b'error' not in response:
424 if b'error' not in response:
425 if action == b'download' and action not in response.get(
425 if action == b'download' and action not in response.get(
426 b'actions', []
426 b'actions', []
427 ):
427 ):
428 code = 404
428 code = 404
429 else:
429 else:
430 continue
430 continue
431 else:
431 else:
432 # An error dict without a code doesn't make much sense, so
432 # An error dict without a code doesn't make much sense, so
433 # treat as a server error.
433 # treat as a server error.
434 code = response.get(b'error').get(b'code', 500)
434 code = response.get(b'error').get(b'code', 500)
435
435
436 ptrmap = {p.oid(): p for p in pointers}
436 ptrmap = {p.oid(): p for p in pointers}
437 p = ptrmap.get(response[b'oid'], None)
437 p = ptrmap.get(response[b'oid'], None)
438 if p:
438 if p:
439 filename = getattr(p, 'filename', b'unknown')
439 filename = getattr(p, 'filename', b'unknown')
440 errors = {
440 errors = {
441 404: b'The object does not exist',
441 404: b'The object does not exist',
442 410: b'The object was removed by the owner',
442 410: b'The object was removed by the owner',
443 422: b'Validation error',
443 422: b'Validation error',
444 500: b'Internal server error',
444 500: b'Internal server error',
445 }
445 }
446 msg = errors.get(code, b'status code %d' % code)
446 msg = errors.get(code, b'status code %d' % code)
447 raise LfsRemoteError(
447 raise LfsRemoteError(
448 _(b'LFS server error for "%s": %s') % (filename, msg)
448 _(b'LFS server error for "%s": %s') % (filename, msg)
449 )
449 )
450 else:
450 else:
451 raise LfsRemoteError(
451 raise LfsRemoteError(
452 _(b'LFS server error. Unsolicited response for oid %s')
452 _(b'LFS server error. Unsolicited response for oid %s')
453 % response[b'oid']
453 % response[b'oid']
454 )
454 )
455
455
456 def _extractobjects(self, response, pointers, action):
456 def _extractobjects(self, response, pointers, action):
457 """extract objects from response of the batch API
457 """extract objects from response of the batch API
458
458
459 response: parsed JSON object returned by batch API
459 response: parsed JSON object returned by batch API
460 return response['objects'] filtered by action
460 return response['objects'] filtered by action
461 raise if any object has an error
461 raise if any object has an error
462 """
462 """
463 # Scan errors from objects - fail early
463 # Scan errors from objects - fail early
464 objects = response.get(b'objects', [])
464 objects = response.get(b'objects', [])
465 self._checkforservererror(pointers, objects, action)
465 self._checkforservererror(pointers, objects, action)
466
466
467 # Filter objects with the given action. Practically, this skips
467 # Filter objects with the given action. Practically, this skips
468 # uploading objects that already exist on the server.
468 # uploading objects that already exist on the server.
469 filteredobjects = [
469 filteredobjects = [
470 o for o in objects if action in o.get(b'actions', [])
470 o for o in objects if action in o.get(b'actions', [])
471 ]
471 ]
472
472
473 return filteredobjects
473 return filteredobjects
474
474
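# Editor's sketch: why filtering on 'actions' skips redundant transfers.
# In a hypothetical batch reply, an object the server already stores
# comes back without an 'upload' action, so it is filtered out here.
sample_objects = [
    {b'oid': b'aa', b'actions': {b'upload': {b'href': b'https://lfs.example/objects/aa'}}},
    {b'oid': b'bb'},  # already present server-side: no actions
]
to_upload = [o for o in sample_objects if b'upload' in o.get(b'actions', [])]
# -> only the b'aa' object would be transferred
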
475 def _basictransfer(self, obj, action, localstore):
475 def _basictransfer(self, obj, action, localstore):
476 """Download or upload a single object using basic transfer protocol
476 """Download or upload a single object using basic transfer protocol
477
477
478 obj: dict, an object description returned by batch API
478 obj: dict, an object description returned by batch API
479 action: string, one of ['upload', 'download']
479 action: string, one of ['upload', 'download']
480 localstore: blobstore.local
480 localstore: blobstore.local
481
481
482 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
482 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
483 basic-transfers.md
483 basic-transfers.md
484 """
484 """
485 oid = obj[b'oid']
485 oid = obj[b'oid']
486 href = obj[b'actions'][action].get(b'href')
486 href = obj[b'actions'][action].get(b'href')
487 headers = obj[b'actions'][action].get(b'header', {}).items()
487 headers = obj[b'actions'][action].get(b'header', {}).items()
488
488
489 request = util.urlreq.request(pycompat.strurl(href))
489 request = util.urlreq.request(pycompat.strurl(href))
490 if action == b'upload':
490 if action == b'upload':
491 # If uploading blobs, read data from local blobstore.
491 # If uploading blobs, read data from local blobstore.
492 if not localstore.verify(oid):
492 if not localstore.verify(oid):
493 raise error.Abort(
493 raise error.Abort(
494 _(b'detected corrupt lfs object: %s') % oid,
494 _(b'detected corrupt lfs object: %s') % oid,
495 hint=_(b'run hg verify'),
495 hint=_(b'run hg verify'),
496 )
496 )
497
497
498 for k, v in headers:
498 for k, v in headers:
499 request.add_header(pycompat.strurl(k), pycompat.strurl(v))
499 request.add_header(pycompat.strurl(k), pycompat.strurl(v))
500
500
501 try:
501 try:
502 if action == b'upload':
502 if action == b'upload':
503 request.data = lfsuploadfile(self.ui, localstore.path(oid))
503 request.data = lfsuploadfile(self.ui, localstore.path(oid))
504 request.get_method = lambda: 'PUT'
504 request.get_method = lambda: 'PUT'
505 request.add_header('Content-Type', 'application/octet-stream')
505 request.add_header('Content-Type', 'application/octet-stream')
506 request.add_header('Content-Length', request.data.length)
506 request.add_header('Content-Length', request.data.length)
507
507
508 with contextlib.closing(self.urlopener.open(request)) as res:
508 with contextlib.closing(self.urlopener.open(request)) as res:
509 contentlength = res.info().get(b"content-length")
509 contentlength = res.info().get(b"content-length")
510 ui = self.ui # Shorten debug lines
510 ui = self.ui # Shorten debug lines
511 if self.ui.debugflag:
511 if self.ui.debugflag:
512 ui.debug(b'Status: %d\n' % res.status)
512 ui.debug(b'Status: %d\n' % res.status)
513 # lfs-test-server and hg serve return headers in different
513 # lfs-test-server and hg serve return headers in different
514 # order
514 # order
515 headers = pycompat.bytestr(res.info()).strip()
515 headers = pycompat.bytestr(res.info()).strip()
516 ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
516 ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
517
517
518 if action == b'download':
518 if action == b'download':
519 # If downloading blobs, store downloaded data to local
519 # If downloading blobs, store downloaded data to local
520 # blobstore
520 # blobstore
521 localstore.download(oid, res, contentlength)
521 localstore.download(oid, res, contentlength)
522 else:
522 else:
523 blocks = []
523 blocks = []
524 while True:
524 while True:
525 data = res.read(1048576)
525 data = res.read(1048576)
526 if not data:
526 if not data:
527 break
527 break
528 blocks.append(data)
528 blocks.append(data)
529
529
530 response = b"".join(blocks)
530 response = b"".join(blocks)
531 if response:
531 if response:
532 ui.debug(b'lfs %s response: %s' % (action, response))
532 ui.debug(b'lfs %s response: %s' % (action, response))
533 except util.urlerr.httperror as ex:
533 except util.urlerr.httperror as ex:
534 if self.ui.debugflag:
534 if self.ui.debugflag:
535 self.ui.debug(
535 self.ui.debug(
536 b'%s: %s\n' % (oid, ex.read())
536 b'%s: %s\n' % (oid, ex.read())
537 ) # XXX: also bytes?
537 ) # XXX: also bytes?
538 raise LfsRemoteError(
538 raise LfsRemoteError(
539 _(b'LFS HTTP error: %s (oid=%s, action=%s)')
539 _(b'LFS HTTP error: %s (oid=%s, action=%s)')
540 % (stringutil.forcebytestr(ex), oid, action)
540 % (stringutil.forcebytestr(ex), oid, action)
541 )
541 )
542 except util.urlerr.urlerror as ex:
542 except util.urlerr.urlerror as ex:
543 hint = _(b'attempted connection to %s') % pycompat.bytesurl(
543 hint = _(b'attempted connection to %s') % pycompat.bytesurl(
544 util.urllibcompat.getfullurl(request)
544 util.urllibcompat.getfullurl(request)
545 )
545 )
546 raise LfsRemoteError(
546 raise LfsRemoteError(
547 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
547 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
548 )
548 )
549 finally:
549 finally:
550 if request.data:
550 if request.data:
551 request.data.close()
551 request.data.close()
552
552
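# Editor's sketch of the 'download' leg of the basic transfer protocol
# using only the standard library (assumed href/headers; Mercurial's
# own urlopener handles auth and proxies in the code above).
import urllib.request

def download_blob_sketch(href, headers, dest_path, chunk_size=1048576):
    req = urllib.request.Request(href)
    for k, v in headers.items():
        req.add_header(k, v)
    with urllib.request.urlopen(req) as resp, open(dest_path, 'wb') as out:
        while True:
            chunk = resp.read(chunk_size)
            if not chunk:
                break
            out.write(chunk)
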
553 def _batch(self, pointers, localstore, action):
553 def _batch(self, pointers, localstore, action):
554 if action not in [b'upload', b'download']:
554 if action not in [b'upload', b'download']:
555 raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
555 raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
556
556
557 response = self._batchrequest(pointers, action)
557 response = self._batchrequest(pointers, action)
558 objects = self._extractobjects(response, pointers, action)
558 objects = self._extractobjects(response, pointers, action)
559 total = sum(x.get(b'size', 0) for x in objects)
559 total = sum(x.get(b'size', 0) for x in objects)
560 sizes = {}
560 sizes = {}
561 for obj in objects:
561 for obj in objects:
562 sizes[obj.get(b'oid')] = obj.get(b'size', 0)
562 sizes[obj.get(b'oid')] = obj.get(b'size', 0)
563 topic = {
563 topic = {
564 b'upload': _(b'lfs uploading'),
564 b'upload': _(b'lfs uploading'),
565 b'download': _(b'lfs downloading'),
565 b'download': _(b'lfs downloading'),
566 }[action]
566 }[action]
567 if len(objects) > 1:
567 if len(objects) > 1:
568 self.ui.note(
568 self.ui.note(
569 _(b'lfs: need to transfer %d objects (%s)\n')
569 _(b'lfs: need to transfer %d objects (%s)\n')
570 % (len(objects), util.bytecount(total))
570 % (len(objects), util.bytecount(total))
571 )
571 )
572
572
573 def transfer(chunk):
573 def transfer(chunk):
574 for obj in chunk:
574 for obj in chunk:
575 objsize = obj.get(b'size', 0)
575 objsize = obj.get(b'size', 0)
576 if self.ui.verbose:
576 if self.ui.verbose:
577 if action == b'download':
577 if action == b'download':
578 msg = _(b'lfs: downloading %s (%s)\n')
578 msg = _(b'lfs: downloading %s (%s)\n')
579 elif action == b'upload':
579 elif action == b'upload':
580 msg = _(b'lfs: uploading %s (%s)\n')
580 msg = _(b'lfs: uploading %s (%s)\n')
581 self.ui.note(
581 self.ui.note(
582 msg % (obj.get(b'oid'), util.bytecount(objsize))
582 msg % (obj.get(b'oid'), util.bytecount(objsize))
583 )
583 )
584 retry = self.retry
584 retry = self.retry
585 while True:
585 while True:
586 try:
586 try:
587 self._basictransfer(obj, action, localstore)
587 self._basictransfer(obj, action, localstore)
588 yield 1, obj.get(b'oid')
588 yield 1, obj.get(b'oid')
589 break
589 break
590 except socket.error as ex:
590 except socket.error as ex:
591 if retry > 0:
591 if retry > 0:
592 self.ui.note(
592 self.ui.note(
593 _(b'lfs: failed: %r (remaining retry %d)\n')
593 _(b'lfs: failed: %r (remaining retry %d)\n')
594 % (stringutil.forcebytestr(ex), retry)
594 % (stringutil.forcebytestr(ex), retry)
595 )
595 )
596 retry -= 1
596 retry -= 1
597 continue
597 continue
598 raise
598 raise
599
599
600 # Until https multiplexing gets sorted out
600 # Until https multiplexing gets sorted out
601 if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
601 if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
602 oids = worker.worker(
602 oids = worker.worker(
603 self.ui,
603 self.ui,
604 0.1,
604 0.1,
605 transfer,
605 transfer,
606 (),
606 (),
607 sorted(objects, key=lambda o: o.get(b'oid')),
607 sorted(objects, key=lambda o: o.get(b'oid')),
608 )
608 )
609 else:
609 else:
610 oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
610 oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
611
611
612 with self.ui.makeprogress(
612 with self.ui.makeprogress(
613 topic, unit=_(b"bytes"), total=total
613 topic, unit=_(b"bytes"), total=total
614 ) as progress:
614 ) as progress:
615 progress.update(0)
615 progress.update(0)
616 processed = 0
616 processed = 0
617 blobs = 0
617 blobs = 0
618 for _one, oid in oids:
618 for _one, oid in oids:
619 processed += sizes[oid]
619 processed += sizes[oid]
620 blobs += 1
620 blobs += 1
621 progress.update(processed)
621 progress.update(processed)
622 self.ui.note(_(b'lfs: processed: %s\n') % oid)
622 self.ui.note(_(b'lfs: processed: %s\n') % oid)
623
623
624 if blobs > 0:
624 if blobs > 0:
625 if action == b'upload':
625 if action == b'upload':
626 self.ui.status(
626 self.ui.status(
627 _(b'lfs: uploaded %d files (%s)\n')
627 _(b'lfs: uploaded %d files (%s)\n')
628 % (blobs, util.bytecount(processed))
628 % (blobs, util.bytecount(processed))
629 )
629 )
630 elif action == b'download':
630 elif action == b'download':
631 self.ui.status(
631 self.ui.status(
632 _(b'lfs: downloaded %d files (%s)\n')
632 _(b'lfs: downloaded %d files (%s)\n')
633 % (blobs, util.bytecount(processed))
633 % (blobs, util.bytecount(processed))
634 )
634 )
635
635
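# Editor's sketch of the retry policy used in transfer() above: retry
# only on socket errors, with a fixed budget, re-raising once exhausted.
import socket

def with_retries_sketch(fn, retries):
    while True:
        try:
            return fn()
        except socket.error:
            if retries <= 0:
                raise
            retries -= 1
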
636 def __del__(self):
636 def __del__(self):
637 # copied from mercurial/httppeer.py
637 # copied from mercurial/httppeer.py
638 urlopener = getattr(self, 'urlopener', None)
638 urlopener = getattr(self, 'urlopener', None)
639 if urlopener:
639 if urlopener:
640 for h in urlopener.handlers:
640 for h in urlopener.handlers:
641 h.close()
641 h.close()
642 getattr(h, "close_all", lambda: None)()
642 getattr(h, "close_all", lambda: None)()
643
643
644
644
645 class _dummyremote(object):
645 class _dummyremote(object):
646 """Dummy store storing blobs to temp directory."""
646 """Dummy store storing blobs to temp directory."""
647
647
648 def __init__(self, repo, url):
648 def __init__(self, repo, url):
649 fullpath = repo.vfs.join(b'lfs', url.path)
649 fullpath = repo.vfs.join(b'lfs', url.path)
650 self.vfs = lfsvfs(fullpath)
650 self.vfs = lfsvfs(fullpath)
651
651
652 def writebatch(self, pointers, fromstore):
652 def writebatch(self, pointers, fromstore):
653 for p in _deduplicate(pointers):
653 for p in _deduplicate(pointers):
654 content = fromstore.read(p.oid(), verify=True)
654 content = fromstore.read(p.oid(), verify=True)
655 with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
655 with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
656 fp.write(content)
656 fp.write(content)
657
657
658 def readbatch(self, pointers, tostore):
658 def readbatch(self, pointers, tostore):
659 for p in _deduplicate(pointers):
659 for p in _deduplicate(pointers):
660 with self.vfs(p.oid(), b'rb') as fp:
660 with self.vfs(p.oid(), b'rb') as fp:
661 tostore.download(p.oid(), fp, None)
661 tostore.download(p.oid(), fp, None)
662
662
663
663
664 class _nullremote(object):
664 class _nullremote(object):
665 """Null store storing blobs to /dev/null."""
665 """Null store storing blobs to /dev/null."""
666
666
667 def __init__(self, repo, url):
667 def __init__(self, repo, url):
668 pass
668 pass
669
669
670 def writebatch(self, pointers, fromstore):
670 def writebatch(self, pointers, fromstore):
671 pass
671 pass
672
672
673 def readbatch(self, pointers, tostore):
673 def readbatch(self, pointers, tostore):
674 pass
674 pass
675
675
676
676
677 class _promptremote(object):
677 class _promptremote(object):
678 """Prompt user to set lfs.url when accessed."""
678 """Prompt user to set lfs.url when accessed."""
679
679
680 def __init__(self, repo, url):
680 def __init__(self, repo, url):
681 pass
681 pass
682
682
683 def writebatch(self, pointers, fromstore, ui=None):
683 def writebatch(self, pointers, fromstore, ui=None):
684 self._prompt()
684 self._prompt()
685
685
686 def readbatch(self, pointers, tostore, ui=None):
686 def readbatch(self, pointers, tostore, ui=None):
687 self._prompt()
687 self._prompt()
688
688
689 def _prompt(self):
689 def _prompt(self):
690 raise error.Abort(_(b'lfs.url needs to be configured'))
690 raise error.Abort(_(b'lfs.url needs to be configured'))
691
691
692
692
693 _storemap = {
693 _storemap = {
694 b'https': _gitlfsremote,
694 b'https': _gitlfsremote,
695 b'http': _gitlfsremote,
695 b'http': _gitlfsremote,
696 b'file': _dummyremote,
696 b'file': _dummyremote,
697 b'null': _nullremote,
697 b'null': _nullremote,
698 None: _promptremote,
698 None: _promptremote,
699 }
699 }
700
700
701
701
702 def _deduplicate(pointers):
702 def _deduplicate(pointers):
703 """Remove any duplicate oids that exist in the list"""
703 """Remove any duplicate oids that exist in the list"""
704 reduced = util.sortdict()
704 reduced = util.sortdict()
705 for p in pointers:
705 for p in pointers:
706 reduced[p.oid()] = p
706 reduced[p.oid()] = p
707 return reduced.values()
707 return reduced.values()
708
708
709
709
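# Editor's note: on Python 3.7+ a plain dict preserves insertion order,
# so the same last-pointer-wins de-duplication can be sketched as:
def dedup_by_oid_sketch(pointers):
    return list({p.oid(): p for p in pointers}.values())
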
710 def _verify(oid, content):
710 def _verify(oid, content):
711 realoid = hex(hashlib.sha256(content).digest())
711 realoid = hex(hashlib.sha256(content).digest())
712 if realoid != oid:
712 if realoid != oid:
713 raise LfsCorruptionError(
713 raise LfsCorruptionError(
714 _(b'detected corrupt lfs object: %s') % oid,
714 _(b'detected corrupt lfs object: %s') % oid,
715 hint=_(b'run hg verify'),
715 hint=_(b'run hg verify'),
716 )
716 )
717
717
718
718
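# Editor's sketch: an LFS oid is the hex SHA-256 of the blob content,
# so a standalone integrity check needs only the standard library.
import hashlib

def oid_matches_sketch(oid, content):
    return hashlib.sha256(content).hexdigest().encode('ascii') == oid

# oid_matches_sketch(
#     b'2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae',
#     b'foo',
# ) -> True
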
719 def remote(repo, remote=None):
719 def remote(repo, remote=None):
720 """remotestore factory. return a store in _storemap depending on config
720 """remotestore factory. return a store in _storemap depending on config
721
721
722 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
722 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
723 infer the endpoint, based on the remote repository using the same path
723 infer the endpoint, based on the remote repository using the same path
724 adjustments as git. As an extension, 'http' is supported as well so that
724 adjustments as git. As an extension, 'http' is supported as well so that
725 ``hg serve`` works out of the box.
725 ``hg serve`` works out of the box.
726
726
727 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
727 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
728 """
728 """
729 lfsurl = repo.ui.config(b'lfs', b'url')
729 lfsurl = repo.ui.config(b'lfs', b'url')
730 url = urlutil.url(lfsurl or b'')
730 url = urlutil.url(lfsurl or b'')
731 if lfsurl is None:
731 if lfsurl is None:
732 if remote:
732 if remote:
733 path = remote
733 path = remote
734 elif util.safehasattr(repo, b'_subtoppath'):
734 elif util.safehasattr(repo, b'_subtoppath'):
735 # The pull command sets this during the optional update phase, which
735 # The pull command sets this during the optional update phase, which
736 # tells exactly where the pull originated, whether 'paths.default'
736 # tells exactly where the pull originated, whether 'paths.default'
737 # or explicit.
737 # or explicit.
738 path = repo._subtoppath
738 path = repo._subtoppath
739 else:
739 else:
740 # TODO: investigate 'paths.remote:lfsurl' style path customization,
740 # TODO: investigate 'paths.remote:lfsurl' style path customization,
741 # and fall back to inferring from 'paths.remote' if unspecified.
741 # and fall back to inferring from 'paths.remote' if unspecified.
742 path = repo.ui.config(b'paths', b'default') or b''
742 path = repo.ui.config(b'paths', b'default') or b''
743
743
744 defaulturl = urlutil.url(path)
744 defaulturl = urlutil.url(path)
745
745
746 # TODO: support local paths as well.
746 # TODO: support local paths as well.
747 # TODO: consider the ssh -> https transformation that git applies
747 # TODO: consider the ssh -> https transformation that git applies
748 if defaulturl.scheme in (b'http', b'https'):
748 if defaulturl.scheme in (b'http', b'https'):
749 if defaulturl.path and defaulturl.path[-1:] != b'/':
749 if defaulturl.path and defaulturl.path[-1:] != b'/':
750 defaulturl.path += b'/'
750 defaulturl.path += b'/'
751 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
751 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
752
752
753 url = urlutil.url(bytes(defaulturl))
753 url = urlutil.url(bytes(defaulturl))
754 repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
754 repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
755
755
756 scheme = url.scheme
756 scheme = url.scheme
757 if scheme not in _storemap:
757 if scheme not in _storemap:
758 raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
758 raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
759 return _storemap[scheme](repo, url)
759 return _storemap[scheme](repo, url)
760
760
761
761
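# Editor's sketch of the git-style endpoint inference performed above,
# reduced to plain strings (Mercurial uses urlutil.url objects):
def infer_lfs_endpoint_sketch(remote_url):
    if not remote_url.endswith('/'):
        remote_url += '/'
    return remote_url + '.git/info/lfs'

# infer_lfs_endpoint_sketch('https://example.com/repo')
# -> 'https://example.com/repo/.git/info/lfs'
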
762 class LfsRemoteError(error.StorageError):
762 class LfsRemoteError(error.StorageError):
763 pass
763 pass
764
764
765
765
766 class LfsCorruptionError(error.Abort):
766 class LfsCorruptionError(error.Abort):
767 """Raised when a corrupt blob is detected, aborting an operation
767 """Raised when a corrupt blob is detected, aborting an operation
768
768
769 It exists to allow specialized handling on the server side."""
769 It exists to allow specialized handling on the server side."""
@@ -1,2401 +1,2395
1 # phabricator.py - simple Phabricator integration
1 # phabricator.py - simple Phabricator integration
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """simple Phabricator integration (EXPERIMENTAL)
7 """simple Phabricator integration (EXPERIMENTAL)
8
8
9 This extension provides a ``phabsend`` command which sends a stack of
9 This extension provides a ``phabsend`` command which sends a stack of
10 changesets to Phabricator, and a ``phabread`` command which prints a stack of
10 changesets to Phabricator, and a ``phabread`` command which prints a stack of
11 revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
11 revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
12 to update statuses in batch.
12 to update statuses in batch.
13
13
14 A "phabstatus" view for :hg:`show` is also provided; it displays status
14 A "phabstatus" view for :hg:`show` is also provided; it displays status
15 information of Phabricator differentials associated with unfinished
15 information of Phabricator differentials associated with unfinished
16 changesets.
16 changesets.
17
17
18 By default, Phabricator requires a ``Test Plan``, which might prevent some
18 By default, Phabricator requires a ``Test Plan``, which might prevent some
19 changesets from being sent. The requirement can be disabled by changing the
19 changesets from being sent. The requirement can be disabled by changing the
20 ``differential.require-test-plan-field`` config on the server side.
20 ``differential.require-test-plan-field`` config on the server side.
21
21
22 Config::
22 Config::
23
23
24 [phabricator]
24 [phabricator]
25 # Phabricator URL
25 # Phabricator URL
26 url = https://phab.example.com/
26 url = https://phab.example.com/
27
27
28 # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
28 # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
29 # callsign is "FOO".
29 # callsign is "FOO".
30 callsign = FOO
30 callsign = FOO
31
31
32 # curl command to use. If not set (default), use builtin HTTP library to
32 # curl command to use. If not set (default), use builtin HTTP library to
33 # communicate. If set, use the specified curl command. This could be useful
33 # communicate. If set, use the specified curl command. This could be useful
34 # if you need to specify advanced options that are not easily supported by
34 # if you need to specify advanced options that are not easily supported by
35 # the internal library.
35 # the internal library.
36 curlcmd = curl --connect-timeout 2 --retry 3 --silent
36 curlcmd = curl --connect-timeout 2 --retry 3 --silent
37
37
38 # retry a failed command N times (default 0). Useful when using the
38 # retry a failed command N times (default 0). Useful when using the
39 # extension over a flaky connection.
39 # extension over a flaky connection.
40 #
40 #
41 # We wait `retry.interval` between each retry, in seconds.
41 # We wait `retry.interval` between each retry, in seconds.
42 # (default 1 second).
42 # (default 1 second).
43 retry = 3
43 retry = 3
44 retry.interval = 10
44 retry.interval = 10
45
45
46 # the retry option combines well with the http.timeout one.
46 # the retry option combines well with the http.timeout one.
47 #
47 #
48 # For example to give up on http request after 20 seconds:
48 # For example to give up on http request after 20 seconds:
49 [http]
49 [http]
50 timeout=20
50 timeout=20
51
51
52 [auth]
52 [auth]
53 example.schemes = https
53 example.schemes = https
54 example.prefix = phab.example.com
54 example.prefix = phab.example.com
55
55
56 # API token. Get it from https://$HOST/conduit/login/
56 # API token. Get it from https://$HOST/conduit/login/
57 example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
57 example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
58 """
58 """
59
59
60
60
61 import base64
61 import base64
62 import contextlib
62 import contextlib
63 import hashlib
63 import hashlib
64 import io
64 import io
65 import itertools
65 import itertools
66 import json
66 import json
67 import mimetypes
67 import mimetypes
68 import operator
68 import operator
69 import re
69 import re
70 import time
70 import time
71
71
72 from mercurial.node import bin, short
72 from mercurial.node import bin, short
73 from mercurial.i18n import _
73 from mercurial.i18n import _
74 from mercurial.pycompat import getattr
74 from mercurial.pycompat import getattr
75 from mercurial.thirdparty import attr
75 from mercurial.thirdparty import attr
76 from mercurial import (
76 from mercurial import (
77 cmdutil,
77 cmdutil,
78 context,
78 context,
79 copies,
79 copies,
80 encoding,
80 encoding,
81 error,
81 error,
82 exthelper,
82 exthelper,
83 graphmod,
83 graphmod,
84 httpconnection as httpconnectionmod,
84 httpconnection as httpconnectionmod,
85 localrepo,
85 localrepo,
86 logcmdutil,
86 logcmdutil,
87 match,
87 match,
88 mdiff,
88 mdiff,
89 obsutil,
89 obsutil,
90 parser,
90 parser,
91 patch,
91 patch,
92 phases,
92 phases,
93 pycompat,
93 pycompat,
94 rewriteutil,
94 rewriteutil,
95 scmutil,
95 scmutil,
96 smartset,
96 smartset,
97 tags,
97 tags,
98 templatefilters,
98 templatefilters,
99 templateutil,
99 templateutil,
100 url as urlmod,
100 url as urlmod,
101 util,
101 util,
102 )
102 )
103 from mercurial.utils import (
103 from mercurial.utils import (
104 procutil,
104 procutil,
105 stringutil,
105 stringutil,
106 urlutil,
106 urlutil,
107 )
107 )
108 from . import show
108 from . import show
109
109
110
110
111 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
111 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
112 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
112 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
113 # be specifying the version(s) of Mercurial they are tested with, or
113 # be specifying the version(s) of Mercurial they are tested with, or
114 # leave the attribute unspecified.
114 # leave the attribute unspecified.
115 testedwith = b'ships-with-hg-core'
115 testedwith = b'ships-with-hg-core'
116
116
117 eh = exthelper.exthelper()
117 eh = exthelper.exthelper()
118
118
119 cmdtable = eh.cmdtable
119 cmdtable = eh.cmdtable
120 command = eh.command
120 command = eh.command
121 configtable = eh.configtable
121 configtable = eh.configtable
122 templatekeyword = eh.templatekeyword
122 templatekeyword = eh.templatekeyword
123 uisetup = eh.finaluisetup
123 uisetup = eh.finaluisetup
124
124
125 # developer config: phabricator.batchsize
125 # developer config: phabricator.batchsize
126 eh.configitem(
126 eh.configitem(
127 b'phabricator',
127 b'phabricator',
128 b'batchsize',
128 b'batchsize',
129 default=12,
129 default=12,
130 )
130 )
131 eh.configitem(
131 eh.configitem(
132 b'phabricator',
132 b'phabricator',
133 b'callsign',
133 b'callsign',
134 default=None,
134 default=None,
135 )
135 )
136 eh.configitem(
136 eh.configitem(
137 b'phabricator',
137 b'phabricator',
138 b'curlcmd',
138 b'curlcmd',
139 default=None,
139 default=None,
140 )
140 )
141 # developer config: phabricator.debug
141 # developer config: phabricator.debug
142 eh.configitem(
142 eh.configitem(
143 b'phabricator',
143 b'phabricator',
144 b'debug',
144 b'debug',
145 default=False,
145 default=False,
146 )
146 )
147 # developer config: phabricator.repophid
147 # developer config: phabricator.repophid
148 eh.configitem(
148 eh.configitem(
149 b'phabricator',
149 b'phabricator',
150 b'repophid',
150 b'repophid',
151 default=None,
151 default=None,
152 )
152 )
153 eh.configitem(
153 eh.configitem(
154 b'phabricator',
154 b'phabricator',
155 b'retry',
155 b'retry',
156 default=0,
156 default=0,
157 )
157 )
158 eh.configitem(
158 eh.configitem(
159 b'phabricator',
159 b'phabricator',
160 b'retry.interval',
160 b'retry.interval',
161 default=1,
161 default=1,
162 )
162 )
163 eh.configitem(
163 eh.configitem(
164 b'phabricator',
164 b'phabricator',
165 b'url',
165 b'url',
166 default=None,
166 default=None,
167 )
167 )
168 eh.configitem(
168 eh.configitem(
169 b'phabsend',
169 b'phabsend',
170 b'confirm',
170 b'confirm',
171 default=False,
171 default=False,
172 )
172 )
173 eh.configitem(
173 eh.configitem(
174 b'phabimport',
174 b'phabimport',
175 b'secret',
175 b'secret',
176 default=False,
176 default=False,
177 )
177 )
178 eh.configitem(
178 eh.configitem(
179 b'phabimport',
179 b'phabimport',
180 b'obsolete',
180 b'obsolete',
181 default=False,
181 default=False,
182 )
182 )
183
183
184 colortable = {
184 colortable = {
185 b'phabricator.action.created': b'green',
185 b'phabricator.action.created': b'green',
186 b'phabricator.action.skipped': b'magenta',
186 b'phabricator.action.skipped': b'magenta',
187 b'phabricator.action.updated': b'magenta',
187 b'phabricator.action.updated': b'magenta',
188 b'phabricator.drev': b'bold',
188 b'phabricator.drev': b'bold',
189 b'phabricator.status.abandoned': b'magenta dim',
189 b'phabricator.status.abandoned': b'magenta dim',
190 b'phabricator.status.accepted': b'green bold',
190 b'phabricator.status.accepted': b'green bold',
191 b'phabricator.status.closed': b'green',
191 b'phabricator.status.closed': b'green',
192 b'phabricator.status.needsreview': b'yellow',
192 b'phabricator.status.needsreview': b'yellow',
193 b'phabricator.status.needsrevision': b'red',
193 b'phabricator.status.needsrevision': b'red',
194 b'phabricator.status.changesplanned': b'red',
194 b'phabricator.status.changesplanned': b'red',
195 }
195 }
196
196
197 _VCR_FLAGS = [
197 _VCR_FLAGS = [
198 (
198 (
199 b'',
199 b'',
200 b'test-vcr',
200 b'test-vcr',
201 b'',
201 b'',
202 _(
202 _(
203 b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
203 b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
204 b', otherwise will mock all http requests using the specified vcr file.'
204 b', otherwise will mock all http requests using the specified vcr file.'
205 b' (ADVANCED)'
205 b' (ADVANCED)'
206 ),
206 ),
207 ),
207 ),
208 ]
208 ]
209
209
210
210
211 @eh.wrapfunction(localrepo, "loadhgrc")
211 @eh.wrapfunction(localrepo, "loadhgrc")
212 def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **opts):
212 def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **opts):
213 """Load ``.arcconfig`` content into a ui instance on repository open."""
213 """Load ``.arcconfig`` content into a ui instance on repository open."""
214 result = False
214 result = False
215 arcconfig = {}
215 arcconfig = {}
216
216
217 try:
217 try:
218 # json.loads only accepts bytes from 3.6+
218 # json.loads only accepts bytes from 3.6+
219 rawparams = encoding.unifromlocal(wdirvfs.read(b".arcconfig"))
219 rawparams = encoding.unifromlocal(wdirvfs.read(b".arcconfig"))
220 # json.loads only returns unicode strings
220 # json.loads only returns unicode strings
221 arcconfig = pycompat.rapply(
221 arcconfig = pycompat.rapply(
222 lambda x: encoding.unitolocal(x)
222 lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
223 if isinstance(x, pycompat.unicode)
224 else x,
225 pycompat.json_loads(rawparams),
223 pycompat.json_loads(rawparams),
226 )
224 )
227
225
228 result = True
226 result = True
229 except ValueError:
227 except ValueError:
230 ui.warn(_(b"invalid JSON in %s\n") % wdirvfs.join(b".arcconfig"))
228 ui.warn(_(b"invalid JSON in %s\n") % wdirvfs.join(b".arcconfig"))
231 except IOError:
229 except IOError:
232 pass
230 pass
233
231
234 cfg = util.sortdict()
232 cfg = util.sortdict()
235
233
236 if b"repository.callsign" in arcconfig:
234 if b"repository.callsign" in arcconfig:
237 cfg[(b"phabricator", b"callsign")] = arcconfig[b"repository.callsign"]
235 cfg[(b"phabricator", b"callsign")] = arcconfig[b"repository.callsign"]
238
236
239 if b"phabricator.uri" in arcconfig:
237 if b"phabricator.uri" in arcconfig:
240 cfg[(b"phabricator", b"url")] = arcconfig[b"phabricator.uri"]
238 cfg[(b"phabricator", b"url")] = arcconfig[b"phabricator.uri"]
241
239
242 if cfg:
240 if cfg:
243 ui.applyconfig(cfg, source=wdirvfs.join(b".arcconfig"))
241 ui.applyconfig(cfg, source=wdirvfs.join(b".arcconfig"))
244
242
245 return (
243 return (
246 orig(ui, wdirvfs, hgvfs, requirements, *args, **opts) or result
244 orig(ui, wdirvfs, hgvfs, requirements, *args, **opts) or result
247 ) # Load .hg/hgrc
245 ) # Load .hg/hgrc
248
246
249
247
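# Editor's sketch of the .arcconfig -> config mapping above, with
# hypothetical values (the real code goes through encoding/pycompat):
import json

raw = '{"repository.callsign": "FOO", "phabricator.uri": "https://phab.example.com/"}'
arc = json.loads(raw)
cfg = {}
if "repository.callsign" in arc:
    cfg[("phabricator", "callsign")] = arc["repository.callsign"]
if "phabricator.uri" in arc:
    cfg[("phabricator", "url")] = arc["phabricator.uri"]
# cfg == {('phabricator', 'callsign'): 'FOO',
#         ('phabricator', 'url'): 'https://phab.example.com/'}
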
250 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
248 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
251 fullflags = flags + _VCR_FLAGS
249 fullflags = flags + _VCR_FLAGS
252
250
253 def hgmatcher(r1, r2):
251 def hgmatcher(r1, r2):
254 if r1.uri != r2.uri or r1.method != r2.method:
252 if r1.uri != r2.uri or r1.method != r2.method:
255 return False
253 return False
256 r1params = util.urlreq.parseqs(r1.body)
254 r1params = util.urlreq.parseqs(r1.body)
257 r2params = util.urlreq.parseqs(r2.body)
255 r2params = util.urlreq.parseqs(r2.body)
258 for key in r1params:
256 for key in r1params:
259 if key not in r2params:
257 if key not in r2params:
260 return False
258 return False
261 value = r1params[key][0]
259 value = r1params[key][0]
262 # we want to compare json payloads without worrying about ordering
260 # we want to compare json payloads without worrying about ordering
263 if value.startswith(b'{') and value.endswith(b'}'):
261 if value.startswith(b'{') and value.endswith(b'}'):
264 r1json = pycompat.json_loads(value)
262 r1json = pycompat.json_loads(value)
265 r2json = pycompat.json_loads(r2params[key][0])
263 r2json = pycompat.json_loads(r2params[key][0])
266 if r1json != r2json:
264 if r1json != r2json:
267 return False
265 return False
268 elif r2params[key][0] != value:
266 elif r2params[key][0] != value:
269 return False
267 return False
270 return True
268 return True
271
269
272 def sanitiserequest(request):
270 def sanitiserequest(request):
273 request.body = re.sub(
271 request.body = re.sub(
274 br'cli-[a-z0-9]+', br'cli-hahayouwish', request.body
272 br'cli-[a-z0-9]+', br'cli-hahayouwish', request.body
275 )
273 )
276 return request
274 return request
277
275
278 def sanitiseresponse(response):
276 def sanitiseresponse(response):
279 if 'set-cookie' in response['headers']:
277 if 'set-cookie' in response['headers']:
280 del response['headers']['set-cookie']
278 del response['headers']['set-cookie']
281 return response
279 return response
282
280
283 def decorate(fn):
281 def decorate(fn):
284 def inner(*args, **kwargs):
282 def inner(*args, **kwargs):
285 vcr = kwargs.pop('test_vcr')
283 vcr = kwargs.pop('test_vcr')
286 if vcr:
284 if vcr:
287 cassette = pycompat.fsdecode(vcr)
285 cassette = pycompat.fsdecode(vcr)
288 import hgdemandimport
286 import hgdemandimport
289
287
290 with hgdemandimport.deactivated():
288 with hgdemandimport.deactivated():
291 import vcr as vcrmod
289 import vcr as vcrmod
292 import vcr.stubs as stubs
290 import vcr.stubs as stubs
293
291
294 vcr = vcrmod.VCR(
292 vcr = vcrmod.VCR(
295 serializer='json',
293 serializer='json',
296 before_record_request=sanitiserequest,
294 before_record_request=sanitiserequest,
297 before_record_response=sanitiseresponse,
295 before_record_response=sanitiseresponse,
298 custom_patches=[
296 custom_patches=[
299 (
297 (
300 urlmod,
298 urlmod,
301 'httpconnection',
299 'httpconnection',
302 stubs.VCRHTTPConnection,
300 stubs.VCRHTTPConnection,
303 ),
301 ),
304 (
302 (
305 urlmod,
303 urlmod,
306 'httpsconnection',
304 'httpsconnection',
307 stubs.VCRHTTPSConnection,
305 stubs.VCRHTTPSConnection,
308 ),
306 ),
309 ],
307 ],
310 )
308 )
311 vcr.register_matcher('hgmatcher', hgmatcher)
309 vcr.register_matcher('hgmatcher', hgmatcher)
312 with vcr.use_cassette(cassette, match_on=['hgmatcher']):
310 with vcr.use_cassette(cassette, match_on=['hgmatcher']):
313 return fn(*args, **kwargs)
311 return fn(*args, **kwargs)
314 return fn(*args, **kwargs)
312 return fn(*args, **kwargs)
315
313
316 cmd = util.checksignature(inner, depth=2)
314 cmd = util.checksignature(inner, depth=2)
317 cmd.__name__ = fn.__name__
315 cmd.__name__ = fn.__name__
318 cmd.__doc__ = fn.__doc__
316 cmd.__doc__ = fn.__doc__
319
317
320 return command(
318 return command(
321 name,
319 name,
322 fullflags,
320 fullflags,
323 spec,
321 spec,
324 helpcategory=helpcategory,
322 helpcategory=helpcategory,
325 optionalrepo=optionalrepo,
323 optionalrepo=optionalrepo,
326 )(cmd)
324 )(cmd)
327
325
328 return decorate
326 return decorate
329
327
330
328
331 def _debug(ui, *msg, **opts):
329 def _debug(ui, *msg, **opts):
332 """write debug output for Phabricator if ``phabricator.debug`` is set
330 """write debug output for Phabricator if ``phabricator.debug`` is set
333
331
334 Specifically, this avoids dumping Conduit and HTTP auth chatter that is
332 Specifically, this avoids dumping Conduit and HTTP auth chatter that is
335 printed with the --debug argument.
333 printed with the --debug argument.
336 """
334 """
337 if ui.configbool(b"phabricator", b"debug"):
335 if ui.configbool(b"phabricator", b"debug"):
338 flag = ui.debugflag
336 flag = ui.debugflag
339 try:
337 try:
340 ui.debugflag = True
338 ui.debugflag = True
341 ui.write(*msg, **opts)
339 ui.write(*msg, **opts)
342 finally:
340 finally:
343 ui.debugflag = flag
341 ui.debugflag = flag
344
342
345
343
346 def urlencodenested(params):
344 def urlencodenested(params):
347 """like urlencode, but works with nested parameters.
345 """like urlencode, but works with nested parameters.
348
346
349 For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
347 For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
350 flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
348 flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
351 urlencode. Note: the encoding is consistent with PHP's http_build_query.
349 urlencode. Note: the encoding is consistent with PHP's http_build_query.
352 """
350 """
353 flatparams = util.sortdict()
351 flatparams = util.sortdict()
354
352
355 def process(prefix, obj):
353 def process(prefix, obj):
356 if isinstance(obj, bool):
354 if isinstance(obj, bool):
357 obj = {True: b'true', False: b'false'}[obj] # Python -> PHP form
355 obj = {True: b'true', False: b'false'}[obj] # Python -> PHP form
358 lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
356 lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
359 items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
357 items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
360 if items is None:
358 if items is None:
361 flatparams[prefix] = obj
359 flatparams[prefix] = obj
362 else:
360 else:
363 for k, v in items(obj):
361 for k, v in items(obj):
364 if prefix:
362 if prefix:
365 process(b'%s[%s]' % (prefix, k), v)
363 process(b'%s[%s]' % (prefix, k), v)
366 else:
364 else:
367 process(k, v)
365 process(k, v)
368
366
369 process(b'', params)
367 process(b'', params)
370 return urlutil.urlreq.urlencode(flatparams)
368 return urlutil.urlreq.urlencode(flatparams)
371
369
372
370
373 def readurltoken(ui):
371 def readurltoken(ui):
374 """return conduit url, token and make sure they exist
372 """return conduit url, token and make sure they exist
375
373
376 Currently read from [auth] config section. In the future, it might
374 Currently read from [auth] config section. In the future, it might
377 make sense to read from .arcconfig and .arcrc as well.
375 make sense to read from .arcconfig and .arcrc as well.
378 """
376 """
379 url = ui.config(b'phabricator', b'url')
377 url = ui.config(b'phabricator', b'url')
380 if not url:
378 if not url:
381 raise error.Abort(
379 raise error.Abort(
382 _(b'config %s.%s is required') % (b'phabricator', b'url')
380 _(b'config %s.%s is required') % (b'phabricator', b'url')
383 )
381 )
384
382
385 res = httpconnectionmod.readauthforuri(ui, url, urlutil.url(url).user)
383 res = httpconnectionmod.readauthforuri(ui, url, urlutil.url(url).user)
386 token = None
384 token = None
387
385
388 if res:
386 if res:
389 group, auth = res
387 group, auth = res
390
388
391 ui.debug(b"using auth.%s.* for authentication\n" % group)
389 ui.debug(b"using auth.%s.* for authentication\n" % group)
392
390
393 token = auth.get(b'phabtoken')
391 token = auth.get(b'phabtoken')
394
392
395 if not token:
393 if not token:
396 raise error.Abort(
394 raise error.Abort(
397 _(b'Can\'t find conduit token associated with %s') % (url,)
395 _(b'Can\'t find conduit token associated with %s') % (url,)
398 )
396 )
399
397
400 return url, token
398 return url, token
401
399
402
400
403 def callconduit(ui, name, params):
401 def callconduit(ui, name, params):
404 """call Conduit API, params is a dict. return json.loads result, or None"""
402 """call Conduit API, params is a dict. return json.loads result, or None"""
405 host, token = readurltoken(ui)
403 host, token = readurltoken(ui)
406 url, authinfo = urlutil.url(b'/'.join([host, b'api', name])).authinfo()
404 url, authinfo = urlutil.url(b'/'.join([host, b'api', name])).authinfo()
407 ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
405 ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
408 params = params.copy()
406 params = params.copy()
409 params[b'__conduit__'] = {
407 params[b'__conduit__'] = {
410 b'token': token,
408 b'token': token,
411 }
409 }
412 rawdata = {
410 rawdata = {
413 b'params': templatefilters.json(params),
411 b'params': templatefilters.json(params),
414 b'output': b'json',
412 b'output': b'json',
415 b'__conduit__': 1,
413 b'__conduit__': 1,
416 }
414 }
417 data = urlencodenested(rawdata)
415 data = urlencodenested(rawdata)
418 curlcmd = ui.config(b'phabricator', b'curlcmd')
416 curlcmd = ui.config(b'phabricator', b'curlcmd')
419 if curlcmd:
417 if curlcmd:
420 sin, sout = procutil.popen2(
418 sin, sout = procutil.popen2(
421 b'%s -d @- %s' % (curlcmd, procutil.shellquote(url))
419 b'%s -d @- %s' % (curlcmd, procutil.shellquote(url))
422 )
420 )
423 sin.write(data)
421 sin.write(data)
424 sin.close()
422 sin.close()
425 body = sout.read()
423 body = sout.read()
426 else:
424 else:
427 urlopener = urlmod.opener(ui, authinfo)
425 urlopener = urlmod.opener(ui, authinfo)
428 request = util.urlreq.request(pycompat.strurl(url), data=data)
426 request = util.urlreq.request(pycompat.strurl(url), data=data)
429 max_try = ui.configint(b'phabricator', b'retry') + 1
427 max_try = ui.configint(b'phabricator', b'retry') + 1
430 timeout = ui.configwith(float, b'http', b'timeout')
428 timeout = ui.configwith(float, b'http', b'timeout')
431 for try_count in range(max_try):
429 for try_count in range(max_try):
432 try:
430 try:
433 with contextlib.closing(
431 with contextlib.closing(
434 urlopener.open(request, timeout=timeout)
432 urlopener.open(request, timeout=timeout)
435 ) as rsp:
433 ) as rsp:
436 body = rsp.read()
434 body = rsp.read()
437 break
435 break
438 except util.urlerr.urlerror as err:
436 except util.urlerr.urlerror as err:
439 if try_count == max_try - 1:
437 if try_count == max_try - 1:
440 raise
438 raise
441 ui.debug(
439 ui.debug(
442 b'Conduit Request failed (try %d/%d): %r\n'
440 b'Conduit Request failed (try %d/%d): %r\n'
443 % (try_count + 1, max_try, err)
441 % (try_count + 1, max_try, err)
444 )
442 )
445 # a failing request might come from an overloaded server
443 # a failing request might come from an overloaded server
446 retry_interval = ui.configint(b'phabricator', b'retry.interval')
444 retry_interval = ui.configint(b'phabricator', b'retry.interval')
447 time.sleep(retry_interval)
445 time.sleep(retry_interval)
448 ui.debug(b'Conduit Response: %s\n' % body)
446 ui.debug(b'Conduit Response: %s\n' % body)
449 parsed = pycompat.rapply(
447 parsed = pycompat.rapply(
450 lambda x: encoding.unitolocal(x)
448 lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
451 if isinstance(x, pycompat.unicode)
452 else x,
453 # json.loads only accepts bytes from py3.6+
449 # json.loads only accepts bytes from py3.6+
454 pycompat.json_loads(encoding.unifromlocal(body)),
450 pycompat.json_loads(encoding.unifromlocal(body)),
455 )
451 )
456 if parsed.get(b'error_code'):
452 if parsed.get(b'error_code'):
457 msg = _(b'Conduit Error (%s): %s') % (
453 msg = _(b'Conduit Error (%s): %s') % (
458 parsed[b'error_code'],
454 parsed[b'error_code'],
459 parsed[b'error_info'],
455 parsed[b'error_info'],
460 )
456 )
461 raise error.Abort(msg)
457 raise error.Abort(msg)
462 return parsed[b'result']
458 return parsed[b'result']
463
459
464
460
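# Editor's note: the decoded conduit reply has this general shape
# (illustrative values only); error_code short-circuits into an Abort,
# otherwise everything of interest lives under b'result'.
reply = {b'error_code': None, b'error_info': None,
         b'result': {b'data': []}}
assert not reply.get(b'error_code')
payload = reply[b'result']
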
465 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True)
461 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True)
466 def debugcallconduit(ui, repo, name):
462 def debugcallconduit(ui, repo, name):
467 """call Conduit API
463 """call Conduit API
468
464
469 Call parameters are read from stdin as a JSON blob. Result will be written
465 Call parameters are read from stdin as a JSON blob. Result will be written
470 to stdout as a JSON blob.
466 to stdout as a JSON blob.
471 """
467 """
472 # json.loads only accepts bytes from 3.6+
468 # json.loads only accepts bytes from 3.6+
473 rawparams = encoding.unifromlocal(ui.fin.read())
469 rawparams = encoding.unifromlocal(ui.fin.read())
474 # json.loads only returns unicode strings
470 # json.loads only returns unicode strings
475 params = pycompat.rapply(
471 params = pycompat.rapply(
476 lambda x: encoding.unitolocal(x)
472 lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
477 if isinstance(x, pycompat.unicode)
478 else x,
479 pycompat.json_loads(rawparams),
473 pycompat.json_loads(rawparams),
480 )
474 )
481 # json.dumps only accepts unicode strings
475 # json.dumps only accepts unicode strings
482 result = pycompat.rapply(
476 result = pycompat.rapply(
483 lambda x: encoding.unifromlocal(x) if isinstance(x, bytes) else x,
477 lambda x: encoding.unifromlocal(x) if isinstance(x, bytes) else x,
484 callconduit(ui, name, params),
478 callconduit(ui, name, params),
485 )
479 )
486 s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
480 s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
487 ui.write(b'%s\n' % encoding.unitolocal(s))
481 ui.write(b'%s\n' % encoding.unitolocal(s))
488
482
489
483
490 def getrepophid(repo):
484 def getrepophid(repo):
491 """given callsign, return repository PHID or None"""
485 """given callsign, return repository PHID or None"""
492 # developer config: phabricator.repophid
486 # developer config: phabricator.repophid
493 repophid = repo.ui.config(b'phabricator', b'repophid')
487 repophid = repo.ui.config(b'phabricator', b'repophid')
494 if repophid:
488 if repophid:
495 return repophid
489 return repophid
496 callsign = repo.ui.config(b'phabricator', b'callsign')
490 callsign = repo.ui.config(b'phabricator', b'callsign')
497 if not callsign:
491 if not callsign:
498 return None
492 return None
499 query = callconduit(
493 query = callconduit(
500 repo.ui,
494 repo.ui,
501 b'diffusion.repository.search',
495 b'diffusion.repository.search',
502 {b'constraints': {b'callsigns': [callsign]}},
496 {b'constraints': {b'callsigns': [callsign]}},
503 )
497 )
504 if len(query[b'data']) == 0:
498 if len(query[b'data']) == 0:
505 return None
499 return None
506 repophid = query[b'data'][0][b'phid']
500 repophid = query[b'data'][0][b'phid']
507 repo.ui.setconfig(b'phabricator', b'repophid', repophid)
501 repo.ui.setconfig(b'phabricator', b'repophid', repophid)
508 return repophid
502 return repophid
509
503
510
504
511 _differentialrevisiontagre = re.compile(br'\AD([1-9][0-9]*)\Z')
505 _differentialrevisiontagre = re.compile(br'\AD([1-9][0-9]*)\Z')
512 _differentialrevisiondescre = re.compile(
506 _differentialrevisiondescre = re.compile(
513 br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M
507 br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M
514 )
508 )
515
509
516
510
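# Editor's sketch of the two Differential regexes above in action:
import re

tag_re = re.compile(br'\AD([1-9][0-9]*)\Z')
desc_re = re.compile(
    br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M
)

assert tag_re.match(b'D1234').group(1) == b'1234'
m = desc_re.search(b'commit summary\n\nDifferential Revision: https://phab.example.com/D42')
assert m and int(m.group('id')) == 42
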
517 def getoldnodedrevmap(repo, nodelist):
511 def getoldnodedrevmap(repo, nodelist):
518 """find previous nodes that has been sent to Phabricator
512 """find previous nodes that has been sent to Phabricator
519
513
520 return {node: (oldnode, Differential diff, Differential Revision ID)}
514 return {node: (oldnode, Differential diff, Differential Revision ID)}
521 for node in nodelist with known previous sent versions, or associated
515 for node in nodelist with known previous sent versions, or associated
522 Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
516 Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
523 be ``None``.
517 be ``None``.
524
518
525 Examines commit messages like "Differential Revision:" to get the
519 Examines commit messages like "Differential Revision:" to get the
526 association information.
520 association information.
527
521
528 If no such commit message line is found, examines all precursors and their
522 If no such commit message line is found, examines all precursors and their
529 tags. Tags formatted like "D1234" are considered a match, and the node
523 tags. Tags formatted like "D1234" are considered a match, and the node
530 with that tag, along with the number after "D" (e.g. 1234), will be returned.
524 with that tag, along with the number after "D" (e.g. 1234), will be returned.
531
525
532 The ``old node``, if not None, is guaranteed to be the last diff of the
526 The ``old node``, if not None, is guaranteed to be the last diff of the
533 corresponding Differential Revision, and to exist in the repo.
527 corresponding Differential Revision, and to exist in the repo.
534 """
528 """
535 unfi = repo.unfiltered()
529 unfi = repo.unfiltered()
536 has_node = unfi.changelog.index.has_node
530 has_node = unfi.changelog.index.has_node
537
531
538 result = {} # {node: (oldnode?, lastdiff?, drev)}
532 result = {} # {node: (oldnode?, lastdiff?, drev)}
539 # ordered for test stability when printing new -> old mapping below
533 # ordered for test stability when printing new -> old mapping below
540 toconfirm = util.sortdict() # {node: (force, {precnode}, drev)}
534 toconfirm = util.sortdict() # {node: (force, {precnode}, drev)}
541 for node in nodelist:
535 for node in nodelist:
542 ctx = unfi[node]
536 ctx = unfi[node]
543 # For tags like "D123", put them into "toconfirm" to verify later
537 # For tags like "D123", put them into "toconfirm" to verify later
544 precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
538 precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
545 for n in precnodes:
539 for n in precnodes:
546 if has_node(n):
540 if has_node(n):
547 for tag in unfi.nodetags(n):
541 for tag in unfi.nodetags(n):
548 m = _differentialrevisiontagre.match(tag)
542 m = _differentialrevisiontagre.match(tag)
549 if m:
543 if m:
550 toconfirm[node] = (0, set(precnodes), int(m.group(1)))
544 toconfirm[node] = (0, set(precnodes), int(m.group(1)))
551 break
545 break
552 else:
546 else:
553 continue # move to next predecessor
547 continue # move to next predecessor
554 break # found a tag, stop
548 break # found a tag, stop
555 else:
549 else:
556 # Check commit message
550 # Check commit message
557 m = _differentialrevisiondescre.search(ctx.description())
551 m = _differentialrevisiondescre.search(ctx.description())
558 if m:
552 if m:
559 toconfirm[node] = (1, set(precnodes), int(m.group('id')))
553 toconfirm[node] = (1, set(precnodes), int(m.group('id')))
560
554
561 # Double check that tags are genuine by collecting all old nodes from
555 # Double check that tags are genuine by collecting all old nodes from
562 # Phabricator, and expecting the precursors to overlap with them.
556 # Phabricator, and expecting the precursors to overlap with them.
563 if toconfirm:
557 if toconfirm:
564 drevs = [drev for force, precs, drev in toconfirm.values()]
558 drevs = [drev for force, precs, drev in toconfirm.values()]
565 alldiffs = callconduit(
559 alldiffs = callconduit(
566 unfi.ui, b'differential.querydiffs', {b'revisionIDs': drevs}
560 unfi.ui, b'differential.querydiffs', {b'revisionIDs': drevs}
567 )
561 )
568
562
569 def getnodes(d, precset):
563 def getnodes(d, precset):
570 # Ignore other nodes that were combined into the Differential
564 # Ignore other nodes that were combined into the Differential
571 # that aren't predecessors of the current local node.
565 # that aren't predecessors of the current local node.
572 return [n for n in getlocalcommits(d) if n in precset]
566 return [n for n in getlocalcommits(d) if n in precset]
573
567
574 for newnode, (force, precset, drev) in toconfirm.items():
568 for newnode, (force, precset, drev) in toconfirm.items():
575 diffs = [
569 diffs = [
576 d for d in alldiffs.values() if int(d[b'revisionID']) == drev
570 d for d in alldiffs.values() if int(d[b'revisionID']) == drev
577 ]
571 ]
578
572
579 # local predecessors known by Phabricator
            # local predecessors known by Phabricator
            phprecset = {n for d in diffs for n in getnodes(d, precset)}

            # Ignore if precursors (Phabricator and local repo) do not overlap,
            # and force is not set (when commit message says nothing)
            if not force and not phprecset:
                tagname = b'D%d' % drev
                tags.tag(
                    repo,
                    tagname,
                    repo.nullid,
                    message=None,
                    user=None,
                    date=None,
                    local=True,
                )
                unfi.ui.warn(
                    _(
                        b'D%d: local tag removed - does not match '
                        b'Differential history\n'
                    )
                    % drev
                )
                continue

            # Find the last node using Phabricator metadata, and make sure it
            # exists in the repo
            oldnode = lastdiff = None
            if diffs:
                lastdiff = max(diffs, key=lambda d: int(d[b'id']))
                oldnodes = getnodes(lastdiff, precset)

                _debug(
                    unfi.ui,
                    b"%s mapped to old nodes %s\n"
                    % (
                        short(newnode),
                        stringutil.pprint([short(n) for n in sorted(oldnodes)]),
                    ),
                )

                # If this commit was the result of `hg fold` after submission,
                # and now resubmitted with --fold, the easiest thing to do is
                # to leave the node clear. This only results in creating a new
                # diff for the _same_ Differential Revision if this commit is
                # the first or last in the selected range. If we picked a node
                # from the list instead, it would have to be the lowest if at
                # the beginning of the --fold range, or the highest at the end.
                # Otherwise, one or more of the nodes wouldn't be considered in
                # the diff, and the Differential wouldn't be properly updated.
                # If this commit is the result of `hg split` in the same
                # scenario, there is a single oldnode here (and multiple
                # newnodes mapped to it). That makes it the same as the normal
                # case, as the edges of the newnode range cleanly map to one
                # oldnode each.
                if len(oldnodes) == 1:
                    oldnode = oldnodes[0]
                    if oldnode and not has_node(oldnode):
                        oldnode = None

            result[newnode] = (oldnode, lastdiff, drev)

    return result


def getdrevmap(repo, revs):
    """Return a dict mapping each rev in `revs` to its Differential Revision
    ID, or None.
    """
    result = {}
    for rev in revs:
        result[rev] = None
        ctx = repo[rev]
        # Check commit message
        m = _differentialrevisiondescre.search(ctx.description())
        if m:
            result[rev] = int(m.group('id'))
            continue
        # Check tags
        for tag in repo.nodetags(ctx.node()):
            m = _differentialrevisiontagre.match(tag)
            if m:
                result[rev] = int(m.group(1))
                break

    return result


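# Illustrative sketch of the two lookups in getdrevmap() above (host name and
# numbers are hypothetical): a commit whose description contains
#
#     Differential Revision: https://phab.example.com/D123
#
# resolves to 123 via _differentialrevisiondescre, while a commit that only
# carries a local tag named "D123" resolves to 123 via
# _differentialrevisiontagre.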
def getdiff(basectx, ctx, diffopts):
    """plain-text diff without header (user, commit message, etc)"""
    output = util.stringio()
    for chunk, _label in patch.diffui(
        ctx.repo(), basectx.p1().node(), ctx.node(), None, opts=diffopts
    ):
        output.write(chunk)
    return output.getvalue()


class DiffChangeType(object):
    ADD = 1
    CHANGE = 2
    DELETE = 3
    MOVE_AWAY = 4
    COPY_AWAY = 5
    MOVE_HERE = 6
    COPY_HERE = 7
    MULTICOPY = 8


class DiffFileType(object):
    TEXT = 1
    IMAGE = 2
    BINARY = 3


@attr.s
class phabhunk(dict):
    """Represents a Differential hunk, which is owned by a Differential change"""

    oldOffset = attr.ib(default=0)  # camelcase-required
    oldLength = attr.ib(default=0)  # camelcase-required
    newOffset = attr.ib(default=0)  # camelcase-required
    newLength = attr.ib(default=0)  # camelcase-required
    corpus = attr.ib(default='')
    # These get added to the phabchange's equivalents
    addLines = attr.ib(default=0)  # camelcase-required
    delLines = attr.ib(default=0)  # camelcase-required


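# For reference, phabchange.addhunk() below flattens a phabhunk into a plain
# dict with bytes keys before it goes over the wire; with hypothetical values
# it looks roughly like:
#
#     {
#         b'oldOffset': 1, b'oldLength': 2,
#         b'newOffset': 1, b'newLength': 3,
#         b'corpus': b' context\n-old line\n+new line\n+added line\n',
#         b'addLines': 2, b'delLines': 1,
#     }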
@attr.s
class phabchange(object):
    """Represents a Differential change, owns Differential hunks and is owned
    by a Differential diff. Each one represents one file in a diff.
    """

    currentPath = attr.ib(default=None)  # camelcase-required
    oldPath = attr.ib(default=None)  # camelcase-required
    awayPaths = attr.ib(default=attr.Factory(list))  # camelcase-required
    metadata = attr.ib(default=attr.Factory(dict))
    oldProperties = attr.ib(default=attr.Factory(dict))  # camelcase-required
    newProperties = attr.ib(default=attr.Factory(dict))  # camelcase-required
    type = attr.ib(default=DiffChangeType.CHANGE)
    fileType = attr.ib(default=DiffFileType.TEXT)  # camelcase-required
    commitHash = attr.ib(default=None)  # camelcase-required
    addLines = attr.ib(default=0)  # camelcase-required
    delLines = attr.ib(default=0)  # camelcase-required
    hunks = attr.ib(default=attr.Factory(list))

    def copynewmetadatatoold(self):
        for key in list(self.metadata.keys()):
            newkey = key.replace(b'new:', b'old:')
            self.metadata[newkey] = self.metadata[key]

    def addoldmode(self, value):
        self.oldProperties[b'unix:filemode'] = value

    def addnewmode(self, value):
        self.newProperties[b'unix:filemode'] = value

    def addhunk(self, hunk):
        if not isinstance(hunk, phabhunk):
            raise error.Abort(b'phabchange.addhunk only takes phabhunks')
        self.hunks.append(pycompat.byteskwargs(attr.asdict(hunk)))
        # It's useful to include these stats since the Phab web UI shows them,
        # and uses them to estimate how large a change a Revision is. Also used
        # in email subjects for the [+++--] bit.
        self.addLines += hunk.addLines
        self.delLines += hunk.delLines


@attr.s
class phabdiff(object):
    """Represents a Differential diff, owns Differential changes. Corresponds
    to a commit.
    """

    # There doesn't seem to be any reason to send this (output of uname -n)
    sourceMachine = attr.ib(default=b'')  # camelcase-required
    sourcePath = attr.ib(default=b'/')  # camelcase-required
    sourceControlBaseRevision = attr.ib(default=b'0' * 40)  # camelcase-required
    sourceControlPath = attr.ib(default=b'/')  # camelcase-required
    sourceControlSystem = attr.ib(default=b'hg')  # camelcase-required
    branch = attr.ib(default=b'default')
    bookmark = attr.ib(default=None)
    creationMethod = attr.ib(default=b'phabsend')  # camelcase-required
    lintStatus = attr.ib(default=b'none')  # camelcase-required
    unitStatus = attr.ib(default=b'none')  # camelcase-required
    changes = attr.ib(default=attr.Factory(dict))
    repositoryPHID = attr.ib(default=None)  # camelcase-required

    def addchange(self, change):
        if not isinstance(change, phabchange):
            raise error.Abort(b'phabdiff.addchange only takes phabchanges')
        self.changes[change.currentPath] = pycompat.byteskwargs(
            attr.asdict(change)
        )


def maketext(pchange, basectx, ctx, fname):
    """populate the phabchange for a text file"""
    repo = ctx.repo()
    fmatcher = match.exact([fname])
    diffopts = mdiff.diffopts(git=True, context=32767)
    _pfctx, _fctx, header, fhunks = next(
        patch.diffhunks(repo, basectx.p1(), ctx, fmatcher, opts=diffopts)
    )

    for fhunk in fhunks:
        (oldOffset, oldLength, newOffset, newLength), lines = fhunk
        corpus = b''.join(lines[1:])
        shunk = list(header)
        shunk.extend(lines)
        _mf, _mt, addLines, delLines, _hb = patch.diffstatsum(
            patch.diffstatdata(util.iterlines(shunk))
        )
        pchange.addhunk(
            phabhunk(
                oldOffset,
                oldLength,
                newOffset,
                newLength,
                corpus,
                addLines,
                delLines,
            )
        )


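# Note on maketext() above: context=32767 asks mdiff for effectively
# unlimited context, so a hunk's corpus carries the whole surrounding file,
# and lines[1:] drops the hunk's leading "@@ -a,b +c,d @@" marker line. A
# corpus might look like (hypothetical):
#
#     b' def foo():\n-    return 1\n+    return 2\n'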
def uploadchunks(fctx, fphid):
    """upload large binary files as separate chunks.
    Phabricator requests chunked uploads for files over 8MiB, and splits them
    into 4MiB chunks.
    """
    ui = fctx.repo().ui
    chunks = callconduit(ui, b'file.querychunks', {b'filePHID': fphid})
    with ui.makeprogress(
        _(b'uploading file chunks'), unit=_(b'chunks'), total=len(chunks)
    ) as progress:
        for chunk in chunks:
            progress.increment()
            if chunk[b'complete']:
                continue
            bstart = int(chunk[b'byteStart'])
            bend = int(chunk[b'byteEnd'])
            callconduit(
                ui,
                b'file.uploadchunk',
                {
                    b'filePHID': fphid,
                    b'byteStart': bstart,
                    b'data': base64.b64encode(fctx.data()[bstart:bend]),
                    b'dataEncoding': b'base64',
                },
            )


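# Rough shape of a file.querychunks descriptor consumed above (values are
# hypothetical; offsets arrive as strings, hence the int() calls):
#
#     {b'byteStart': b'0', b'byteEnd': b'4194304', b'complete': False}
#
# Chunks already marked complete are skipped; the rest are sliced out of
# fctx.data() and pushed with file.uploadchunk, base64-encoded.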
def uploadfile(fctx):
    """upload binary files to Phabricator"""
    repo = fctx.repo()
    ui = repo.ui
    fname = fctx.path()
    size = fctx.size()
    fhash = pycompat.bytestr(hashlib.sha256(fctx.data()).hexdigest())

    # an allocate call is required first to see if an upload is even required
    # (Phab might already have it) and to determine if chunking is needed
    allocateparams = {
        b'name': fname,
        b'contentLength': size,
        b'contentHash': fhash,
    }
    filealloc = callconduit(ui, b'file.allocate', allocateparams)
    fphid = filealloc[b'filePHID']

    if filealloc[b'upload']:
        ui.write(_(b'uploading %s\n') % bytes(fctx))
        if not fphid:
            uploadparams = {
                b'name': fname,
                b'data_base64': base64.b64encode(fctx.data()),
            }
            fphid = callconduit(ui, b'file.upload', uploadparams)
        else:
            uploadchunks(fctx, fphid)
    else:
        ui.debug(b'server already has %s\n' % bytes(fctx))

    if not fphid:
        raise error.Abort(b'Upload of %s failed.' % bytes(fctx))

    return fphid


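# Summary of the file.allocate outcomes handled above: upload=False means the
# server already has the content and the returned filePHID is reused;
# upload=True without a filePHID means a small file, sent in one file.upload
# call; upload=True with a filePHID means a large file, sent in pieces via
# uploadchunks(); a filePHID that is still empty afterwards aborts.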
def addoldbinary(pchange, oldfctx, fctx):
    """add the metadata for the previous version of a binary file to the
    phabchange for the new version

    ``oldfctx`` is the previous version of the file; ``fctx`` is the new
    version of the file, or None if the file is being removed.
    """
    if not fctx or fctx.cmp(oldfctx):
        # Files differ, add the old one
        pchange.metadata[b'old:file:size'] = oldfctx.size()
        mimeguess, _enc = mimetypes.guess_type(
            encoding.unifromlocal(oldfctx.path())
        )
        if mimeguess:
            pchange.metadata[b'old:file:mime-type'] = pycompat.bytestr(
                mimeguess
            )
        fphid = uploadfile(oldfctx)
        pchange.metadata[b'old:binary-phid'] = fphid
    else:
        # If it's left as IMAGE/BINARY, the web UI might try to display it
        pchange.fileType = DiffFileType.TEXT
        pchange.copynewmetadatatoold()


def makebinary(pchange, fctx):
    """populate the phabchange for a binary file"""
    pchange.fileType = DiffFileType.BINARY
    fphid = uploadfile(fctx)
    pchange.metadata[b'new:binary-phid'] = fphid
    pchange.metadata[b'new:file:size'] = fctx.size()
    mimeguess, _enc = mimetypes.guess_type(encoding.unifromlocal(fctx.path()))
    if mimeguess:
        mimeguess = pycompat.bytestr(mimeguess)
        pchange.metadata[b'new:file:mime-type'] = mimeguess
        if mimeguess.startswith(b'image/'):
            pchange.fileType = DiffFileType.IMAGE


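# Two illustrative notes on the binary path above and the table below:
# mimetypes guesses purely from the file name, e.g.
# mimetypes.guess_type('logo.png') == ('image/png', None), so a misnamed
# image ends up as DiffFileType.BINARY rather than IMAGE. And gitmode maps
# fctx.flags() output (b'l' symlink, b'x' executable, b'' regular file) to
# the corresponding git file modes.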
# Copied from mercurial/patch.py
gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}


def notutf8(fctx):
    """detect non-UTF-8 text files since Phabricator requires them to be marked
    as binary
    """
    try:
        fctx.data().decode('utf-8')
        return False
    except UnicodeDecodeError:
        fctx.repo().ui.write(
            _(b'file %s detected as non-UTF-8, marked as binary\n')
            % fctx.path()
        )
        return True


def addremoved(pdiff, basectx, ctx, removed):
    """add removed files to the phabdiff. Shouldn't include moves"""
    for fname in removed:
        pchange = phabchange(
            currentPath=fname, oldPath=fname, type=DiffChangeType.DELETE
        )
        oldfctx = basectx.p1()[fname]
        pchange.addoldmode(gitmode[oldfctx.flags()])
        if not (oldfctx.isbinary() or notutf8(oldfctx)):
            maketext(pchange, basectx, ctx, fname)

        pdiff.addchange(pchange)


def addmodified(pdiff, basectx, ctx, modified):
    """add modified files to the phabdiff"""
    for fname in modified:
        fctx = ctx[fname]
        oldfctx = basectx.p1()[fname]
        pchange = phabchange(currentPath=fname, oldPath=fname)
        filemode = gitmode[fctx.flags()]
        originalmode = gitmode[oldfctx.flags()]
        if filemode != originalmode:
            pchange.addoldmode(originalmode)
            pchange.addnewmode(filemode)

        if (
            fctx.isbinary()
            or notutf8(fctx)
            or oldfctx.isbinary()
            or notutf8(oldfctx)
        ):
            makebinary(pchange, fctx)
            addoldbinary(pchange, oldfctx, fctx)
        else:
            maketext(pchange, basectx, ctx, fname)

        pdiff.addchange(pchange)


def addadded(pdiff, basectx, ctx, added, removed):
    """add file adds to the phabdiff, both new files and copies/moves"""
    # Keep track of files that've been recorded as moved/copied, so if there are
    # additional copies we can mark them (moves get removed from removed)
    copiedchanges = {}
    movedchanges = {}

    copy = {}
    if basectx != ctx:
        copy = copies.pathcopies(basectx.p1(), ctx)

    for fname in added:
        fctx = ctx[fname]
        oldfctx = None
        pchange = phabchange(currentPath=fname)

        filemode = gitmode[fctx.flags()]

        if copy:
            originalfname = copy.get(fname, fname)
        else:
            originalfname = fname
            if fctx.renamed():
                originalfname = fctx.renamed()[0]

        renamed = fname != originalfname

        if renamed:
            oldfctx = basectx.p1()[originalfname]
            originalmode = gitmode[oldfctx.flags()]
            pchange.oldPath = originalfname

            if originalfname in removed:
                origpchange = phabchange(
                    currentPath=originalfname,
                    oldPath=originalfname,
                    type=DiffChangeType.MOVE_AWAY,
                    awayPaths=[fname],
                )
                movedchanges[originalfname] = origpchange
                removed.remove(originalfname)
                pchange.type = DiffChangeType.MOVE_HERE
            elif originalfname in movedchanges:
                movedchanges[originalfname].type = DiffChangeType.MULTICOPY
                movedchanges[originalfname].awayPaths.append(fname)
                pchange.type = DiffChangeType.COPY_HERE
            else:  # pure copy
                if originalfname not in copiedchanges:
                    origpchange = phabchange(
                        currentPath=originalfname, type=DiffChangeType.COPY_AWAY
                    )
                    copiedchanges[originalfname] = origpchange
                else:
                    origpchange = copiedchanges[originalfname]
                origpchange.awayPaths.append(fname)
                pchange.type = DiffChangeType.COPY_HERE

            if filemode != originalmode:
                pchange.addoldmode(originalmode)
                pchange.addnewmode(filemode)
        else:  # Brand-new file
            pchange.addnewmode(gitmode[fctx.flags()])
            pchange.type = DiffChangeType.ADD

        if (
            fctx.isbinary()
            or notutf8(fctx)
            or (oldfctx and (oldfctx.isbinary() or notutf8(oldfctx)))
        ):
            makebinary(pchange, fctx)
            if renamed:
                addoldbinary(pchange, oldfctx, fctx)
        else:
            maketext(pchange, basectx, ctx, fname)

        pdiff.addchange(pchange)

    for _path, copiedchange in copiedchanges.items():
        pdiff.addchange(copiedchange)
    for _path, movedchange in movedchanges.items():
        pdiff.addchange(movedchange)


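# Worked example of the classification in addadded() above (file names are
# hypothetical): after `hg mv a b`, "a" becomes a MOVE_AWAY change with
# awayPaths=[b'b'] and "b" becomes MOVE_HERE. If the same commit also copies
# "a" to "c", the MOVE_AWAY on "a" is upgraded to MULTICOPY and "c" becomes
# COPY_HERE. A copy that keeps the source in place produces COPY_AWAY on the
# source and COPY_HERE on the destination.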
def creatediff(basectx, ctx):
    """create a Differential Diff"""
    repo = ctx.repo()
    repophid = getrepophid(repo)
    # Create a "Differential Diff" via "differential.creatediff" API
    pdiff = phabdiff(
        sourceControlBaseRevision=b'%s' % basectx.p1().hex(),
        branch=b'%s' % ctx.branch(),
    )
    modified, added, removed, _d, _u, _i, _c = basectx.p1().status(ctx)
    # addadded will remove moved files from removed, so addremoved won't get
    # them
    addadded(pdiff, basectx, ctx, added, removed)
    addmodified(pdiff, basectx, ctx, modified)
    addremoved(pdiff, basectx, ctx, removed)
    if repophid:
        pdiff.repositoryPHID = repophid
    diff = callconduit(
        repo.ui,
        b'differential.creatediff',
        pycompat.byteskwargs(attr.asdict(pdiff)),
    )
    if not diff:
        if basectx != ctx:
            msg = _(b'cannot create diff for %s::%s') % (basectx, ctx)
        else:
            msg = _(b'cannot create diff for %s') % ctx
        raise error.Abort(msg)
    return diff


def writediffproperties(ctxs, diff):
    """write metadata to diff so patches can be applied losslessly

    ``ctxs`` is the list of commits that created the diff, in ascending order.
    The list is generally a single commit, but may be several when using
    ``phabsend --fold``.
    """
    # creatediff returns with a diffid but query returns with an id
    diffid = diff.get(b'diffid', diff.get(b'id'))
    basectx = ctxs[0]
    tipctx = ctxs[-1]

    params = {
        b'diff_id': diffid,
        b'name': b'hg:meta',
        b'data': templatefilters.json(
            {
                b'user': tipctx.user(),
                b'date': b'%d %d' % tipctx.date(),
                b'branch': tipctx.branch(),
                b'node': tipctx.hex(),
                b'parent': basectx.p1().hex(),
            }
        ),
    }
    callconduit(basectx.repo().ui, b'differential.setdiffproperty', params)

    commits = {}
    for ctx in ctxs:
        commits[ctx.hex()] = {
            b'author': stringutil.person(ctx.user()),
            b'authorEmail': stringutil.email(ctx.user()),
            b'time': int(ctx.date()[0]),
            b'commit': ctx.hex(),
            b'parents': [ctx.p1().hex()],
            b'branch': ctx.branch(),
        }
    params = {
        b'diff_id': diffid,
        b'name': b'local:commits',
        b'data': templatefilters.json(commits),
    }
    callconduit(basectx.repo().ui, b'differential.setdiffproperty', params)


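# Sketch of the "hg:meta" property written above (all values hypothetical):
#
#     {
#         "user": "Jane Doe <jdoe@example.com>",
#         "date": "1631888382 0",
#         "branch": "default",
#         "node": "3244dc4a33342b4d91ad534ae091685244ac5ed4",
#         "parent": "7b4185ab5d16acf98e41d566be38c5dbea10878d",
#     }
#
# "local:commits" stores one similar record per commit in ``ctxs``, keyed by
# commit hash, which is what allows patches to be reapplied losslessly later.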
def createdifferentialrevision(
    ctxs,
    revid=None,
    parentrevphid=None,
    oldbasenode=None,
    oldnode=None,
    olddiff=None,
    actions=None,
    comment=None,
):
    """create or update a Differential Revision

    If revid is None, create a new Differential Revision, otherwise update
    revid. If parentrevphid is not None, set it as a dependency.

    If there is a single commit for the new Differential Revision, ``ctxs`` will
    be a list of that single context. Otherwise, it is a list that covers the
    range of changes for the differential, where ``ctxs[0]`` is the first change
    to include and ``ctxs[-1]`` is the last.

    If oldnode is not None, check if the patch content (without commit message
    and metadata) has changed before creating another diff. For a Revision with
    a single commit, ``oldbasenode`` and ``oldnode`` have the same value. For a
    Revision covering multiple commits, ``oldbasenode`` corresponds to
    ``ctxs[0]`` the previous time this Revision was posted, and ``oldnode``
    corresponds to ``ctxs[-1]``.

    If actions is not None, they will be appended to the transaction.
    """
    ctx = ctxs[-1]
    basectx = ctxs[0]

    repo = ctx.repo()
    if oldnode:
        diffopts = mdiff.diffopts(git=True, context=32767)
        unfi = repo.unfiltered()
        oldctx = unfi[oldnode]
        oldbasectx = unfi[oldbasenode]
        neednewdiff = getdiff(basectx, ctx, diffopts) != getdiff(
            oldbasectx, oldctx, diffopts
        )
    else:
        neednewdiff = True

    transactions = []
    if neednewdiff:
        diff = creatediff(basectx, ctx)
        transactions.append({b'type': b'update', b'value': diff[b'phid']})
        if comment:
            transactions.append({b'type': b'comment', b'value': comment})
    else:
        # Even if we don't need to upload a new diff because the patch content
        # did not change, we might still need to update the diff metadata so
        # pushers can know the correct node metadata.
        assert olddiff
        diff = olddiff
    writediffproperties(ctxs, diff)

    # Set the parent Revision every time, so commit re-ordering is picked up
    if parentrevphid:
        transactions.append(
            {b'type': b'parents.set', b'value': [parentrevphid]}
        )

    if actions:
        transactions += actions

    # When folding multiple local commits into a single review, arcanist will
    # take the summary line of the first commit as the title, and then
    # concatenate the rest of the remaining messages (including each of their
    # first lines) to the rest of the first commit message (each separated by
    # an empty line), and use that as the summary field. Do the same here.
    # For commits with only a one line message, there is no summary field, as
    # this gets assigned to the title.
    fields = util.sortdict()  # sorted for stable wire protocol in tests

    for i, _ctx in enumerate(ctxs):
        # Parse commit message and update related fields.
        desc = _ctx.description()
        info = callconduit(
            repo.ui, b'differential.parsecommitmessage', {b'corpus': desc}
        )

        for k in [b'title', b'summary', b'testPlan']:
            v = info[b'fields'].get(k)
            if not v:
                continue

            if i == 0:
                # Title, summary and test plan (if present) are taken verbatim
                # for the first commit.
                fields[k] = v.rstrip()
                continue
            elif k == b'title':
                # Add subsequent titles (i.e. the first line of the commit
                # message) back to the summary.
                k = b'summary'

            # Append any current field to the existing composite field
            fields[k] = b'\n\n'.join(filter(None, [fields.get(k), v.rstrip()]))

    for k, v in fields.items():
        transactions.append({b'type': k, b'value': v})

    params = {b'transactions': transactions}
    if revid is not None:
        # Update an existing Differential Revision
        params[b'objectIdentifier'] = revid

    revision = callconduit(repo.ui, b'differential.revision.edit', params)
    if not revision:
        if len(ctxs) == 1:
            msg = _(b'cannot create revision for %s') % ctx
        else:
            msg = _(b'cannot create revision for %s::%s') % (basectx, ctx)
        raise error.Abort(msg)

    return revision, diff


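# With hypothetical PHIDs, the differential.revision.edit call above might
# carry transactions such as:
#
#     [
#         {b'type': b'update', b'value': b'PHID-DIFF-abcd1234'},
#         {b'type': b'parents.set', b'value': [b'PHID-DREV-efgh5678']},
#         {b'type': b'title', b'value': b'first line of the message'},
#         {b'type': b'summary', b'value': b'rest of the message'},
#     ]
#
# plus b'objectIdentifier' in params when updating an existing Revision.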
def userphids(ui, names):
    """convert user names to PHIDs"""
    names = [name.lower() for name in names]
    query = {b'constraints': {b'usernames': names}}
    result = callconduit(ui, b'user.search', query)
    # The API does not treat an unknown username as an error, so check here
    # whether any of the requested names were missed.
    data = result[b'data']
    resolved = {entry[b'fields'][b'username'].lower() for entry in data}
    unresolved = set(names) - resolved
    if unresolved:
        raise error.Abort(
            _(b'unknown username: %s') % b' '.join(sorted(unresolved))
        )
    return [entry[b'phid'] for entry in data]


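# Sketch (hypothetical values): userphids(ui, [b'Alice']) searches with
# constraints {b'usernames': [b'alice']} and returns one PHID per resolved
# name, e.g. [b'PHID-USER-abcd1234']; unknown names abort rather than being
# silently dropped.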
def _print_phabsend_action(ui, ctx, newrevid, action):
    """print the ``action`` that occurred when posting ``ctx`` for review

    This is a utility function for the sending phase of ``phabsend``, which
    makes it easier to show a status for all local commits with ``--fold``.
    """
    actiondesc = ui.label(
        {
            b'created': _(b'created'),
            b'skipped': _(b'skipped'),
            b'updated': _(b'updated'),
        }[action],
        b'phabricator.action.%s' % action,
    )
    drevdesc = ui.label(b'D%d' % newrevid, b'phabricator.drev')
    summary = cmdutil.format_changeset_summary(ui, ctx, b'phabsend')
    ui.write(_(b'%s - %s - %s\n') % (drevdesc, actiondesc, summary))


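# The line written above has the fixed shape "D<id> - <action> - <summary>",
# where the summary part comes from format_changeset_summary(); e.g.
# (hypothetical revision and summary):
#
#     D123 - created - 5:1d5cf7a1b2c3 add a widget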
def _amend_diff_properties(unfi, drevid, newnodes, diff):
    """update the local commit list for the ``diff`` associated with ``drevid``

    This is a utility function for the amend phase of ``phabsend``, which
    converts failures to warning messages.
    """
    _debug(
        unfi.ui,
        b"new commits: %s\n" % stringutil.pprint([short(n) for n in newnodes]),
    )

    try:
        writediffproperties([unfi[newnode] for newnode in newnodes], diff)
    except util.urlerr.urlerror:
        # If it fails just warn and keep going, otherwise the DREV
        # associations will be lost
        unfi.ui.warnnoi18n(b'Failed to update metadata for D%d\n' % drevid)


@vcrcommand(
    b'phabsend',
    [
        (b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
        (b'', b'amend', True, _(b'update commit messages')),
        (b'', b'reviewer', [], _(b'specify reviewers')),
        (b'', b'blocker', [], _(b'specify blocking reviewers')),
        (
            b'm',
            b'comment',
            b'',
            _(b'add a comment to Revisions with new/updated Diffs'),
        ),
        (b'', b'confirm', None, _(b'ask for confirmation before sending')),
        (b'', b'fold', False, _(b'combine the revisions into one review')),
    ],
    _(b'REV [OPTIONS]'),
    helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
def phabsend(ui, repo, *revs, **opts):
    """upload changesets to Phabricator

    If multiple revisions are specified, they will be sent as a stack with a
    linear dependency relationship, using the order specified by the revset.

    The first time changesets are uploaded, local tags will be created to
    maintain the association. After the first time, phabsend will check
    obsstore and tags information so it can figure out whether to update an
    existing Differential Revision, or create a new one.

    If --amend is set, update commit messages so they have the
    ``Differential Revision`` URL, and remove related tags. This is similar
    to what arcanist will do, and is more desirable in author-push workflows.
    Otherwise, use local tags to record the ``Differential Revision``
    association.

    The --confirm option lets you confirm changesets before sending them. You
    can also add the following to your configuration file to make it the
    default behaviour::

        [phabsend]
        confirm = true

    By default, a separate review will be created for each commit that is
    selected, and will have the same parent/child relationship in Phabricator.
    If ``--fold`` is set, multiple commits are rolled up into a single review
    as if diffed from the parent of the first revision to the last. The commit
    messages are concatenated in the summary field on Phabricator.

    phabsend will check obsstore and the above association to decide whether to
    update an existing Differential Revision, or create a new one.
    """
    opts = pycompat.byteskwargs(opts)
    revs = list(revs) + opts.get(b'rev', [])
    revs = logcmdutil.revrange(repo, revs)
    revs.sort()  # ascending order to preserve topological parent/child in phab

    if not revs:
        raise error.Abort(_(b'phabsend requires at least one changeset'))
    if opts.get(b'amend'):
        cmdutil.checkunfinished(repo)

    ctxs = [repo[rev] for rev in revs]

    if any(c for c in ctxs if c.obsolete()):
        raise error.Abort(_(b"obsolete commits cannot be posted for review"))

    # Ensure the local commits are an unbroken range. The semantics of the
    # --fold option imply this, and the auto restacking of orphans requires
    # it. Otherwise A+C in A->B->C will cause B to be orphaned, and C' to
    # get A' as a parent.
    def _fail_nonlinear_revs(revs, revtype):
        badnodes = [repo[r].node() for r in revs]
        raise error.Abort(
            _(b"cannot phabsend multiple %s revisions: %s")
            % (revtype, scmutil.nodesummaries(repo, badnodes)),
            hint=_(b"the revisions must form a linear chain"),
        )

    heads = repo.revs(b'heads(%ld)', revs)
    if len(heads) > 1:
        _fail_nonlinear_revs(heads, b"head")

    roots = repo.revs(b'roots(%ld)', revs)
    if len(roots) > 1:
        _fail_nonlinear_revs(roots, b"root")

    fold = opts.get(b'fold')
    if fold:
        if len(revs) == 1:
            # TODO: just switch to --no-fold instead?
            raise error.Abort(_(b"cannot fold a single revision"))

        # There's no clear way to manage multiple commits with a Dxxx tag, so
        # require the amend option. (We could append "_nnn", but then it
        # becomes jumbled if earlier commits are added to an update.) It should
        # lock the repo and ensure that the range is editable, but that would
        # make the code pretty convoluted. The default behavior of `arc` is to
        # create a new review anyway.
        if not opts.get(b"amend"):
            raise error.Abort(_(b"cannot fold with --no-amend"))

        # It might be possible to bucketize the revisions by the DREV value,
        # and iterate over those groups when posting, and then again when
        # amending. But for simplicity, require all selected revisions to be
        # for the same DREV (if present). Adding local revisions to an
        # existing DREV is acceptable.
        drevmatchers = [
            _differentialrevisiondescre.search(ctx.description())
            for ctx in ctxs
        ]
        if len({m.group('url') for m in drevmatchers if m}) > 1:
            raise error.Abort(
                _(b"cannot fold revisions with different DREV values")
            )

    # {newnode: (oldnode, olddiff, olddrev)}
    oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])

    confirm = ui.configbool(b'phabsend', b'confirm')
    confirm |= bool(opts.get(b'confirm'))
    if confirm:
        confirmed = _confirmbeforesend(repo, revs, oldmap)
        if not confirmed:
            raise error.Abort(_(b'phabsend cancelled'))

    actions = []
    reviewers = opts.get(b'reviewer', [])
    blockers = opts.get(b'blocker', [])
    phids = []
    if reviewers:
        phids.extend(userphids(repo.ui, reviewers))
    if blockers:
        phids.extend(
            map(
                lambda phid: b'blocking(%s)' % phid,
                userphids(repo.ui, blockers),
            )
        )
    if phids:
        actions.append({b'type': b'reviewers.add', b'value': phids})

    drevids = []  # [int]
    diffmap = {}  # {newnode: diff}

    # Send patches one by one so we know their Differential Revision PHIDs and
    # can provide dependency relationship
    lastrevphid = None
    for ctx in ctxs:
        if fold:
            ui.debug(b'sending rev %d::%d\n' % (ctx.rev(), ctxs[-1].rev()))
        else:
            ui.debug(b'sending rev %d\n' % ctx.rev())

        # Get Differential Revision ID
        oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
        oldbasenode, oldbasediff, oldbaserevid = oldnode, olddiff, revid

        if fold:
            oldbasenode, oldbasediff, oldbaserevid = oldmap.get(
                ctxs[-1].node(), (None, None, None)
            )

        if oldnode != ctx.node() or opts.get(b'amend'):
            # Create or update Differential Revision
            revision, diff = createdifferentialrevision(
                ctxs if fold else [ctx],
                revid,
                lastrevphid,
                oldbasenode,
                oldnode,
                olddiff,
                actions,
                opts.get(b'comment'),
            )

            if fold:
                for ctx in ctxs:
                    diffmap[ctx.node()] = diff
            else:
                diffmap[ctx.node()] = diff

            newrevid = int(revision[b'object'][b'id'])
            newrevphid = revision[b'object'][b'phid']
            if revid:
                action = b'updated'
            else:
                action = b'created'

            # Create a local tag to note the association, if commit message
            # does not have it already
            if not fold:
                m = _differentialrevisiondescre.search(ctx.description())
                if not m or int(m.group('id')) != newrevid:
                    tagname = b'D%d' % newrevid
                    tags.tag(
                        repo,
                        tagname,
                        ctx.node(),
                        message=None,
                        user=None,
                        date=None,
                        local=True,
                    )
        else:
            # Nothing changed. But still set "newrevphid" so the next revision
            # could depend on this one and "newrevid" for the summary line.
            newrevphid = querydrev(repo.ui, b'%d' % revid)[0][b'phid']
            newrevid = revid
            action = b'skipped'

        drevids.append(newrevid)
        lastrevphid = newrevphid

        if fold:
            for c in ctxs:
                if oldmap.get(c.node(), (None, None, None))[2]:
                    action = b'updated'
                else:
                    action = b'created'
                _print_phabsend_action(ui, c, newrevid, action)
            break

        _print_phabsend_action(ui, ctx, newrevid, action)

    # Update commit messages and remove tags
    if opts.get(b'amend'):
        unfi = repo.unfiltered()
        drevs = callconduit(ui, b'differential.query', {b'ids': drevids})
        with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
            # Eagerly evaluate commits to restabilize before creating new
            # commits. The selected revisions are excluded because they are
            # automatically restacked as part of the submission process.
            restack = [
                c
                for c in repo.set(
                    b"(%ld::) - (%ld) - unstable() - obsolete() - public()",
1540 revs,
1534 revs,
1541 revs,
1535 revs,
1542 )
1536 )
1543 ]
1537 ]
1544 wnode = unfi[b'.'].node()
1538 wnode = unfi[b'.'].node()
1545 mapping = {} # {oldnode: [newnode]}
1539 mapping = {} # {oldnode: [newnode]}
1546 newnodes = []
1540 newnodes = []
1547
1541
1548 drevid = drevids[0]
1542 drevid = drevids[0]
1549
1543
1550 for i, rev in enumerate(revs):
1544 for i, rev in enumerate(revs):
1551 old = unfi[rev]
1545 old = unfi[rev]
1552 if not fold:
1546 if not fold:
1553 drevid = drevids[i]
1547 drevid = drevids[i]
1554 drev = [d for d in drevs if int(d[b'id']) == drevid][0]
1548 drev = [d for d in drevs if int(d[b'id']) == drevid][0]
1555
1549
1556 newdesc = get_amended_desc(drev, old, fold)
1550 newdesc = get_amended_desc(drev, old, fold)
1557 # Make sure commit message contain "Differential Revision"
1551 # Make sure commit message contain "Differential Revision"
1558 if (
1552 if (
1559 old.description() != newdesc
1553 old.description() != newdesc
1560 or old.p1().node() in mapping
1554 or old.p1().node() in mapping
1561 or old.p2().node() in mapping
1555 or old.p2().node() in mapping
1562 ):
1556 ):
1563 if old.phase() == phases.public:
1557 if old.phase() == phases.public:
1564 ui.warn(
1558 ui.warn(
1565 _(b"warning: not updating public commit %s\n")
1559 _(b"warning: not updating public commit %s\n")
1566 % scmutil.formatchangeid(old)
1560 % scmutil.formatchangeid(old)
1567 )
1561 )
1568 continue
1562 continue
1569 parents = [
1563 parents = [
1570 mapping.get(old.p1().node(), (old.p1(),))[0],
1564 mapping.get(old.p1().node(), (old.p1(),))[0],
1571 mapping.get(old.p2().node(), (old.p2(),))[0],
1565 mapping.get(old.p2().node(), (old.p2(),))[0],
1572 ]
1566 ]
1573 newdesc = rewriteutil.update_hash_refs(
1567 newdesc = rewriteutil.update_hash_refs(
1574 repo,
1568 repo,
1575 newdesc,
1569 newdesc,
1576 mapping,
1570 mapping,
1577 )
1571 )
1578 new = context.metadataonlyctx(
1572 new = context.metadataonlyctx(
1579 repo,
1573 repo,
1580 old,
1574 old,
1581 parents=parents,
1575 parents=parents,
1582 text=newdesc,
1576 text=newdesc,
1583 user=old.user(),
1577 user=old.user(),
1584 date=old.date(),
1578 date=old.date(),
1585 extra=old.extra(),
1579 extra=old.extra(),
1586 )
1580 )
1587
1581
1588 newnode = new.commit()
1582 newnode = new.commit()
1589
1583
1590 mapping[old.node()] = [newnode]
1584 mapping[old.node()] = [newnode]
1591
1585
1592 if fold:
1586 if fold:
1593 # Defer updating the (single) Diff until all nodes are
1587 # Defer updating the (single) Diff until all nodes are
1594 # collected. No tags were created, so none need to be
1588 # collected. No tags were created, so none need to be
1595 # removed.
1589 # removed.
1596 newnodes.append(newnode)
1590 newnodes.append(newnode)
1597 continue
1591 continue
1598
1592
1599 _amend_diff_properties(
1593 _amend_diff_properties(
1600 unfi, drevid, [newnode], diffmap[old.node()]
1594 unfi, drevid, [newnode], diffmap[old.node()]
1601 )
1595 )
1602
1596
1603 # Remove local tags since it's no longer necessary
1597 # Remove local tags since it's no longer necessary
1604 tagname = b'D%d' % drevid
1598 tagname = b'D%d' % drevid
1605 if tagname in repo.tags():
1599 if tagname in repo.tags():
1606 tags.tag(
1600 tags.tag(
1607 repo,
1601 repo,
1608 tagname,
1602 tagname,
1609 repo.nullid,
1603 repo.nullid,
1610 message=None,
1604 message=None,
1611 user=None,
1605 user=None,
1612 date=None,
1606 date=None,
1613 local=True,
1607 local=True,
1614 )
1608 )
1615 elif fold:
1609 elif fold:
1616 # When folding multiple commits into one review with
1610 # When folding multiple commits into one review with
1617 # --fold, track even the commits that weren't amended, so
1611 # --fold, track even the commits that weren't amended, so
1618 # that their association isn't lost if the properties are
1612 # that their association isn't lost if the properties are
1619 # rewritten below.
1613 # rewritten below.
1620 newnodes.append(old.node())
1614 newnodes.append(old.node())
1621
1615
1622 # If the submitted commits are public, no amend takes place so
1616 # If the submitted commits are public, no amend takes place so
1623 # there are no newnodes and therefore no diff update to do.
1617 # there are no newnodes and therefore no diff update to do.
1624 if fold and newnodes:
1618 if fold and newnodes:
1625 diff = diffmap[old.node()]
1619 diff = diffmap[old.node()]
1626
1620
1627 # The diff object in diffmap doesn't have the local commits
1621 # The diff object in diffmap doesn't have the local commits
1628 # because that could be returned from differential.creatediff,
1622 # because that could be returned from differential.creatediff,
1629 # not differential.querydiffs. So use the queried diff (if
1623 # not differential.querydiffs. So use the queried diff (if
1630 # present), or force the amend (a new revision is being posted.)
1624 # present), or force the amend (a new revision is being posted.)
1631 if not olddiff or set(newnodes) != getlocalcommits(olddiff):
1625 if not olddiff or set(newnodes) != getlocalcommits(olddiff):
1632 _debug(ui, b"updating local commit list for D%d\n" % drevid)
1626 _debug(ui, b"updating local commit list for D%d\n" % drevid)
1633 _amend_diff_properties(unfi, drevid, newnodes, diff)
1627 _amend_diff_properties(unfi, drevid, newnodes, diff)
1634 else:
1628 else:
1635 _debug(
1629 _debug(
1636 ui,
1630 ui,
1637 b"local commit list for D%d is already up-to-date\n"
1631 b"local commit list for D%d is already up-to-date\n"
1638 % drevid,
1632 % drevid,
1639 )
1633 )
1640 elif fold:
1634 elif fold:
1641 _debug(ui, b"no newnodes to update\n")
1635 _debug(ui, b"no newnodes to update\n")
1642
1636
1643 # Restack any children of first-time submissions that were orphaned
1637 # Restack any children of first-time submissions that were orphaned
1644 # in the process. The ctx won't report that it is an orphan until
1638 # in the process. The ctx won't report that it is an orphan until
1645 # the cleanup takes place below.
1639 # the cleanup takes place below.
1646 for old in restack:
1640 for old in restack:
1647 parents = [
1641 parents = [
1648 mapping.get(old.p1().node(), (old.p1(),))[0],
1642 mapping.get(old.p1().node(), (old.p1(),))[0],
1649 mapping.get(old.p2().node(), (old.p2(),))[0],
1643 mapping.get(old.p2().node(), (old.p2(),))[0],
1650 ]
1644 ]
1651 new = context.metadataonlyctx(
1645 new = context.metadataonlyctx(
1652 repo,
1646 repo,
1653 old,
1647 old,
1654 parents=parents,
1648 parents=parents,
1655 text=rewriteutil.update_hash_refs(
1649 text=rewriteutil.update_hash_refs(
1656 repo, old.description(), mapping
1650 repo, old.description(), mapping
1657 ),
1651 ),
1658 user=old.user(),
1652 user=old.user(),
1659 date=old.date(),
1653 date=old.date(),
1660 extra=old.extra(),
1654 extra=old.extra(),
1661 )
1655 )
1662
1656
1663 newnode = new.commit()
1657 newnode = new.commit()
1664
1658
1665 # Don't obsolete unselected descendants of nodes that have not
1659 # Don't obsolete unselected descendants of nodes that have not
1666 # been changed in this transaction- that results in an error.
1660 # been changed in this transaction- that results in an error.
1667 if newnode != old.node():
1661 if newnode != old.node():
1668 mapping[old.node()] = [newnode]
1662 mapping[old.node()] = [newnode]
1669 _debug(
1663 _debug(
1670 ui,
1664 ui,
1671 b"restabilizing %s as %s\n"
1665 b"restabilizing %s as %s\n"
1672 % (short(old.node()), short(newnode)),
1666 % (short(old.node()), short(newnode)),
1673 )
1667 )
1674 else:
1668 else:
1675 _debug(
1669 _debug(
1676 ui,
1670 ui,
1677 b"not restabilizing unchanged %s\n" % short(old.node()),
1671 b"not restabilizing unchanged %s\n" % short(old.node()),
1678 )
1672 )
1679
1673
1680 scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
1674 scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
1681 if wnode in mapping:
1675 if wnode in mapping:
1682 unfi.setparents(mapping[wnode][0])
1676 unfi.setparents(mapping[wnode][0])
1683
1677
1684
1678
1685 # Map from "hg:meta" keys to header understood by "hg import". The order is
1679 # Map from "hg:meta" keys to header understood by "hg import". The order is
1686 # consistent with "hg export" output.
1680 # consistent with "hg export" output.
1687 _metanamemap = util.sortdict(
1681 _metanamemap = util.sortdict(
1688 [
1682 [
1689 (b'user', b'User'),
1683 (b'user', b'User'),
1690 (b'date', b'Date'),
1684 (b'date', b'Date'),
1691 (b'branch', b'Branch'),
1685 (b'branch', b'Branch'),
1692 (b'node', b'Node ID'),
1686 (b'node', b'Node ID'),
1693 (b'parent', b'Parent '),
1687 (b'parent', b'Parent '),
1694 ]
1688 ]
1695 )
1689 )
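
# Illustration added in editing (not part of the original source): given the
# "hg:meta" example shown in getdiffmeta() below, the mapping above makes
# readpatch() emit patch headers in "hg export" order, e.g.:
#
#   # HG changeset patch
#   # User Foo Bar <foo@example.com>
#   # Date 1499571514 25200
#   # Branch default
#   # Node ID 98c08acae292b2faf60a279b4189beb6cff1414d
#   # Parent  6d0abad76b30e4724a37ab8721d630394070fe16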


def _confirmbeforesend(repo, revs, oldmap):
    url, token = readurltoken(repo.ui)
    ui = repo.ui
    for rev in revs:
        ctx = repo[rev]
        oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
        if drevid:
            drevdesc = ui.label(b'D%d' % drevid, b'phabricator.drev')
        else:
            drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')

        ui.write(
            _(b'%s - %s\n')
            % (
                drevdesc,
                cmdutil.format_changeset_summary(ui, ctx, b'phabsend'),
            )
        )

    if ui.promptchoice(
        _(b'Send the above changes to %s (Y/n)?$$ &Yes $$ &No') % url
    ):
        return False

    return True


_knownstatusnames = {
    b'accepted',
    b'needsreview',
    b'needsrevision',
    b'closed',
    b'abandoned',
    b'changesplanned',
}


def _getstatusname(drev):
    """get normalized status name from a Differential Revision"""
    return drev[b'statusName'].replace(b' ', b'').lower()
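
# For example (editor's note, not part of the original source):
#   _getstatusname({b'statusName': b'Needs Review'}) == b'needsreview'
# which matches one of the _knownstatusnames entries above.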


# Small language to specify differential revisions. Supported symbols: (),
# :X, +, and -.

_elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    b'(': (12, None, (b'group', 1, b')'), None, None),
    b':': (8, None, (b'ancestors', 8), None, None),
    b'&': (5, None, None, (b'and_', 5), None),
    b'+': (4, None, None, (b'add', 4), None),
    b'-': (4, None, None, (b'sub', 4), None),
    b')': (0, None, None, None, None),
    b'symbol': (0, b'symbol', None, None, None),
    b'end': (0, None, None, None, None),
}


def _tokenize(text):
    view = memoryview(text)  # zero-copy slice
    special = b'():+-& '
    pos = 0
    length = len(text)
    while pos < length:
        symbol = b''.join(
            itertools.takewhile(
                lambda ch: ch not in special, pycompat.iterbytestr(view[pos:])
            )
        )
        if symbol:
            yield (b'symbol', symbol, pos)
            pos += len(symbol)
        else:  # special char, ignore space
            if text[pos : pos + 1] != b' ':
                yield (text[pos : pos + 1], None, pos)
            pos += 1
    yield (b'end', None, pos)
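
# Worked example added in editing (not part of the original source):
# tokenizing the spec b':D6+8' yields, in order,
#   (b':', None, 0), (b'symbol', b'D6', 1), (b'+', None, 3),
#   (b'symbol', b'8', 4), (b'end', None, 5)
# and _parse() below builds the tree
#   (b'add', (b'ancestors', (b'symbol', b'D6')), (b'symbol', b'8'))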


def _parse(text):
    tree, pos = parser.parser(_elements).parse(_tokenize(text))
    if pos != len(text):
        raise error.ParseError(b'invalid token', pos)
    return tree


def _parsedrev(symbol):
    """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
    if symbol.startswith(b'D') and symbol[1:].isdigit():
        return int(symbol[1:])
    if symbol.isdigit():
        return int(symbol)


def _prefetchdrevs(tree):
    """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
    drevs = set()
    ancestordrevs = set()
    op = tree[0]
    if op == b'symbol':
        r = _parsedrev(tree[1])
        if r:
            drevs.add(r)
    elif op == b'ancestors':
        r, a = _prefetchdrevs(tree[1])
        drevs.update(r)
        ancestordrevs.update(r)
        ancestordrevs.update(a)
    else:
        for t in tree[1:]:
            r, a = _prefetchdrevs(t)
            drevs.update(r)
            ancestordrevs.update(a)
    return drevs, ancestordrevs


def querydrev(ui, spec):
    """return a list of "Differential Revision" dicts

    spec is a string using a simple query language, see docstring in phabread
    for details.

    A "Differential Revision dict" looks like:

        {
            "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
            "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
            "auxiliary": {
                "phabricator:depends-on": [
                    "PHID-DREV-gbapp366kutjebt7agcd"
                ],
                "phabricator:projects": [],
            },
            "branch": "default",
            "ccs": [],
            "commits": [],
            "dateCreated": "1499181406",
            "dateModified": "1499182103",
            "diffs": [
                "3",
                "4",
            ],
            "hashes": [],
            "id": "2",
            "lineCount": "2",
            "phid": "PHID-DREV-672qvysjcczopag46qty",
            "properties": {},
            "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
            "reviewers": [],
            "sourcePath": null,
            "status": "0",
            "statusName": "Needs Review",
            "summary": "",
            "testPlan": "",
            "title": "example",
            "uri": "https://phab.example.com/D2",
        }
    """
    # TODO: replace differential.query and differential.querydiffs with
    # differential.diff.search because the former (and their output) are
    # frozen, and planned to be deprecated and removed.

    def fetch(params):
        """params -> single drev or None"""
        key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
        if key in prefetched:
            return prefetched[key]
        drevs = callconduit(ui, b'differential.query', params)
        # Fill prefetched with the result
        for drev in drevs:
            prefetched[drev[b'phid']] = drev
            prefetched[int(drev[b'id'])] = drev
        if key not in prefetched:
            raise error.Abort(
                _(b'cannot get Differential Revision %r') % params
            )
        return prefetched[key]

    def getstack(topdrevids):
        """given a top, get a stack from the bottom, [id] -> [id]"""
        visited = set()
        result = []
        queue = [{b'ids': [i]} for i in topdrevids]
        while queue:
            params = queue.pop()
            drev = fetch(params)
            if drev[b'id'] in visited:
                continue
            visited.add(drev[b'id'])
            result.append(int(drev[b'id']))
            auxiliary = drev.get(b'auxiliary', {})
            depends = auxiliary.get(b'phabricator:depends-on', [])
            for phid in depends:
                queue.append({b'phids': [phid]})
        result.reverse()
        return smartset.baseset(result)

    # Initialize prefetch cache
    prefetched = {}  # {id or phid: drev}

    tree = _parse(spec)
    drevs, ancestordrevs = _prefetchdrevs(tree)

    # developer config: phabricator.batchsize
    batchsize = ui.configint(b'phabricator', b'batchsize')

    # Prefetch Differential Revisions in batch
    tofetch = set(drevs)
    for r in ancestordrevs:
        tofetch.update(range(max(1, r - batchsize), r + 1))
    if drevs:
        fetch({b'ids': list(tofetch)})
    validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))

    # Walk through the tree, return smartsets
    def walk(tree):
        op = tree[0]
        if op == b'symbol':
            drev = _parsedrev(tree[1])
            if drev:
                return smartset.baseset([drev])
            elif tree[1] in _knownstatusnames:
                drevs = [
                    r
                    for r in validids
                    if _getstatusname(prefetched[r]) == tree[1]
                ]
                return smartset.baseset(drevs)
            else:
                raise error.Abort(_(b'unknown symbol: %s') % tree[1])
        elif op in {b'and_', b'add', b'sub'}:
            assert len(tree) == 3
            return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
        elif op == b'group':
            return walk(tree[1])
        elif op == b'ancestors':
            return getstack(walk(tree[1]))
        else:
            raise error.ProgrammingError(b'illegal tree: %r' % tree)

    return [prefetched[r] for r in walk(tree)]


def getdescfromdrev(drev):
    """get description (commit message) from "Differential Revision"

    This is similar to the differential.getcommitmessage API. But we only
    care about limited fields: title, summary, test plan, and URL.
    """
    title = drev[b'title']
    summary = drev[b'summary'].rstrip()
    testplan = drev[b'testPlan'].rstrip()
    if testplan:
        testplan = b'Test Plan:\n%s' % testplan
    uri = b'Differential Revision: %s' % drev[b'uri']
    return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))


def get_amended_desc(drev, ctx, folded):
    """similar to ``getdescfromdrev``, but supports a folded series of commits

    This is used when determining if an individual commit needs to have its
    message amended after posting it for review. The determination is made for
    each individual commit, even when they were folded into one review.
    """
    if not folded:
        return getdescfromdrev(drev)

    uri = b'Differential Revision: %s' % drev[b'uri']

    # Since the commit messages were combined when posting multiple commits
    # with --fold, the fields can't be read from Phabricator here, or *all*
    # affected local revisions will end up with the same commit message after
    # the URI is amended in. Append the DREV line, or update it if it
    # exists. At worst, this means commit message or test plan updates on
    # Phabricator aren't propagated back to the repository, but that seems
    # reasonable for the case where local commits are effectively combined
    # in Phabricator.
    m = _differentialrevisiondescre.search(ctx.description())
    if not m:
        return b'\n\n'.join([ctx.description(), uri])

    return _differentialrevisiondescre.sub(uri, ctx.description())


def getlocalcommits(diff):
    """get the set of local commits from a diff object

    See ``getdiffmeta()`` for an example diff object.
    """
    props = diff.get(b'properties') or {}
    commits = props.get(b'local:commits') or {}
    if len(commits) > 1:
        return {bin(c) for c in commits.keys()}

    # Storing the diff metadata predates storing `local:commits`, so continue
    # to use that in the --no-fold case.
    return {bin(getdiffmeta(diff).get(b'node', b'')) or None}


def getdiffmeta(diff):
    """get commit metadata (date, node, user, p1) from a diff object

    The metadata could be "hg:meta", sent by phabsend, like:

        "properties": {
            "hg:meta": {
                "branch": "default",
                "date": "1499571514 25200",
                "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
                "user": "Foo Bar <foo@example.com>",
                "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
            }
        }

    Or converted from "local:commits", sent by "arc", like:

        "properties": {
            "local:commits": {
                "98c08acae292b2faf60a279b4189beb6cff1414d": {
                    "author": "Foo Bar",
                    "authorEmail": "foo@example.com",
                    "branch": "default",
                    "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
                    "local": "1000",
                    "message": "...",
                    "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
                    "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
                    "summary": "...",
                    "tag": "",
                    "time": 1499546314,
                }
            }
        }

    Note: metadata extracted from "local:commits" will lose time zone
    information.
    """
    props = diff.get(b'properties') or {}
    meta = props.get(b'hg:meta')
    if not meta:
        if props.get(b'local:commits'):
            commit = sorted(props[b'local:commits'].values())[0]
            meta = {}
            if b'author' in commit and b'authorEmail' in commit:
                meta[b'user'] = b'%s <%s>' % (
                    commit[b'author'],
                    commit[b'authorEmail'],
                )
            if b'time' in commit:
                meta[b'date'] = b'%d 0' % int(commit[b'time'])
            if b'branch' in commit:
                meta[b'branch'] = commit[b'branch']
            node = commit.get(b'commit', commit.get(b'rev'))
            if node:
                meta[b'node'] = node
            if len(commit.get(b'parents', ())) >= 1:
                meta[b'parent'] = commit[b'parents'][0]
        else:
            meta = {}
    if b'date' not in meta and b'dateCreated' in diff:
        meta[b'date'] = b'%s 0' % diff[b'dateCreated']
    if b'branch' not in meta and diff.get(b'branch'):
        meta[b'branch'] = diff[b'branch']
    if b'parent' not in meta and diff.get(b'sourceControlBaseRevision'):
        meta[b'parent'] = diff[b'sourceControlBaseRevision']
    return meta


def _getdrevs(ui, stack, specs):
    """convert user supplied DREVSPECs into "Differential Revision" dicts

    See ``hg help phabread`` for how to specify each DREVSPEC.
    """
    if len(specs) > 0:

        def _formatspec(s):
            if stack:
                s = b':(%s)' % s
            return b'(%s)' % s

        spec = b'+'.join(pycompat.maplist(_formatspec, specs))

        drevs = querydrev(ui, spec)
        if drevs:
            return drevs

    raise error.Abort(_(b"empty DREVSPEC set"))
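
# Example added in editing (not part of the original source): with stack=True
# and specs=[b'D6', b'8'], the spec built above is b'(:(D6))+(:(8))' -- each
# value is parenthesized, prefixed with the stack operator ':', and the
# results are unioned with '+'.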
2086
2080
2087
2081
2088 def readpatch(ui, drevs, write):
2082 def readpatch(ui, drevs, write):
2089 """generate plain-text patch readable by 'hg import'
2083 """generate plain-text patch readable by 'hg import'
2090
2084
2091 write takes a list of (DREV, bytes), where DREV is the differential number
2085 write takes a list of (DREV, bytes), where DREV is the differential number
2092 (as bytes, without the "D" prefix) and the bytes are the text of a patch
2086 (as bytes, without the "D" prefix) and the bytes are the text of a patch
2093 to be imported. drevs is what "querydrev" returns, results of
2087 to be imported. drevs is what "querydrev" returns, results of
2094 "differential.query".
2088 "differential.query".
2095 """
2089 """
2096 # Prefetch hg:meta property for all diffs
2090 # Prefetch hg:meta property for all diffs
2097 diffids = sorted({max(int(v) for v in drev[b'diffs']) for drev in drevs})
2091 diffids = sorted({max(int(v) for v in drev[b'diffs']) for drev in drevs})
2098 diffs = callconduit(ui, b'differential.querydiffs', {b'ids': diffids})
2092 diffs = callconduit(ui, b'differential.querydiffs', {b'ids': diffids})
2099
2093
2100 patches = []
2094 patches = []
2101
2095
2102 # Generate patch for each drev
2096 # Generate patch for each drev
2103 for drev in drevs:
2097 for drev in drevs:
2104 ui.note(_(b'reading D%s\n') % drev[b'id'])
2098 ui.note(_(b'reading D%s\n') % drev[b'id'])
2105
2099
2106 diffid = max(int(v) for v in drev[b'diffs'])
2100 diffid = max(int(v) for v in drev[b'diffs'])
2107 body = callconduit(ui, b'differential.getrawdiff', {b'diffID': diffid})
2101 body = callconduit(ui, b'differential.getrawdiff', {b'diffID': diffid})
2108 desc = getdescfromdrev(drev)
2102 desc = getdescfromdrev(drev)
2109 header = b'# HG changeset patch\n'
2103 header = b'# HG changeset patch\n'
2110
2104
2111 # Try to preserve metadata from hg:meta property. Write hg patch
2105 # Try to preserve metadata from hg:meta property. Write hg patch
2112 # headers that can be read by the "import" command. See patchheadermap
2106 # headers that can be read by the "import" command. See patchheadermap
2113 # and extract in mercurial/patch.py for supported headers.
2107 # and extract in mercurial/patch.py for supported headers.
2114 meta = getdiffmeta(diffs[b'%d' % diffid])
2108 meta = getdiffmeta(diffs[b'%d' % diffid])
2115 for k in _metanamemap.keys():
2109 for k in _metanamemap.keys():
2116 if k in meta:
2110 if k in meta:
2117 header += b'# %s %s\n' % (_metanamemap[k], meta[k])
2111 header += b'# %s %s\n' % (_metanamemap[k], meta[k])
2118
2112
2119 content = b'%s%s\n%s' % (header, desc, body)
2113 content = b'%s%s\n%s' % (header, desc, body)
2120 patches.append((drev[b'id'], content))
2114 patches.append((drev[b'id'], content))
2121
2115
2122 # Write patches to the supplied callback
2116 # Write patches to the supplied callback
2123 write(patches)
2117 write(patches)
2124
2118
2125
2119
2126 @vcrcommand(
2120 @vcrcommand(
2127 b'phabread',
2121 b'phabread',
2128 [(b'', b'stack', False, _(b'read dependencies'))],
2122 [(b'', b'stack', False, _(b'read dependencies'))],
2129 _(b'DREVSPEC... [OPTIONS]'),
2123 _(b'DREVSPEC... [OPTIONS]'),
2130 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2124 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2131 optionalrepo=True,
2125 optionalrepo=True,
2132 )
2126 )
2133 def phabread(ui, repo, *specs, **opts):
2127 def phabread(ui, repo, *specs, **opts):
2134 """print patches from Phabricator suitable for importing
2128 """print patches from Phabricator suitable for importing
2135
2129
2136 DREVSPEC could be a Differential Revision identity, like ``D123``, or just
2130 DREVSPEC could be a Differential Revision identity, like ``D123``, or just
2137 the number ``123``. It could also have common operators like ``+``, ``-``,
2131 the number ``123``. It could also have common operators like ``+``, ``-``,
2138 ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
2132 ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
2139 select a stack. If multiple DREVSPEC values are given, the result is the
2133 select a stack. If multiple DREVSPEC values are given, the result is the
2140 union of each individually evaluated value. No attempt is currently made
2134 union of each individually evaluated value. No attempt is currently made
2141 to reorder the values to run from parent to child.
2135 to reorder the values to run from parent to child.
2142
2136
2143 ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
2137 ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
2144 could be used to filter patches by status. For performance reason, they
2138 could be used to filter patches by status. For performance reason, they
2145 only represent a subset of non-status selections and cannot be used alone.
2139 only represent a subset of non-status selections and cannot be used alone.
2146
2140
2147 For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8 and exclude
2141 For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8 and exclude
2148 D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
2142 D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
2149 stack up to D9.
2143 stack up to D9.
2150
2144
2151 If --stack is given, follow dependencies information and read all patches.
2145 If --stack is given, follow dependencies information and read all patches.
2152 It is equivalent to the ``:`` operator.
2146 It is equivalent to the ``:`` operator.
2153 """
2147 """
2154 opts = pycompat.byteskwargs(opts)
2148 opts = pycompat.byteskwargs(opts)
2155 drevs = _getdrevs(ui, opts.get(b'stack'), specs)
2149 drevs = _getdrevs(ui, opts.get(b'stack'), specs)
2156
2150
2157 def _write(patches):
2151 def _write(patches):
2158 for drev, content in patches:
2152 for drev, content in patches:
2159 ui.write(content)
2153 ui.write(content)
2160
2154
2161 readpatch(ui, drevs, _write)
2155 readpatch(ui, drevs, _write)
2162
2156
2163
2157
2164 @vcrcommand(
2158 @vcrcommand(
2165 b'phabimport',
2159 b'phabimport',
2166 [(b'', b'stack', False, _(b'import dependencies as well'))],
2160 [(b'', b'stack', False, _(b'import dependencies as well'))],
2167 _(b'DREVSPEC... [OPTIONS]'),
2161 _(b'DREVSPEC... [OPTIONS]'),
2168 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2162 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2169 )
2163 )
2170 def phabimport(ui, repo, *specs, **opts):
2164 def phabimport(ui, repo, *specs, **opts):
2171 """import patches from Phabricator for the specified Differential Revisions
2165 """import patches from Phabricator for the specified Differential Revisions
2172
2166
2173 The patches are read and applied starting at the parent of the working
2167 The patches are read and applied starting at the parent of the working
2174 directory.
2168 directory.
2175
2169
2176 See ``hg help phabread`` for how to specify DREVSPEC.
2170 See ``hg help phabread`` for how to specify DREVSPEC.
2177 """
2171 """
2178 opts = pycompat.byteskwargs(opts)
2172 opts = pycompat.byteskwargs(opts)
2179
2173
2180 # --bypass avoids losing exec and symlink bits when importing on Windows,
2174 # --bypass avoids losing exec and symlink bits when importing on Windows,
2181 # and allows importing with a dirty wdir. It also aborts instead of leaving
2175 # and allows importing with a dirty wdir. It also aborts instead of leaving
2182 # rejects.
2176 # rejects.
2183 opts[b'bypass'] = True
2177 opts[b'bypass'] = True
2184
2178
2185 # Mandatory default values, synced with commands.import
2179 # Mandatory default values, synced with commands.import
2186 opts[b'strip'] = 1
2180 opts[b'strip'] = 1
2187 opts[b'prefix'] = b''
2181 opts[b'prefix'] = b''
2188 # Evolve 9.3.0 assumes this key is present in cmdutil.tryimportone()
2182 # Evolve 9.3.0 assumes this key is present in cmdutil.tryimportone()
2189 opts[b'obsolete'] = False
2183 opts[b'obsolete'] = False
2190
2184
2191 if ui.configbool(b'phabimport', b'secret'):
2185 if ui.configbool(b'phabimport', b'secret'):
2192 opts[b'secret'] = True
2186 opts[b'secret'] = True
2193 if ui.configbool(b'phabimport', b'obsolete'):
2187 if ui.configbool(b'phabimport', b'obsolete'):
2194 opts[b'obsolete'] = True # Handled by evolve wrapping tryimportone()
2188 opts[b'obsolete'] = True # Handled by evolve wrapping tryimportone()
2195
2189
2196 def _write(patches):
2190 def _write(patches):
2197 parents = repo[None].parents()
2191 parents = repo[None].parents()
2198
2192
2199 with repo.wlock(), repo.lock(), repo.transaction(b'phabimport'):
2193 with repo.wlock(), repo.lock(), repo.transaction(b'phabimport'):
2200 for drev, contents in patches:
2194 for drev, contents in patches:
2201 ui.status(_(b'applying patch from D%s\n') % drev)
2195 ui.status(_(b'applying patch from D%s\n') % drev)
2202
2196
2203 with patch.extract(ui, io.BytesIO(contents)) as patchdata:
2197 with patch.extract(ui, io.BytesIO(contents)) as patchdata:
2204 msg, node, rej = cmdutil.tryimportone(
2198 msg, node, rej = cmdutil.tryimportone(
2205 ui,
2199 ui,
2206 repo,
2200 repo,
2207 patchdata,
2201 patchdata,
2208 parents,
2202 parents,
2209 opts,
2203 opts,
2210 [],
2204 [],
2211 None, # Never update wdir to another revision
2205 None, # Never update wdir to another revision
2212 )
2206 )
2213
2207
2214 if not node:
2208 if not node:
2215 raise error.Abort(_(b'D%s: no diffs found') % drev)
2209 raise error.Abort(_(b'D%s: no diffs found') % drev)
2216
2210
2217 ui.note(msg + b'\n')
2211 ui.note(msg + b'\n')
2218 parents = [repo[node]]
2212 parents = [repo[node]]
2219
2213
2220 drevs = _getdrevs(ui, opts.get(b'stack'), specs)
2214 drevs = _getdrevs(ui, opts.get(b'stack'), specs)
2221
2215
2222 readpatch(repo.ui, drevs, _write)
2216 readpatch(repo.ui, drevs, _write)
2223
2217
2224
2218
2225 @vcrcommand(
2219 @vcrcommand(
2226 b'phabupdate',
2220 b'phabupdate',
2227 [
2221 [
2228 (b'', b'accept', False, _(b'accept revisions')),
2222 (b'', b'accept', False, _(b'accept revisions')),
2229 (b'', b'reject', False, _(b'reject revisions')),
2223 (b'', b'reject', False, _(b'reject revisions')),
2230 (b'', b'request-review', False, _(b'request review on revisions')),
2224 (b'', b'request-review', False, _(b'request review on revisions')),
2231 (b'', b'abandon', False, _(b'abandon revisions')),
2225 (b'', b'abandon', False, _(b'abandon revisions')),
2232 (b'', b'reclaim', False, _(b'reclaim revisions')),
2226 (b'', b'reclaim', False, _(b'reclaim revisions')),
2233 (b'', b'close', False, _(b'close revisions')),
2227 (b'', b'close', False, _(b'close revisions')),
2234 (b'', b'reopen', False, _(b'reopen revisions')),
2228 (b'', b'reopen', False, _(b'reopen revisions')),
2235 (b'', b'plan-changes', False, _(b'plan changes for revisions')),
2229 (b'', b'plan-changes', False, _(b'plan changes for revisions')),
2236 (b'', b'resign', False, _(b'resign as a reviewer from revisions')),
2230 (b'', b'resign', False, _(b'resign as a reviewer from revisions')),
2237 (b'', b'commandeer', False, _(b'commandeer revisions')),
2231 (b'', b'commandeer', False, _(b'commandeer revisions')),
2238 (b'm', b'comment', b'', _(b'comment on the last revision')),
2232 (b'm', b'comment', b'', _(b'comment on the last revision')),
2239 (b'r', b'rev', b'', _(b'local revision to update'), _(b'REV')),
2233 (b'r', b'rev', b'', _(b'local revision to update'), _(b'REV')),
2240 ],
2234 ],
2241 _(b'[DREVSPEC...| -r REV...] [OPTIONS]'),
2235 _(b'[DREVSPEC...| -r REV...] [OPTIONS]'),
2242 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2236 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2243 optionalrepo=True,
2237 optionalrepo=True,
2244 )
2238 )
2245 def phabupdate(ui, repo, *specs, **opts):
2239 def phabupdate(ui, repo, *specs, **opts):
2246 """update Differential Revision in batch
2240 """update Differential Revision in batch
2247
2241
2248 DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
2242 DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
2249 """
2243 """
2250 opts = pycompat.byteskwargs(opts)
2244 opts = pycompat.byteskwargs(opts)
2251 transactions = [
2245 transactions = [
2252 b'abandon',
2246 b'abandon',
2253 b'accept',
2247 b'accept',
2254 b'close',
2248 b'close',
2255 b'commandeer',
2249 b'commandeer',
2256 b'plan-changes',
2250 b'plan-changes',
2257 b'reclaim',
2251 b'reclaim',
2258 b'reject',
2252 b'reject',
2259 b'reopen',
2253 b'reopen',
2260 b'request-review',
2254 b'request-review',
2261 b'resign',
2255 b'resign',
2262 ]
2256 ]
2263 flags = [n for n in transactions if opts.get(n.replace(b'-', b'_'))]
2257 flags = [n for n in transactions if opts.get(n.replace(b'-', b'_'))]
2264 if len(flags) > 1:
2258 if len(flags) > 1:
2265 raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
2259 raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
2266
2260
2267 actions = []
2261 actions = []
2268 for f in flags:
2262 for f in flags:
2269 actions.append({b'type': f, b'value': True})
2263 actions.append({b'type': f, b'value': True})
2270
2264
2271 revs = opts.get(b'rev')
2265 revs = opts.get(b'rev')
2272 if revs:
2266 if revs:
2273 if not repo:
2267 if not repo:
2274 raise error.InputError(_(b'--rev requires a repository'))
2268 raise error.InputError(_(b'--rev requires a repository'))
2275
2269
2276 if specs:
2270 if specs:
2277 raise error.InputError(_(b'cannot specify both DREVSPEC and --rev'))
2271 raise error.InputError(_(b'cannot specify both DREVSPEC and --rev'))
2278
2272
2279 drevmap = getdrevmap(repo, logcmdutil.revrange(repo, [revs]))
2273 drevmap = getdrevmap(repo, logcmdutil.revrange(repo, [revs]))
2280 specs = []
2274 specs = []
2281 unknown = []
2275 unknown = []
2282 for r, d in drevmap.items():
2276 for r, d in drevmap.items():
2283 if d is None:
2277 if d is None:
2284 unknown.append(repo[r])
2278 unknown.append(repo[r])
2285 else:
2279 else:
2286 specs.append(b'D%d' % d)
2280 specs.append(b'D%d' % d)
2287 if unknown:
2281 if unknown:
2288 raise error.InputError(
2282 raise error.InputError(
2289 _(b'selected revisions without a Differential: %s')
2283 _(b'selected revisions without a Differential: %s')
2290 % scmutil.nodesummaries(repo, unknown)
2284 % scmutil.nodesummaries(repo, unknown)
2291 )
2285 )
2292
2286
2293 drevs = _getdrevs(ui, opts.get(b'stack'), specs)
2287 drevs = _getdrevs(ui, opts.get(b'stack'), specs)
2294 for i, drev in enumerate(drevs):
2288 for i, drev in enumerate(drevs):
2295 if i + 1 == len(drevs) and opts.get(b'comment'):
2289 if i + 1 == len(drevs) and opts.get(b'comment'):
2296 actions.append({b'type': b'comment', b'value': opts[b'comment']})
2290 actions.append({b'type': b'comment', b'value': opts[b'comment']})
2297 if actions:
2291 if actions:
2298 params = {
2292 params = {
2299 b'objectIdentifier': drev[b'phid'],
2293 b'objectIdentifier': drev[b'phid'],
2300 b'transactions': actions,
2294 b'transactions': actions,
2301 }
2295 }
2302 callconduit(ui, b'differential.revision.edit', params)
2296 callconduit(ui, b'differential.revision.edit', params)
2303
2297
2304
2298
2305 @eh.templatekeyword(b'phabreview', requires={b'ctx'})
2299 @eh.templatekeyword(b'phabreview', requires={b'ctx'})
2306 def template_review(context, mapping):
2300 def template_review(context, mapping):
2307 """:phabreview: Object describing the review for this changeset.
2301 """:phabreview: Object describing the review for this changeset.
2308 Has attributes `url` and `id`.
2302 Has attributes `url` and `id`.
2309 """
2303 """
2310 ctx = context.resource(mapping, b'ctx')
2304 ctx = context.resource(mapping, b'ctx')
2311 m = _differentialrevisiondescre.search(ctx.description())
2305 m = _differentialrevisiondescre.search(ctx.description())
2312 if m:
2306 if m:
2313 return templateutil.hybriddict(
2307 return templateutil.hybriddict(
2314 {
2308 {
2315 b'url': m.group('url'),
2309 b'url': m.group('url'),
2316 b'id': b"D%s" % m.group('id'),
2310 b'id': b"D%s" % m.group('id'),
2317 }
2311 }
2318 )
2312 )
2319 else:
2313 else:
2320 tags = ctx.repo().nodetags(ctx.node())
2314 tags = ctx.repo().nodetags(ctx.node())
2321 for t in tags:
2315 for t in tags:
2322 if _differentialrevisiontagre.match(t):
2316 if _differentialrevisiontagre.match(t):
2323 url = ctx.repo().ui.config(b'phabricator', b'url')
2317 url = ctx.repo().ui.config(b'phabricator', b'url')
2324 if not url.endswith(b'/'):
2318 if not url.endswith(b'/'):
2325 url += b'/'
2319 url += b'/'
2326 url += t
2320 url += t
2327
2321
2328 return templateutil.hybriddict(
2322 return templateutil.hybriddict(
2329 {
2323 {
2330 b'url': url,
2324 b'url': url,
2331 b'id': t,
2325 b'id': t,
2332 }
2326 }
2333 )
2327 )
2334 return None
2328 return None
2335
2329
2336
2330
2337 @eh.templatekeyword(b'phabstatus', requires={b'ctx', b'repo', b'ui'})
2331 @eh.templatekeyword(b'phabstatus', requires={b'ctx', b'repo', b'ui'})
2338 def template_status(context, mapping):
2332 def template_status(context, mapping):
2339 """:phabstatus: String. Status of Phabricator differential."""
2333 """:phabstatus: String. Status of Phabricator differential."""
2340 ctx = context.resource(mapping, b'ctx')
2334 ctx = context.resource(mapping, b'ctx')
2341 repo = context.resource(mapping, b'repo')
2335 repo = context.resource(mapping, b'repo')
2342 ui = context.resource(mapping, b'ui')
2336 ui = context.resource(mapping, b'ui')
2343
2337
2344 rev = ctx.rev()
2338 rev = ctx.rev()
2345 try:
2339 try:
2346 drevid = getdrevmap(repo, [rev])[rev]
2340 drevid = getdrevmap(repo, [rev])[rev]
2347 except KeyError:
2341 except KeyError:
2348 return None
2342 return None
2349 drevs = callconduit(ui, b'differential.query', {b'ids': [drevid]})
2343 drevs = callconduit(ui, b'differential.query', {b'ids': [drevid]})
2350 for drev in drevs:
2344 for drev in drevs:
2351 if int(drev[b'id']) == drevid:
2345 if int(drev[b'id']) == drevid:
2352 return templateutil.hybriddict(
2346 return templateutil.hybriddict(
2353 {
2347 {
2354 b'url': drev[b'uri'],
2348 b'url': drev[b'uri'],
2355 b'status': drev[b'statusName'],
2349 b'status': drev[b'statusName'],
2356 }
2350 }
2357 )
2351 )
2358 return None
2352 return None
2359
2353
2360
2354
2361 @show.showview(b'phabstatus', csettopic=b'work')
2355 @show.showview(b'phabstatus', csettopic=b'work')
2362 def phabstatusshowview(ui, repo, displayer):
2356 def phabstatusshowview(ui, repo, displayer):
2363 """Phabricator differiential status"""
2357 """Phabricator differiential status"""
2364 revs = repo.revs('sort(_underway(), topo)')
2358 revs = repo.revs('sort(_underway(), topo)')
2365 drevmap = getdrevmap(repo, revs)
2359 drevmap = getdrevmap(repo, revs)
2366 unknownrevs, drevids, revsbydrevid = [], set(), {}
2360 unknownrevs, drevids, revsbydrevid = [], set(), {}
2367 for rev, drevid in drevmap.items():
2361 for rev, drevid in drevmap.items():
2368 if drevid is not None:
2362 if drevid is not None:
2369 drevids.add(drevid)
2363 drevids.add(drevid)
2370 revsbydrevid.setdefault(drevid, set()).add(rev)
2364 revsbydrevid.setdefault(drevid, set()).add(rev)
2371 else:
2365 else:
2372 unknownrevs.append(rev)
2366 unknownrevs.append(rev)
2373
2367
2374 drevs = callconduit(ui, b'differential.query', {b'ids': list(drevids)})
2368 drevs = callconduit(ui, b'differential.query', {b'ids': list(drevids)})
2375 drevsbyrev = {}
2369 drevsbyrev = {}
2376 for drev in drevs:
2370 for drev in drevs:
2377 for rev in revsbydrevid[int(drev[b'id'])]:
2371 for rev in revsbydrevid[int(drev[b'id'])]:
2378 drevsbyrev[rev] = drev
2372 drevsbyrev[rev] = drev
2379
2373
2380 def phabstatus(ctx):
2374 def phabstatus(ctx):
2381 drev = drevsbyrev[ctx.rev()]
2375 drev = drevsbyrev[ctx.rev()]
2382 status = ui.label(
2376 status = ui.label(
2383 b'%(statusName)s' % drev,
2377 b'%(statusName)s' % drev,
2384 b'phabricator.status.%s' % _getstatusname(drev),
2378 b'phabricator.status.%s' % _getstatusname(drev),
2385 )
2379 )
2386 ui.write(b"\n%s %s\n" % (drev[b'uri'], status))
2380 ui.write(b"\n%s %s\n" % (drev[b'uri'], status))
2387
2381
2388 revs -= smartset.baseset(unknownrevs)
2382 revs -= smartset.baseset(unknownrevs)
2389 revdag = graphmod.dagwalker(repo, revs)
2383 revdag = graphmod.dagwalker(repo, revs)
2390
2384
2391 ui.setconfig(b'experimental', b'graphshorten', True)
2385 ui.setconfig(b'experimental', b'graphshorten', True)
2392 displayer._exthook = phabstatus
2386 displayer._exthook = phabstatus
2393 nodelen = show.longestshortest(repo, revs)
2387 nodelen = show.longestshortest(repo, revs)
2394 logcmdutil.displaygraph(
2388 logcmdutil.displaygraph(
2395 ui,
2389 ui,
2396 repo,
2390 repo,
2397 revdag,
2391 revdag,
2398 displayer,
2392 displayer,
2399 graphmod.asciiedges,
2393 graphmod.asciiedges,
2400 props={b'nodelen': nodelen},
2394 props={b'nodelen': nodelen},
2401 )
2395 )
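
The view above deliberately batches a single conduit round-trip for the whole
working set: it inverts the rev -> drevid mapping so that each Differential
revision returned by one differential.query call can be fanned back out to
every local rev that refers to it. A minimal sketch of that inversion, with
made-up ids standing in for the conduit response::

    # Sketch of the fan-out in phabstatusshowview(); the ids are invented.
    drevmap = {0: 101, 1: 101, 2: 102, 3: None}  # rev -> drevid (None: no match)

    unknownrevs, drevids, revsbydrevid = [], set(), {}
    for rev, drevid in drevmap.items():
        if drevid is not None:
            drevids.add(drevid)
            revsbydrevid.setdefault(drevid, set()).add(rev)
        else:
            unknownrevs.append(rev)

    # One batched query, then map each result back to its revs
    # (hypothetical response shape, ids as bytes like the real payload).
    drevs = [{b'id': b'101'}, {b'id': b'102'}]
    drevsbyrev = {}
    for drev in drevs:
        for rev in revsbydrevid[int(drev[b'id'])]:
            drevsbyrev[rev] = drev
    assert sorted(drevsbyrev) == [0, 1, 2] and unknownrevs == [3]
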
@@ -1,222 +1,222
1 # win32mbcs.py -- MBCS filename support for Mercurial
1 # win32mbcs.py -- MBCS filename support for Mercurial
2 #
2 #
3 # Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
3 # Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
4 #
4 #
5 # Version: 0.3
5 # Version: 0.3
6 # Author: Shun-ichi Goto <shunichi.goto@gmail.com>
6 # Author: Shun-ichi Goto <shunichi.goto@gmail.com>
7 #
7 #
8 # This software may be used and distributed according to the terms of the
8 # This software may be used and distributed according to the terms of the
9 # GNU General Public License version 2 or any later version.
9 # GNU General Public License version 2 or any later version.
10 #
10 #
11
11
12 '''allow the use of MBCS paths with problematic encodings
12 '''allow the use of MBCS paths with problematic encodings
13
13
14 Some MBCS encodings are not suitable for certain path operations (e.g.
14 Some MBCS encodings are not suitable for certain path operations (e.g.
15 splitting a path, case conversion, etc.) on their encoded bytes. We call
15 splitting a path, case conversion, etc.) on their encoded bytes. We call
16 such an encoding (e.g. shift_jis or big5) a "problematic encoding".
16 such an encoding (e.g. shift_jis or big5) a "problematic encoding".
17 This extension can be used to fix the issue with those encodings by
17 This extension can be used to fix the issue with those encodings by
18 wrapping some functions so they convert to Unicode strings before
18 wrapping some functions so they convert to Unicode strings before
19 performing path operations.
19 performing path operations.
20
20
21 This extension is useful for:
21 This extension is useful for:
22
22
23 - Japanese Windows users using shift_jis encoding.
23 - Japanese Windows users using shift_jis encoding.
24 - Chinese Windows users using big5 encoding.
24 - Chinese Windows users using big5 encoding.
25 - All users who use a repository with one of the problematic encodings
25 - All users who use a repository with one of the problematic encodings
26   on a case-insensitive file system.
26   on a case-insensitive file system.
27
27
28 This extension is not needed for:
28 This extension is not needed for:
29
29
30 - Any user who uses only ASCII characters in paths.
30 - Any user who uses only ASCII characters in paths.
31 - Any user who does not use any of the problematic encodings.
31 - Any user who does not use any of the problematic encodings.
32
32
33 Note that there are some limitations on using this extension:
33 Note that there are some limitations on using this extension:
34
34
35 - You should use a single encoding in one repository.
35 - You should use a single encoding in one repository.
36 - If the repository path ends with 0x5c, .hg/hgrc cannot be read.
36 - If the repository path ends with 0x5c, .hg/hgrc cannot be read.
37 - win32mbcs is not compatible with the fixutf8 extension.
37 - win32mbcs is not compatible with the fixutf8 extension.
38
38
39 By default, win32mbcs uses the encoding.encoding value decided by
39 By default, win32mbcs uses the encoding.encoding value decided by
40 Mercurial. You can specify the encoding with a config option::
40 Mercurial. You can specify the encoding with a config option::
41
41
42 [win32mbcs]
42 [win32mbcs]
43 encoding = sjis
43 encoding = sjis
44
44
45 This is useful for users who want to commit with UTF-8 log messages.
45 This is useful for users who want to commit with UTF-8 log messages.
46 '''
46 '''
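
The failure mode motivating all of this is easy to reproduce: in shift_jis,
the second byte of some characters is 0x5c, the byte Windows path routines
treat as a backslash. A minimal sketch (assuming a CPython 3 interpreter with
the stock shift_jis codec; ntpath gives Windows semantics on any platform)::

    import ntpath

    text = u'\u80fd'                 # '能', whose shift_jis form ends in 0x5c
    raw = text.encode('shift_jis')   # b'\x94\x5c' -- last byte equals ord('\\')
    print(ntpath.split(b'dir\\' + raw))   # (b'dir\\\x94', b'') -- split mid-character
    print(ntpath.split(u'dir\\' + text))  # ('dir', '\u80fd') -- correct on unicode
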
47
47
48 import os
48 import os
49 import sys
49 import sys
50
50
51 from mercurial.i18n import _
51 from mercurial.i18n import _
52 from mercurial.pycompat import getattr, setattr
52 from mercurial.pycompat import getattr, setattr
53 from mercurial import (
53 from mercurial import (
54 encoding,
54 encoding,
55 error,
55 error,
56 pycompat,
56 pycompat,
57 registrar,
57 registrar,
58 )
58 )
59
59
60 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
60 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
61 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 # be specifying the version(s) of Mercurial they are tested with, or
62 # be specifying the version(s) of Mercurial they are tested with, or
63 # leave the attribute unspecified.
63 # leave the attribute unspecified.
64 testedwith = b'ships-with-hg-core'
64 testedwith = b'ships-with-hg-core'
65
65
66 configtable = {}
66 configtable = {}
67 configitem = registrar.configitem(configtable)
67 configitem = registrar.configitem(configtable)
68
68
69 # encoding.encoding may be updated by the --encoding option.
69 # encoding.encoding may be updated by the --encoding option.
70 # Use a lambda to delay the resolution.
70 # Use a lambda to delay the resolution.
71 configitem(
71 configitem(
72 b'win32mbcs',
72 b'win32mbcs',
73 b'encoding',
73 b'encoding',
74 default=lambda: encoding.encoding,
74 default=lambda: encoding.encoding,
75 )
75 )
76
76
77 _encoding = None # see extsetup
77 _encoding = None # see extsetup
78
78
79
79
80 def decode(arg):
80 def decode(arg):
81 if isinstance(arg, bytes):
81 if isinstance(arg, bytes):
82 uarg = arg.decode(_encoding)
82 uarg = arg.decode(_encoding)
83 if arg == uarg.encode(_encoding):
83 if arg == uarg.encode(_encoding):
84 return uarg
84 return uarg
85 raise UnicodeError(b"Not local encoding")
85 raise UnicodeError(b"Not local encoding")
86 elif isinstance(arg, tuple):
86 elif isinstance(arg, tuple):
87 return tuple(map(decode, arg))
87 return tuple(map(decode, arg))
88 elif isinstance(arg, list):
88 elif isinstance(arg, list):
89 return map(decode, arg)
89 return map(decode, arg)
90 elif isinstance(arg, dict):
90 elif isinstance(arg, dict):
91 for k, v in arg.items():
91 for k, v in arg.items():
92 arg[k] = decode(v)
92 arg[k] = decode(v)
93 return arg
93 return arg
94
94
95
95
96 def encode(arg):
96 def encode(arg):
97 if isinstance(arg, pycompat.unicode):
97 if isinstance(arg, str):
98 return arg.encode(_encoding)
98 return arg.encode(_encoding)
99 elif isinstance(arg, tuple):
99 elif isinstance(arg, tuple):
100 return tuple(map(encode, arg))
100 return tuple(map(encode, arg))
101 elif isinstance(arg, list):
101 elif isinstance(arg, list):
102 return map(encode, arg)
102 return map(encode, arg)
103 elif isinstance(arg, dict):
103 elif isinstance(arg, dict):
104 for k, v in arg.items():
104 for k, v in arg.items():
105 arg[k] = encode(v)
105 arg[k] = encode(v)
106 return arg
106 return arg
107
107
108
108
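
decode() and encode() walk tuples and dicts recursively so that a whole
argument vector converts in one call; note that the list branch returns a
lazy map object on Python 3 rather than a list. A hedged round-trip sketch,
assuming _encoding has been set to 'shift_jis' and sticking to tuples and
dicts for that reason::

    args = (b'foo', (b'bar',), {b'k': b'baz'})
    assert encode(decode(args)) == args   # bytes -> unicode -> bytes
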
109 def appendsep(s):
109 def appendsep(s):
110 # ensure the path ends with os.sep, appending it if necessary.
110 # ensure the path ends with os.sep, appending it if necessary.
111 try:
111 try:
112 us = decode(s)
112 us = decode(s)
113 except UnicodeError:
113 except UnicodeError:
114 us = s
114 us = s
115 if us and us[-1] not in b':/\\':
115 if us and us[-1] not in b':/\\':
116 s += pycompat.ossep
116 s += pycompat.ossep
117 return s
117 return s
118
118
119
119
120 def basewrapper(func, argtype, enc, dec, args, kwds):
120 def basewrapper(func, argtype, enc, dec, args, kwds):
121 # check if already converted, then call the original
121 # check if already converted, then call the original
122 for arg in args:
122 for arg in args:
123 if isinstance(arg, argtype):
123 if isinstance(arg, argtype):
124 return func(*args, **kwds)
124 return func(*args, **kwds)
125
125
126 try:
126 try:
127 # convert string arguments, call func, then convert back the
127 # convert string arguments, call func, then convert back the
128 # return value.
128 # return value.
129 return enc(func(*dec(args), **dec(kwds)))
129 return enc(func(*dec(args), **dec(kwds)))
130 except UnicodeError:
130 except UnicodeError:
131 raise error.Abort(
131 raise error.Abort(
132 _(b"[win32mbcs] filename conversion failed with %s encoding\n")
132 _(b"[win32mbcs] filename conversion failed with %s encoding\n")
133 % _encoding
133 % _encoding
134 )
134 )
135
135
136
136
137 def wrapper(func, args, kwds):
137 def wrapper(func, args, kwds):
138 return basewrapper(func, pycompat.unicode, encode, decode, args, kwds)
138 return basewrapper(func, str, encode, decode, args, kwds)
139
139
140
140
141 def reversewrapper(func, args, kwds):
141 def reversewrapper(func, args, kwds):
142 return basewrapper(func, str, decode, encode, args, kwds)
142 return basewrapper(func, str, decode, encode, args, kwds)
143
143
144
144
145 def wrapperforlistdir(func, args, kwds):
145 def wrapperforlistdir(func, args, kwds):
146 # Ensure the 'path' argument ends with os.sep, to avoid
146 # Ensure the 'path' argument ends with os.sep, to avoid
147 # misinterpreting the trailing 0x5c of an MBCS second byte as a path separator.
147 # misinterpreting the trailing 0x5c of an MBCS second byte as a path separator.
148 if args:
148 if args:
149 args = list(args)
149 args = list(args)
150 args[0] = appendsep(args[0])
150 args[0] = appendsep(args[0])
151 if b'path' in kwds:
151 if b'path' in kwds:
152 kwds[b'path'] = appendsep(kwds[b'path'])
152 kwds[b'path'] = appendsep(kwds[b'path'])
153 return func(*args, **kwds)
153 return func(*args, **kwds)
154
154
155
155
156 def wrapname(name, wrapper):
156 def wrapname(name, wrapper):
157 module, name = name.rsplit(b'.', 1)
157 module, name = name.rsplit(b'.', 1)
158 module = sys.modules[module]
158 module = sys.modules[module]
159 func = getattr(module, name)
159 func = getattr(module, name)
160
160
161 def f(*args, **kwds):
161 def f(*args, **kwds):
162 return wrapper(func, args, kwds)
162 return wrapper(func, args, kwds)
163
163
164 f.__name__ = func.__name__
164 f.__name__ = func.__name__
165 setattr(module, name, f)
165 setattr(module, name, f)
166
166
167
167
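
wrapname() patches a function in place by dotted name: it resolves the module
through sys.modules, then rebinds the attribute to a closure over the
original. The same pattern in a standalone form (str names here, where the
extension passes bytes)::

    import os.path
    import sys

    def wrapname(name, wrapper):
        module, attr = name.rsplit('.', 1)
        module = sys.modules[module]   # the module must already be imported
        func = getattr(module, attr)

        def f(*args, **kwds):
            return wrapper(func, args, kwds)

        f.__name__ = func.__name__     # keep introspection output readable
        setattr(module, attr, f)

    seen = []

    def tracing(func, args, kwds):
        seen.append(args)              # record, then delegate to the original
        return func(*args, **kwds)

    wrapname('os.path.join', tracing)
    os.path.join('a', 'b')
    print(seen)                        # [('a', 'b')]
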
168 # List of functions to be wrapped.
168 # List of functions to be wrapped.
169 # NOTE: os.path.dirname() and os.path.basename() are safe because
169 # NOTE: os.path.dirname() and os.path.basename() are safe because
170 # they use result of os.path.split()
170 # they use result of os.path.split()
171 funcs = b'''os.path.join os.path.split os.path.splitext
171 funcs = b'''os.path.join os.path.split os.path.splitext
172 os.path.normpath os.makedirs mercurial.util.endswithsep
172 os.path.normpath os.makedirs mercurial.util.endswithsep
173 mercurial.util.splitpath mercurial.util.fscasesensitive
173 mercurial.util.splitpath mercurial.util.fscasesensitive
174 mercurial.util.fspath mercurial.util.pconvert mercurial.util.normpath
174 mercurial.util.fspath mercurial.util.pconvert mercurial.util.normpath
175 mercurial.util.checkwinfilename mercurial.util.checkosfilename
175 mercurial.util.checkwinfilename mercurial.util.checkosfilename
176 mercurial.util.split'''
176 mercurial.util.split'''
177
177
178 # These functions must be called with locally encoded strings
178 # These functions must be called with locally encoded strings
179 # because they expect locally encoded arguments and cause
179 # because they expect locally encoded arguments and cause
180 # problems when given unicode strings.
180 # problems when given unicode strings.
181 rfuncs = b'''mercurial.encoding.upper mercurial.encoding.lower
181 rfuncs = b'''mercurial.encoding.upper mercurial.encoding.lower
182 mercurial.util._filenamebytestr'''
182 mercurial.util._filenamebytestr'''
183
183
184 # List of Windows specific functions to be wrapped.
184 # List of Windows specific functions to be wrapped.
185 winfuncs = b'''os.path.splitunc'''
185 winfuncs = b'''os.path.splitunc'''
186
186
187 # codec and alias names of sjis and big5 to be faked.
187 # codec and alias names of sjis and big5 to be faked.
188 problematic_encodings = b'''big5 big5-tw csbig5 big5hkscs big5-hkscs
188 problematic_encodings = b'''big5 big5-tw csbig5 big5hkscs big5-hkscs
189 hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
189 hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
190 sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
190 sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
191 shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213 950 cp950 ms950 '''
191 shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213 950 cp950 ms950 '''
192
192
193
193
194 def extsetup(ui):
194 def extsetup(ui):
195 # TODO: decide use of config section for this extension
195 # TODO: decide use of config section for this extension
196 if (not os.path.supports_unicode_filenames) and (
196 if (not os.path.supports_unicode_filenames) and (
197 pycompat.sysplatform != b'cygwin'
197 pycompat.sysplatform != b'cygwin'
198 ):
198 ):
199 ui.warn(_(b"[win32mbcs] cannot activate on this platform.\n"))
199 ui.warn(_(b"[win32mbcs] cannot activate on this platform.\n"))
200 return
200 return
201 # determine encoding for filename
201 # determine encoding for filename
202 global _encoding
202 global _encoding
203 _encoding = ui.config(b'win32mbcs', b'encoding')
203 _encoding = ui.config(b'win32mbcs', b'encoding')
204 # faking is only needed for the relevant environments.
204 # faking is only needed for the relevant environments.
205 if _encoding.lower() in problematic_encodings.split():
205 if _encoding.lower() in problematic_encodings.split():
206 for f in funcs.split():
206 for f in funcs.split():
207 wrapname(f, wrapper)
207 wrapname(f, wrapper)
208 if pycompat.iswindows:
208 if pycompat.iswindows:
209 for f in winfuncs.split():
209 for f in winfuncs.split():
210 wrapname(f, wrapper)
210 wrapname(f, wrapper)
211 wrapname(b"mercurial.util.listdir", wrapperforlistdir)
211 wrapname(b"mercurial.util.listdir", wrapperforlistdir)
212 wrapname(b"mercurial.windows.listdir", wrapperforlistdir)
212 wrapname(b"mercurial.windows.listdir", wrapperforlistdir)
213 # wrap functions to be called with local byte string arguments
213 # wrap functions to be called with local byte string arguments
214 for f in rfuncs.split():
214 for f in rfuncs.split():
215 wrapname(f, reversewrapper)
215 wrapname(f, reversewrapper)
216 # Check sys.argv manually instead of using ui.debug() because
216 # Check sys.argv manually instead of using ui.debug() because
217 # command line options are not yet applied when
217 # command line options are not yet applied when
218 # extensions.loadall() is called.
218 # extensions.loadall() is called.
219 if b'--debug' in sys.argv:
219 if b'--debug' in sys.argv:
220 ui.writenoi18n(
220 ui.writenoi18n(
221 b"[win32mbcs] activated with encoding: %s\n" % _encoding
221 b"[win32mbcs] activated with encoding: %s\n" % _encoding
222 )
222 )
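
Putting the pieces together, enabling the extension is a two-line hgrc
change; the encoding key is optional and falls back to Mercurial's detected
encoding, per the configitem default above::

    [extensions]
    win32mbcs =

    [win32mbcs]
    encoding = sjis
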
@@ -1,123 +1,123
1 # hgweb/__init__.py - web interface to a mercurial repository
1 # hgweb/__init__.py - web interface to a mercurial repository
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import os
10 import os
11
11
12 from ..i18n import _
12 from ..i18n import _
13
13
14 from .. import (
14 from .. import (
15 error,
15 error,
16 pycompat,
16 pycompat,
17 )
17 )
18
18
19 from ..utils import procutil
19 from ..utils import procutil
20
20
21 from . import (
21 from . import (
22 hgweb_mod,
22 hgweb_mod,
23 hgwebdir_mod,
23 hgwebdir_mod,
24 server,
24 server,
25 )
25 )
26
26
27
27
28 def hgweb(config, name=None, baseui=None):
28 def hgweb(config, name=None, baseui=None):
29 """create an hgweb wsgi object
29 """create an hgweb wsgi object
30
30
31 config can be one of:
31 config can be one of:
32 - repo object (single repo view)
32 - repo object (single repo view)
33 - path to repo (single repo view)
33 - path to repo (single repo view)
34 - path to config file (multi-repo view)
34 - path to config file (multi-repo view)
35 - dict of virtual:real pairs (multi-repo view)
35 - dict of virtual:real pairs (multi-repo view)
36 - list of virtual:real tuples (multi-repo view)
36 - list of virtual:real tuples (multi-repo view)
37 """
37 """
38
38
39 if isinstance(config, pycompat.unicode):
39 if isinstance(config, str):
40 raise error.ProgrammingError(
40 raise error.ProgrammingError(
41 b'Mercurial only supports encoded strings: %r' % config
41 b'Mercurial only supports encoded strings: %r' % config
42 )
42 )
43 if (
43 if (
44 (isinstance(config, bytes) and not os.path.isdir(config))
44 (isinstance(config, bytes) and not os.path.isdir(config))
45 or isinstance(config, dict)
45 or isinstance(config, dict)
46 or isinstance(config, list)
46 or isinstance(config, list)
47 ):
47 ):
48 # create a multi-dir interface
48 # create a multi-dir interface
49 return hgwebdir_mod.hgwebdir(config, baseui=baseui)
49 return hgwebdir_mod.hgwebdir(config, baseui=baseui)
50 return hgweb_mod.hgweb(config, name=name, baseui=baseui)
50 return hgweb_mod.hgweb(config, name=name, baseui=baseui)
51
51
52
52
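
hgweb() is deliberately strict about bytes versus str (the recurring theme of
this change): a str config raises ProgrammingError, so WSGI glue hands it
byte paths. A hedged publishing sketch; the repository path is made up::

    # Hypothetical hgweb.wsgi script.
    from mercurial import demandimport

    demandimport.enable()
    from mercurial.hgweb import hgweb

    application = hgweb(b'/srv/repos/project')  # bytes, never str
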
53 def hgwebdir(config, baseui=None):
53 def hgwebdir(config, baseui=None):
54 return hgwebdir_mod.hgwebdir(config, baseui=baseui)
54 return hgwebdir_mod.hgwebdir(config, baseui=baseui)
55
55
56
56
57 class httpservice(object):
57 class httpservice(object):
58 def __init__(self, ui, app, opts):
58 def __init__(self, ui, app, opts):
59 self.ui = ui
59 self.ui = ui
60 self.app = app
60 self.app = app
61 self.opts = opts
61 self.opts = opts
62
62
63 def init(self):
63 def init(self):
64 procutil.setsignalhandler()
64 procutil.setsignalhandler()
65 self.httpd = server.create_server(self.ui, self.app)
65 self.httpd = server.create_server(self.ui, self.app)
66
66
67 if (
67 if (
68 self.opts[b'port']
68 self.opts[b'port']
69 and not self.ui.verbose
69 and not self.ui.verbose
70 and not self.opts[b'print_url']
70 and not self.opts[b'print_url']
71 ):
71 ):
72 return
72 return
73
73
74 if self.httpd.prefix:
74 if self.httpd.prefix:
75 prefix = self.httpd.prefix.strip(b'/') + b'/'
75 prefix = self.httpd.prefix.strip(b'/') + b'/'
76 else:
76 else:
77 prefix = b''
77 prefix = b''
78
78
79 port = ':%d' % self.httpd.port
79 port = ':%d' % self.httpd.port
80 if port == ':80':
80 if port == ':80':
81 port = ''
81 port = ''
82
82
83 bindaddr = self.httpd.addr
83 bindaddr = self.httpd.addr
84 if bindaddr == '0.0.0.0':
84 if bindaddr == '0.0.0.0':
85 bindaddr = '*'
85 bindaddr = '*'
86 elif ':' in bindaddr: # IPv6
86 elif ':' in bindaddr: # IPv6
87 bindaddr = '[%s]' % bindaddr
87 bindaddr = '[%s]' % bindaddr
88
88
89 fqaddr = self.httpd.fqaddr
89 fqaddr = self.httpd.fqaddr
90 if ':' in fqaddr:
90 if ':' in fqaddr:
91 fqaddr = '[%s]' % fqaddr
91 fqaddr = '[%s]' % fqaddr
92
92
93 url = b'http://%s%s/%s' % (
93 url = b'http://%s%s/%s' % (
94 pycompat.sysbytes(fqaddr),
94 pycompat.sysbytes(fqaddr),
95 pycompat.sysbytes(port),
95 pycompat.sysbytes(port),
96 prefix,
96 prefix,
97 )
97 )
98 if self.opts[b'print_url']:
98 if self.opts[b'print_url']:
99 self.ui.write(b'%s\n' % url)
99 self.ui.write(b'%s\n' % url)
100 else:
100 else:
101 if self.opts[b'port']:
101 if self.opts[b'port']:
102 write = self.ui.status
102 write = self.ui.status
103 else:
103 else:
104 write = self.ui.write
104 write = self.ui.write
105 write(
105 write(
106 _(b'listening at %s (bound to %s:%d)\n')
106 _(b'listening at %s (bound to %s:%d)\n')
107 % (url, pycompat.sysbytes(bindaddr), self.httpd.port)
107 % (url, pycompat.sysbytes(bindaddr), self.httpd.port)
108 )
108 )
109 self.ui.flush() # avoid buffering of status message
109 self.ui.flush() # avoid buffering of status message
110
110
111 def run(self):
111 def run(self):
112 self.httpd.serve_forever()
112 self.httpd.serve_forever()
113
113
114
114
115 def createapp(baseui, repo, webconf):
115 def createapp(baseui, repo, webconf):
116 if webconf:
116 if webconf:
117 return hgwebdir_mod.hgwebdir(webconf, baseui=baseui)
117 return hgwebdir_mod.hgwebdir(webconf, baseui=baseui)
118 else:
118 else:
119 if not repo:
119 if not repo:
120 raise error.RepoError(
120 raise error.RepoError(
121 _(b"there is no Mercurial repository here (.hg not found)")
121 _(b"there is no Mercurial repository here (.hg not found)")
122 )
122 )
123 return hgweb_mod.hgweb(repo, baseui=baseui)
123 return hgweb_mod.hgweb(repo, baseui=baseui)
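
httpservice.init() above applies a few display rules when printing the listen
URL: the ':80' suffix is elided, IPv6 literals are bracketed, and the prefix
is normalized to a single trailing slash. Those rules in isolation (a sketch,
not the class's API)::

    def display_url(fqaddr, port, prefix):
        p = ':%d' % port
        if p == ':80':
            p = ''                    # the default http port stays implicit
        if ':' in fqaddr:
            fqaddr = '[%s]' % fqaddr  # bracket IPv6 literals
        pfx = prefix.strip('/') + '/' if prefix else ''
        return 'http://%s%s/%s' % (fqaddr, p, pfx)

    print(display_url('localhost', 8000, ''))  # http://localhost:8000/
    print(display_url('::1', 80, '/hg/'))      # http://[::1]/hg/
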
@@ -1,125 +1,125
1 # i18n.py - internationalization support for mercurial
1 # i18n.py - internationalization support for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import gettext as gettextmod
9 import gettext as gettextmod
10 import locale
10 import locale
11 import os
11 import os
12 import sys
12 import sys
13
13
14 from .pycompat import getattr
14 from .pycompat import getattr
15 from .utils import resourceutil
15 from .utils import resourceutil
16 from . import (
16 from . import (
17 encoding,
17 encoding,
18 pycompat,
18 pycompat,
19 )
19 )
20
20
21 if pycompat.TYPE_CHECKING:
21 if pycompat.TYPE_CHECKING:
22 from typing import (
22 from typing import (
23 Callable,
23 Callable,
24 List,
24 List,
25 )
25 )
26
26
27
27
28 # modelled after templater.templatepath:
28 # modelled after templater.templatepath:
29 if getattr(sys, 'frozen', None) is not None:
29 if getattr(sys, 'frozen', None) is not None:
30 module = pycompat.sysexecutable
30 module = pycompat.sysexecutable
31 else:
31 else:
32 module = pycompat.fsencode(__file__)
32 module = pycompat.fsencode(__file__)
33
33
34 _languages = None
34 _languages = None
35 if (
35 if (
36 pycompat.iswindows
36 pycompat.iswindows
37 and b'LANGUAGE' not in encoding.environ
37 and b'LANGUAGE' not in encoding.environ
38 and b'LC_ALL' not in encoding.environ
38 and b'LC_ALL' not in encoding.environ
39 and b'LC_MESSAGES' not in encoding.environ
39 and b'LC_MESSAGES' not in encoding.environ
40 and b'LANG' not in encoding.environ
40 and b'LANG' not in encoding.environ
41 ):
41 ):
42 # Try to detect UI language by "User Interface Language Management" API
42 # Try to detect UI language by "User Interface Language Management" API
43 # if no locale variables are set. Note that locale.getdefaultlocale()
43 # if no locale variables are set. Note that locale.getdefaultlocale()
44 # uses GetLocaleInfo(), which may be different from UI language.
44 # uses GetLocaleInfo(), which may be different from UI language.
45 # (See http://msdn.microsoft.com/en-us/library/dd374098(v=VS.85).aspx )
45 # (See http://msdn.microsoft.com/en-us/library/dd374098(v=VS.85).aspx )
46 try:
46 try:
47 import ctypes
47 import ctypes
48
48
49 # pytype: disable=module-attr
49 # pytype: disable=module-attr
50 langid = ctypes.windll.kernel32.GetUserDefaultUILanguage()
50 langid = ctypes.windll.kernel32.GetUserDefaultUILanguage()
51 # pytype: enable=module-attr
51 # pytype: enable=module-attr
52
52
53 _languages = [locale.windows_locale[langid]]
53 _languages = [locale.windows_locale[langid]]
54 except (ImportError, AttributeError, KeyError):
54 except (ImportError, AttributeError, KeyError):
55 # ctypes not found or unknown langid
55 # ctypes not found or unknown langid
56 pass
56 pass
57
57
58
58
59 datapath = pycompat.fsdecode(resourceutil.datapath)
59 datapath = pycompat.fsdecode(resourceutil.datapath)
60 localedir = os.path.join(datapath, 'locale')
60 localedir = os.path.join(datapath, 'locale')
61 t = gettextmod.translation('hg', localedir, _languages, fallback=True)
61 t = gettextmod.translation('hg', localedir, _languages, fallback=True)
62 try:
62 try:
63 _ugettext = t.ugettext # pytype: disable=attribute-error
63 _ugettext = t.ugettext # pytype: disable=attribute-error
64 except AttributeError:
64 except AttributeError:
65 _ugettext = t.gettext
65 _ugettext = t.gettext
66
66
67
67
68 _msgcache = {} # encoding: {message: translation}
68 _msgcache = {} # encoding: {message: translation}
69
69
70
70
71 def gettext(message):
71 def gettext(message):
72 # type: (bytes) -> bytes
72 # type: (bytes) -> bytes
73 """Translate message.
73 """Translate message.
74
74
75 The message is looked up in the catalog to get a Unicode string,
75 The message is looked up in the catalog to get a Unicode string,
76 which is encoded in the local encoding before being returned.
76 which is encoded in the local encoding before being returned.
77
77
78 Important: message is restricted to characters in the encoding
78 Important: message is restricted to characters in the encoding
79 given by sys.getdefaultencoding() which is most likely 'ascii'.
79 given by sys.getdefaultencoding() which is most likely 'ascii'.
80 """
80 """
81 # If message is None, t.ugettext will return u'None' as the
81 # If message is None, t.ugettext will return u'None' as the
82 # translation whereas our callers expect us to return None.
82 # translation whereas our callers expect us to return None.
83 if message is None or not _ugettext:
83 if message is None or not _ugettext:
84 return message
84 return message
85
85
86 cache = _msgcache.setdefault(encoding.encoding, {})
86 cache = _msgcache.setdefault(encoding.encoding, {})
87 if message not in cache:
87 if message not in cache:
88 if type(message) is pycompat.unicode:
88 if type(message) is str:
89 # goofy unicode docstrings in test
89 # goofy unicode docstrings in test
90 paragraphs = message.split(u'\n\n') # type: List[pycompat.unicode]
90 paragraphs = message.split(u'\n\n') # type: List[str]
91 else:
91 else:
92 # should be ascii, but we have unicode docstrings in test, which
92 # should be ascii, but we have unicode docstrings in test, which
93 # are converted to utf-8 bytes on Python 3.
93 # are converted to utf-8 bytes on Python 3.
94 paragraphs = [p.decode("utf-8") for p in message.split(b'\n\n')]
94 paragraphs = [p.decode("utf-8") for p in message.split(b'\n\n')]
95 # Be careful not to translate the empty string -- it holds the
95 # Be careful not to translate the empty string -- it holds the
96 # meta data of the .po file.
96 # meta data of the .po file.
97 u = u'\n\n'.join([p and _ugettext(p) or u'' for p in paragraphs])
97 u = u'\n\n'.join([p and _ugettext(p) or u'' for p in paragraphs])
98 try:
98 try:
99 # encoding.tolocal cannot be used since it will first try to
99 # encoding.tolocal cannot be used since it will first try to
100 # decode the Unicode string. Calling u.decode(enc) really
100 # decode the Unicode string. Calling u.decode(enc) really
101 # means u.encode(sys.getdefaultencoding()).decode(enc). Since
101 # means u.encode(sys.getdefaultencoding()).decode(enc). Since
102 # the Python encoding defaults to 'ascii', this fails if the
102 # the Python encoding defaults to 'ascii', this fails if the
103 # translated string uses non-ASCII characters.
103 # translated string uses non-ASCII characters.
104 encodingstr = pycompat.sysstr(encoding.encoding)
104 encodingstr = pycompat.sysstr(encoding.encoding)
105 cache[message] = u.encode(encodingstr, "replace")
105 cache[message] = u.encode(encodingstr, "replace")
106 except LookupError:
106 except LookupError:
107 # An unknown encoding results in a LookupError.
107 # An unknown encoding results in a LookupError.
108 cache[message] = message
108 cache[message] = message
109 return cache[message]
109 return cache[message]
110
110
111
111
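
The paragraph-wise loop above never hands the empty string to the translator
because GNU gettext reserves the empty msgid for the catalog's metadata
header; translating u'' would splice that header into user-visible output.
The guard, shown with a stand-in translator so the pass-through is visible::

    _ugettext = lambda p: u'<%s>' % p          # stand-in translator
    message = u'first paragraph\n\n\n\nthird'
    paragraphs = message.split(u'\n\n')        # ['first paragraph', '', 'third']
    u = u'\n\n'.join([p and _ugettext(p) or u'' for p in paragraphs])
    print(u)  # '<first paragraph>\n\n\n\n<third>' -- u'' passes through untranslated
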
112 def _plain():
112 def _plain():
113 if (
113 if (
114 b'HGPLAIN' not in encoding.environ
114 b'HGPLAIN' not in encoding.environ
115 and b'HGPLAINEXCEPT' not in encoding.environ
115 and b'HGPLAINEXCEPT' not in encoding.environ
116 ):
116 ):
117 return False
117 return False
118 exceptions = encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
118 exceptions = encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
119 return b'i18n' not in exceptions
119 return b'i18n' not in exceptions
120
120
121
121
122 if _plain():
122 if _plain():
123 _ = lambda message: message # type: Callable[[bytes], bytes]
123 _ = lambda message: message # type: Callable[[bytes], bytes]
124 else:
124 else:
125 _ = gettext
125 _ = gettext
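
Whether _ is a no-op is decided once at import time by _plain(): HGPLAIN
turns translation off unless HGPLAINEXCEPT explicitly excepts i18n. The same
predicate in a standalone, str-keyed form (the original reads
encoding.environ with bytes keys)::

    import os

    def plain(environ=None):
        environ = os.environ if environ is None else environ
        if 'HGPLAIN' not in environ and 'HGPLAINEXCEPT' not in environ:
            return False               # no plain mode requested
        exceptions = environ.get('HGPLAINEXCEPT', '').strip().split(',')
        return 'i18n' not in exceptions

    print(plain({}))                         # False -> translated output
    print(plain({'HGPLAIN': '1'}))           # True  -> gettext bypassed
    print(plain({'HGPLAINEXCEPT': 'i18n'}))  # False -> still translated
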
@@ -1,2303 +1,2303
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Olivia Mackall <olivia@selenic.com>
3 # Copyright Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import errno
9 import errno
10 import glob
10 import glob
11 import os
11 import os
12 import posixpath
12 import posixpath
13 import re
13 import re
14 import subprocess
14 import subprocess
15 import weakref
15 import weakref
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from .pycompat import getattr
25 from .pycompat import getattr
26 from .thirdparty import attr
26 from .thirdparty import attr
27 from . import (
27 from . import (
28 copies as copiesmod,
28 copies as copiesmod,
29 encoding,
29 encoding,
30 error,
30 error,
31 match as matchmod,
31 match as matchmod,
32 obsolete,
32 obsolete,
33 obsutil,
33 obsutil,
34 pathutil,
34 pathutil,
35 phases,
35 phases,
36 policy,
36 policy,
37 pycompat,
37 pycompat,
38 requirements as requirementsmod,
38 requirements as requirementsmod,
39 revsetlang,
39 revsetlang,
40 similar,
40 similar,
41 smartset,
41 smartset,
42 url,
42 url,
43 util,
43 util,
44 vfs,
44 vfs,
45 )
45 )
46
46
47 from .utils import (
47 from .utils import (
48 hashutil,
48 hashutil,
49 procutil,
49 procutil,
50 stringutil,
50 stringutil,
51 )
51 )
52
52
53 if pycompat.iswindows:
53 if pycompat.iswindows:
54 from . import scmwindows as scmplatform
54 from . import scmwindows as scmplatform
55 else:
55 else:
56 from . import scmposix as scmplatform
56 from . import scmposix as scmplatform
57
57
58 parsers = policy.importmod('parsers')
58 parsers = policy.importmod('parsers')
59 rustrevlog = policy.importrust('revlog')
59 rustrevlog = policy.importrust('revlog')
60
60
61 termsize = scmplatform.termsize
61 termsize = scmplatform.termsize
62
62
63
63
64 @attr.s(slots=True, repr=False)
64 @attr.s(slots=True, repr=False)
65 class status(object):
65 class status(object):
66 """Struct with a list of files per status.
66 """Struct with a list of files per status.
67
67
68 The 'deleted', 'unknown' and 'ignored' properties are only
68 The 'deleted', 'unknown' and 'ignored' properties are only
69 relevant to the working copy.
69 relevant to the working copy.
70 """
70 """
71
71
72 modified = attr.ib(default=attr.Factory(list))
72 modified = attr.ib(default=attr.Factory(list))
73 added = attr.ib(default=attr.Factory(list))
73 added = attr.ib(default=attr.Factory(list))
74 removed = attr.ib(default=attr.Factory(list))
74 removed = attr.ib(default=attr.Factory(list))
75 deleted = attr.ib(default=attr.Factory(list))
75 deleted = attr.ib(default=attr.Factory(list))
76 unknown = attr.ib(default=attr.Factory(list))
76 unknown = attr.ib(default=attr.Factory(list))
77 ignored = attr.ib(default=attr.Factory(list))
77 ignored = attr.ib(default=attr.Factory(list))
78 clean = attr.ib(default=attr.Factory(list))
78 clean = attr.ib(default=attr.Factory(list))
79
79
80 def __iter__(self):
80 def __iter__(self):
81 yield self.modified
81 yield self.modified
82 yield self.added
82 yield self.added
83 yield self.removed
83 yield self.removed
84 yield self.deleted
84 yield self.deleted
85 yield self.unknown
85 yield self.unknown
86 yield self.ignored
86 yield self.ignored
87 yield self.clean
87 yield self.clean
88
88
89 def __repr__(self):
89 def __repr__(self):
90 return (
90 return (
91 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
91 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
92 r'unknown=%s, ignored=%s, clean=%s>'
92 r'unknown=%s, ignored=%s, clean=%s>'
93 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
93 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
94
94
95
95
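
Because status defines __iter__ over its seven lists, call sites can
destructure it positionally as well as read it by attribute. A small usage
sketch::

    st = status(modified=[b'a'], added=[b'b'])
    modified, added, removed, deleted, unknown, ignored, clean = st
    assert modified == [b'a'] and added == [b'b'] and clean == []
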
96 def itersubrepos(ctx1, ctx2):
96 def itersubrepos(ctx1, ctx2):
97 """find subrepos in ctx1 or ctx2"""
97 """find subrepos in ctx1 or ctx2"""
98 # Create a (subpath, ctx) mapping where we prefer subpaths from
98 # Create a (subpath, ctx) mapping where we prefer subpaths from
99 # ctx1. The subpaths from ctx2 are important when the .hgsub file
99 # ctx1. The subpaths from ctx2 are important when the .hgsub file
100 # has been modified (in ctx2) but not yet committed (in ctx1).
100 # has been modified (in ctx2) but not yet committed (in ctx1).
101 subpaths = dict.fromkeys(ctx2.substate, ctx2)
101 subpaths = dict.fromkeys(ctx2.substate, ctx2)
102 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
102 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
103
103
104 missing = set()
104 missing = set()
105
105
106 for subpath in ctx2.substate:
106 for subpath in ctx2.substate:
107 if subpath not in ctx1.substate:
107 if subpath not in ctx1.substate:
108 del subpaths[subpath]
108 del subpaths[subpath]
109 missing.add(subpath)
109 missing.add(subpath)
110
110
111 for subpath, ctx in sorted(subpaths.items()):
111 for subpath, ctx in sorted(subpaths.items()):
112 yield subpath, ctx.sub(subpath)
112 yield subpath, ctx.sub(subpath)
113
113
114 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
114 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
115 # status and diff will have an accurate result when it does
115 # status and diff will have an accurate result when it does
116 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
116 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
117 # against itself.
117 # against itself.
118 for subpath in missing:
118 for subpath in missing:
119 yield subpath, ctx2.nullsub(subpath, ctx1)
119 yield subpath, ctx2.nullsub(subpath, ctx1)
120
120
121
121
122 def nochangesfound(ui, repo, excluded=None):
122 def nochangesfound(ui, repo, excluded=None):
123 """Report no changes for push/pull, excluded is None or a list of
123 """Report no changes for push/pull, excluded is None or a list of
124 nodes excluded from the push/pull.
124 nodes excluded from the push/pull.
125 """
125 """
126 secretlist = []
126 secretlist = []
127 if excluded:
127 if excluded:
128 for n in excluded:
128 for n in excluded:
129 ctx = repo[n]
129 ctx = repo[n]
130 if ctx.phase() >= phases.secret and not ctx.extinct():
130 if ctx.phase() >= phases.secret and not ctx.extinct():
131 secretlist.append(n)
131 secretlist.append(n)
132
132
133 if secretlist:
133 if secretlist:
134 ui.status(
134 ui.status(
135 _(b"no changes found (ignored %d secret changesets)\n")
135 _(b"no changes found (ignored %d secret changesets)\n")
136 % len(secretlist)
136 % len(secretlist)
137 )
137 )
138 else:
138 else:
139 ui.status(_(b"no changes found\n"))
139 ui.status(_(b"no changes found\n"))
140
140
141
141
142 def callcatch(ui, func):
142 def callcatch(ui, func):
143 """call func() with global exception handling
143 """call func() with global exception handling
144
144
145 Return func() if no exception happens. Otherwise do some error handling
145 Return func() if no exception happens. Otherwise do some error handling
146 and return an exit code accordingly. Does not handle all exceptions.
146 and return an exit code accordingly. Does not handle all exceptions.
147 """
147 """
148 coarse_exit_code = -1
148 coarse_exit_code = -1
149 detailed_exit_code = -1
149 detailed_exit_code = -1
150 try:
150 try:
151 try:
151 try:
152 return func()
152 return func()
153 except: # re-raises
153 except: # re-raises
154 ui.traceback()
154 ui.traceback()
155 raise
155 raise
156 # Global exception handling, alphabetically
156 # Global exception handling, alphabetically
157 # Mercurial-specific first, followed by built-in and library exceptions
157 # Mercurial-specific first, followed by built-in and library exceptions
158 except error.LockHeld as inst:
158 except error.LockHeld as inst:
159 detailed_exit_code = 20
159 detailed_exit_code = 20
160 if inst.errno == errno.ETIMEDOUT:
160 if inst.errno == errno.ETIMEDOUT:
161 reason = _(b'timed out waiting for lock held by %r') % (
161 reason = _(b'timed out waiting for lock held by %r') % (
162 pycompat.bytestr(inst.locker)
162 pycompat.bytestr(inst.locker)
163 )
163 )
164 else:
164 else:
165 reason = _(b'lock held by %r') % inst.locker
165 reason = _(b'lock held by %r') % inst.locker
166 ui.error(
166 ui.error(
167 _(b"abort: %s: %s\n")
167 _(b"abort: %s: %s\n")
168 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
168 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
169 )
169 )
170 if not inst.locker:
170 if not inst.locker:
171 ui.error(_(b"(lock might be very busy)\n"))
171 ui.error(_(b"(lock might be very busy)\n"))
172 except error.LockUnavailable as inst:
172 except error.LockUnavailable as inst:
173 detailed_exit_code = 20
173 detailed_exit_code = 20
174 ui.error(
174 ui.error(
175 _(b"abort: could not lock %s: %s\n")
175 _(b"abort: could not lock %s: %s\n")
176 % (
176 % (
177 inst.desc or stringutil.forcebytestr(inst.filename),
177 inst.desc or stringutil.forcebytestr(inst.filename),
178 encoding.strtolocal(inst.strerror),
178 encoding.strtolocal(inst.strerror),
179 )
179 )
180 )
180 )
181 except error.RepoError as inst:
181 except error.RepoError as inst:
182 if isinstance(inst, error.RepoLookupError):
182 if isinstance(inst, error.RepoLookupError):
183 detailed_exit_code = 10
183 detailed_exit_code = 10
184 ui.error(_(b"abort: %s\n") % inst)
184 ui.error(_(b"abort: %s\n") % inst)
185 if inst.hint:
185 if inst.hint:
186 ui.error(_(b"(%s)\n") % inst.hint)
186 ui.error(_(b"(%s)\n") % inst.hint)
187 except error.ResponseError as inst:
187 except error.ResponseError as inst:
188 ui.error(_(b"abort: %s") % inst.args[0])
188 ui.error(_(b"abort: %s") % inst.args[0])
189 msg = inst.args[1]
189 msg = inst.args[1]
190 if isinstance(msg, type(u'')):
190 if isinstance(msg, type(u'')):
191 msg = pycompat.sysbytes(msg)
191 msg = pycompat.sysbytes(msg)
192 if msg is None:
192 if msg is None:
193 ui.error(b"\n")
193 ui.error(b"\n")
194 elif not isinstance(msg, bytes):
194 elif not isinstance(msg, bytes):
195 ui.error(b" %r\n" % (msg,))
195 ui.error(b" %r\n" % (msg,))
196 elif not msg:
196 elif not msg:
197 ui.error(_(b" empty string\n"))
197 ui.error(_(b" empty string\n"))
198 else:
198 else:
199 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
199 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
200 except error.CensoredNodeError as inst:
200 except error.CensoredNodeError as inst:
201 ui.error(_(b"abort: file censored %s\n") % inst)
201 ui.error(_(b"abort: file censored %s\n") % inst)
202 except error.WdirUnsupported:
202 except error.WdirUnsupported:
203 ui.error(_(b"abort: working directory revision cannot be specified\n"))
203 ui.error(_(b"abort: working directory revision cannot be specified\n"))
204 except error.Error as inst:
204 except error.Error as inst:
205 if inst.detailed_exit_code is not None:
205 if inst.detailed_exit_code is not None:
206 detailed_exit_code = inst.detailed_exit_code
206 detailed_exit_code = inst.detailed_exit_code
207 if inst.coarse_exit_code is not None:
207 if inst.coarse_exit_code is not None:
208 coarse_exit_code = inst.coarse_exit_code
208 coarse_exit_code = inst.coarse_exit_code
209 ui.error(inst.format())
209 ui.error(inst.format())
210 except error.WorkerError as inst:
210 except error.WorkerError as inst:
211 # Don't print a message -- the worker already should have
211 # Don't print a message -- the worker already should have
212 return inst.status_code
212 return inst.status_code
213 except ImportError as inst:
213 except ImportError as inst:
214 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
214 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
215 m = stringutil.forcebytestr(inst).split()[-1]
215 m = stringutil.forcebytestr(inst).split()[-1]
216 if m in b"mpatch bdiff".split():
216 if m in b"mpatch bdiff".split():
217 ui.error(_(b"(did you forget to compile extensions?)\n"))
217 ui.error(_(b"(did you forget to compile extensions?)\n"))
218 elif m in b"zlib".split():
218 elif m in b"zlib".split():
219 ui.error(_(b"(is your Python install correct?)\n"))
219 ui.error(_(b"(is your Python install correct?)\n"))
220 except util.urlerr.httperror as inst:
220 except util.urlerr.httperror as inst:
221 detailed_exit_code = 100
221 detailed_exit_code = 100
222 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
222 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
223 except util.urlerr.urlerror as inst:
223 except util.urlerr.urlerror as inst:
224 detailed_exit_code = 100
224 detailed_exit_code = 100
225 try: # usually it is in the form (errno, strerror)
225 try: # usually it is in the form (errno, strerror)
226 reason = inst.reason.args[1]
226 reason = inst.reason.args[1]
227 except (AttributeError, IndexError):
227 except (AttributeError, IndexError):
228 # it might be anything, for example a string
228 # it might be anything, for example a string
229 reason = inst.reason
229 reason = inst.reason
230 if isinstance(reason, pycompat.unicode):
230 if isinstance(reason, str):
231 # SSLError of Python 2.7.9 contains a unicode
231 # SSLError of Python 2.7.9 contains a unicode
232 reason = encoding.unitolocal(reason)
232 reason = encoding.unitolocal(reason)
233 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
233 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
234 except (IOError, OSError) as inst:
234 except (IOError, OSError) as inst:
235 if (
235 if (
236 util.safehasattr(inst, b"args")
236 util.safehasattr(inst, b"args")
237 and inst.args
237 and inst.args
238 and inst.args[0] == errno.EPIPE
238 and inst.args[0] == errno.EPIPE
239 ):
239 ):
240 pass
240 pass
241 elif getattr(inst, "strerror", None): # common IOError or OSError
241 elif getattr(inst, "strerror", None): # common IOError or OSError
242 if getattr(inst, "filename", None) is not None:
242 if getattr(inst, "filename", None) is not None:
243 ui.error(
243 ui.error(
244 _(b"abort: %s: '%s'\n")
244 _(b"abort: %s: '%s'\n")
245 % (
245 % (
246 encoding.strtolocal(inst.strerror),
246 encoding.strtolocal(inst.strerror),
247 stringutil.forcebytestr(inst.filename),
247 stringutil.forcebytestr(inst.filename),
248 )
248 )
249 )
249 )
250 else:
250 else:
251 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
251 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
252 else: # suspicious IOError
252 else: # suspicious IOError
253 raise
253 raise
254 except MemoryError:
254 except MemoryError:
255 ui.error(_(b"abort: out of memory\n"))
255 ui.error(_(b"abort: out of memory\n"))
256 except SystemExit as inst:
256 except SystemExit as inst:
257 # Commands shouldn't sys.exit directly, but give a return code.
257 # Commands shouldn't sys.exit directly, but give a return code.
258 # Just in case catch this and and pass exit code to caller.
258 # Just in case catch this and and pass exit code to caller.
259 detailed_exit_code = 254
259 detailed_exit_code = 254
260 coarse_exit_code = inst.code
260 coarse_exit_code = inst.code
261
261
262 if ui.configbool(b'ui', b'detailed-exit-code'):
262 if ui.configbool(b'ui', b'detailed-exit-code'):
263 return detailed_exit_code
263 return detailed_exit_code
264 else:
264 else:
265 return coarse_exit_code
265 return coarse_exit_code
266
266
267
267
268 def checknewlabel(repo, lbl, kind):
268 def checknewlabel(repo, lbl, kind):
269 # Do not use the "kind" parameter in ui output.
269 # Do not use the "kind" parameter in ui output.
270 # It makes strings difficult to translate.
270 # It makes strings difficult to translate.
271 if lbl in [b'tip', b'.', b'null']:
271 if lbl in [b'tip', b'.', b'null']:
272 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
272 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
273 for c in (b':', b'\0', b'\n', b'\r'):
273 for c in (b':', b'\0', b'\n', b'\r'):
274 if c in lbl:
274 if c in lbl:
275 raise error.InputError(
275 raise error.InputError(
276 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
276 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
277 )
277 )
278 try:
278 try:
279 int(lbl)
279 int(lbl)
280 raise error.InputError(_(b"cannot use an integer as a name"))
280 raise error.InputError(_(b"cannot use an integer as a name"))
281 except ValueError:
281 except ValueError:
282 pass
282 pass
283 if lbl.strip() != lbl:
283 if lbl.strip() != lbl:
284 raise error.InputError(
284 raise error.InputError(
285 _(b"leading or trailing whitespace in name %r") % lbl
285 _(b"leading or trailing whitespace in name %r") % lbl
286 )
286 )
287
287
288
288
289 def checkfilename(f):
289 def checkfilename(f):
290 '''Check that the filename f is an acceptable filename for a tracked file'''
290 '''Check that the filename f is an acceptable filename for a tracked file'''
291 if b'\r' in f or b'\n' in f:
291 if b'\r' in f or b'\n' in f:
292 raise error.InputError(
292 raise error.InputError(
293 _(b"'\\n' and '\\r' disallowed in filenames: %r")
293 _(b"'\\n' and '\\r' disallowed in filenames: %r")
294 % pycompat.bytestr(f)
294 % pycompat.bytestr(f)
295 )
295 )
296
296
297
297
298 def checkportable(ui, f):
298 def checkportable(ui, f):
299 '''Check if filename f is portable and warn or abort depending on config'''
299 '''Check if filename f is portable and warn or abort depending on config'''
300 checkfilename(f)
300 checkfilename(f)
301 abort, warn = checkportabilityalert(ui)
301 abort, warn = checkportabilityalert(ui)
302 if abort or warn:
302 if abort or warn:
303 msg = util.checkwinfilename(f)
303 msg = util.checkwinfilename(f)
304 if msg:
304 if msg:
305 msg = b"%s: %s" % (msg, procutil.shellquote(f))
305 msg = b"%s: %s" % (msg, procutil.shellquote(f))
306 if abort:
306 if abort:
307 raise error.InputError(msg)
307 raise error.InputError(msg)
308 ui.warn(_(b"warning: %s\n") % msg)
308 ui.warn(_(b"warning: %s\n") % msg)
309
309
310
310
311 def checkportabilityalert(ui):
311 def checkportabilityalert(ui):
312 """check if the user's config requests nothing, a warning, or abort for
312 """check if the user's config requests nothing, a warning, or abort for
313 non-portable filenames"""
313 non-portable filenames"""
314 val = ui.config(b'ui', b'portablefilenames')
314 val = ui.config(b'ui', b'portablefilenames')
315 lval = val.lower()
315 lval = val.lower()
316 bval = stringutil.parsebool(val)
316 bval = stringutil.parsebool(val)
317 abort = pycompat.iswindows or lval == b'abort'
317 abort = pycompat.iswindows or lval == b'abort'
318 warn = bval or lval == b'warn'
318 warn = bval or lval == b'warn'
319 if bval is None and not (warn or abort or lval == b'ignore'):
319 if bval is None and not (warn or abort or lval == b'ignore'):
320 raise error.ConfigError(
320 raise error.ConfigError(
321 _(b"ui.portablefilenames value is invalid ('%s')") % val
321 _(b"ui.portablefilenames value is invalid ('%s')") % val
322 )
322 )
323 return abort, warn
323 return abort, warn
324
324
325
325
326 class casecollisionauditor(object):
326 class casecollisionauditor(object):
327 def __init__(self, ui, abort, dirstate):
327 def __init__(self, ui, abort, dirstate):
328 self._ui = ui
328 self._ui = ui
329 self._abort = abort
329 self._abort = abort
330 allfiles = b'\0'.join(dirstate)
330 allfiles = b'\0'.join(dirstate)
331 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
331 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
332 self._dirstate = dirstate
332 self._dirstate = dirstate
333 # The purpose of _newfiles is so that we don't complain about
333 # The purpose of _newfiles is so that we don't complain about
334 # case collisions if someone were to call this object with the
334 # case collisions if someone were to call this object with the
335 # same filename twice.
335 # same filename twice.
336 self._newfiles = set()
336 self._newfiles = set()
337
337
338 def __call__(self, f):
338 def __call__(self, f):
339 if f in self._newfiles:
339 if f in self._newfiles:
340 return
340 return
341 fl = encoding.lower(f)
341 fl = encoding.lower(f)
342 if fl in self._loweredfiles and f not in self._dirstate:
342 if fl in self._loweredfiles and f not in self._dirstate:
343 msg = _(b'possible case-folding collision for %s') % f
343 msg = _(b'possible case-folding collision for %s') % f
344 if self._abort:
344 if self._abort:
345 raise error.StateError(msg)
345 raise error.StateError(msg)
346 self._ui.warn(_(b"warning: %s\n") % msg)
346 self._ui.warn(_(b"warning: %s\n") % msg)
347 self._loweredfiles.add(fl)
347 self._loweredfiles.add(fl)
348 self._newfiles.add(f)
348 self._newfiles.add(f)
349
349
350
350
351 def filteredhash(repo, maxrev, needobsolete=False):
351 def filteredhash(repo, maxrev, needobsolete=False):
352 """build hash of filtered revisions in the current repoview.
352 """build hash of filtered revisions in the current repoview.
353
353
354 Multiple caches perform up-to-date validation by checking that the
354 Multiple caches perform up-to-date validation by checking that the
355 tiprev and tipnode stored in the cache file match the current repository.
355 tiprev and tipnode stored in the cache file match the current repository.
356 However, this is not sufficient for validating repoviews because the set
356 However, this is not sufficient for validating repoviews because the set
357 of revisions in the view may change without the repository tiprev and
357 of revisions in the view may change without the repository tiprev and
358 tipnode changing.
358 tipnode changing.
359
359
360 This function hashes all the revs filtered from the view (and, optionally,
360 This function hashes all the revs filtered from the view (and, optionally,
361 all obsolete revs) up to maxrev and returns that SHA-1 digest.
361 all obsolete revs) up to maxrev and returns that SHA-1 digest.
362 """
362 """
363 cl = repo.changelog
363 cl = repo.changelog
364 if needobsolete:
364 if needobsolete:
365 obsrevs = obsolete.getrevs(repo, b'obsolete')
365 obsrevs = obsolete.getrevs(repo, b'obsolete')
366 if not cl.filteredrevs and not obsrevs:
366 if not cl.filteredrevs and not obsrevs:
367 return None
367 return None
368 key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
368 key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
369 else:
369 else:
370 if not cl.filteredrevs:
370 if not cl.filteredrevs:
371 return None
371 return None
372 key = maxrev
372 key = maxrev
373 obsrevs = frozenset()
373 obsrevs = frozenset()
374
374
375 result = cl._filteredrevs_hashcache.get(key)
375 result = cl._filteredrevs_hashcache.get(key)
376 if not result:
376 if not result:
377 revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
377 revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
378 if revs:
378 if revs:
379 s = hashutil.sha1()
379 s = hashutil.sha1()
380 for rev in revs:
380 for rev in revs:
381 s.update(b'%d;' % rev)
381 s.update(b'%d;' % rev)
382 result = s.digest()
382 result = s.digest()
383 cl._filteredrevs_hashcache[key] = result
383 cl._filteredrevs_hashcache[key] = result
384 return result
384 return result
385
385
386
386
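
The digest is a SHA-1 over the sorted filtered (and optionally obsolete)
revisions, each serialized as a decimal plus b';'. The computation in
isolation, using hashlib directly where the original goes through Mercurial's
hashutil wrapper::

    import hashlib

    def revs_digest(revs):
        s = hashlib.sha1()
        for rev in sorted(revs):
            s.update(b'%d;' % rev)   # same serialization as filteredhash()
        return s.digest()

    print(revs_digest({3, 1, 2}).hex())  # stable regardless of input order
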
387 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
387 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
388 """yield every hg repository under path, always recursively.
388 """yield every hg repository under path, always recursively.
389 The recurse flag will only control recursion into repo working dirs."""
389 The recurse flag will only control recursion into repo working dirs."""
390
390
391 def errhandler(err):
391 def errhandler(err):
392 if err.filename == path:
392 if err.filename == path:
393 raise err
393 raise err
394
394
395 samestat = getattr(os.path, 'samestat', None)
395 samestat = getattr(os.path, 'samestat', None)
396 if followsym and samestat is not None:
396 if followsym and samestat is not None:
397
397
398 def adddir(dirlst, dirname):
398 def adddir(dirlst, dirname):
399 dirstat = os.stat(dirname)
399 dirstat = os.stat(dirname)
400 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
400 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
401 if not match:
401 if not match:
402 dirlst.append(dirstat)
402 dirlst.append(dirstat)
403 return not match
403 return not match
404
404
405 else:
405 else:
406 followsym = False
406 followsym = False
407
407
408 if (seen_dirs is None) and followsym:
408 if (seen_dirs is None) and followsym:
409 seen_dirs = []
409 seen_dirs = []
410 adddir(seen_dirs, path)
410 adddir(seen_dirs, path)
411 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
411 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
412 dirs.sort()
412 dirs.sort()
413 if b'.hg' in dirs:
413 if b'.hg' in dirs:
414 yield root # found a repository
414 yield root # found a repository
415 qroot = os.path.join(root, b'.hg', b'patches')
415 qroot = os.path.join(root, b'.hg', b'patches')
416 if os.path.isdir(os.path.join(qroot, b'.hg')):
416 if os.path.isdir(os.path.join(qroot, b'.hg')):
417 yield qroot # we have a patch queue repo here
417 yield qroot # we have a patch queue repo here
418 if recurse:
418 if recurse:
419 # avoid recursing inside the .hg directory
419 # avoid recursing inside the .hg directory
420 dirs.remove(b'.hg')
420 dirs.remove(b'.hg')
421 else:
421 else:
422 dirs[:] = [] # don't descend further
422 dirs[:] = [] # don't descend further
423 elif followsym:
423 elif followsym:
424 newdirs = []
424 newdirs = []
425 for d in dirs:
425 for d in dirs:
426 fname = os.path.join(root, d)
426 fname = os.path.join(root, d)
427 if adddir(seen_dirs, fname):
427 if adddir(seen_dirs, fname):
428 if os.path.islink(fname):
428 if os.path.islink(fname):
429 for hgname in walkrepos(fname, True, seen_dirs):
429 for hgname in walkrepos(fname, True, seen_dirs):
430 yield hgname
430 yield hgname
431 else:
431 else:
432 newdirs.append(d)
432 newdirs.append(d)
433 dirs[:] = newdirs
433 dirs[:] = newdirs
434
434
435
435
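
walkrepos() yields each repository root it finds, plus any MQ patch-queue
repository nested under .hg/patches, so enumerating everything below a
directory is a short loop; the path here is hypothetical::

    from mercurial import scmutil

    for root in scmutil.walkrepos(b'/srv/hg', followsym=True):
        print(root)   # bytes path of each repository found
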
436 def binnode(ctx):
436 def binnode(ctx):
437 """Return binary node id for a given basectx"""
437 """Return binary node id for a given basectx"""
438 node = ctx.node()
438 node = ctx.node()
439 if node is None:
439 if node is None:
440 return ctx.repo().nodeconstants.wdirid
440 return ctx.repo().nodeconstants.wdirid
441 return node
441 return node
442
442
443
443
444 def intrev(ctx):
444 def intrev(ctx):
445 """Return integer for a given basectx that can be used in comparison or
445 """Return integer for a given basectx that can be used in comparison or
446 arithmetic operation"""
446 arithmetic operation"""
447 rev = ctx.rev()
447 rev = ctx.rev()
448 if rev is None:
448 if rev is None:
449 return wdirrev
449 return wdirrev
450 return rev
450 return rev
451
451
452
452
453 def formatchangeid(ctx):
453 def formatchangeid(ctx):
454 """Format changectx as '{rev}:{node|formatnode}', which is the default
454 """Format changectx as '{rev}:{node|formatnode}', which is the default
455 template provided by logcmdutil.changesettemplater"""
455 template provided by logcmdutil.changesettemplater"""
456 repo = ctx.repo()
456 repo = ctx.repo()
457 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
457 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
458
458
459
459
460 def formatrevnode(ui, rev, node):
460 def formatrevnode(ui, rev, node):
461 """Format given revision and node depending on the current verbosity"""
461 """Format given revision and node depending on the current verbosity"""
462 if ui.debugflag:
462 if ui.debugflag:
463 hexfunc = hex
463 hexfunc = hex
464 else:
464 else:
465 hexfunc = short
465 hexfunc = short
466 return b'%d:%s' % (rev, hexfunc(node))
466 return b'%d:%s' % (rev, hexfunc(node))
467
467
468
468
469 def resolvehexnodeidprefix(repo, prefix):
469 def resolvehexnodeidprefix(repo, prefix):
470 if prefix.startswith(b'x'):
470 if prefix.startswith(b'x'):
471 prefix = prefix[1:]
471 prefix = prefix[1:]
472 try:
472 try:
473 # Uses unfiltered repo because it's faster when prefix is ambiguous/
473 # Uses unfiltered repo because it's faster when prefix is ambiguous/
474 # This matches the shortesthexnodeidprefix() function below.
474 # This matches the shortesthexnodeidprefix() function below.
475 node = repo.unfiltered().changelog._partialmatch(prefix)
475 node = repo.unfiltered().changelog._partialmatch(prefix)
476 except error.AmbiguousPrefixLookupError:
476 except error.AmbiguousPrefixLookupError:
477 revset = repo.ui.config(
477 revset = repo.ui.config(
478 b'experimental', b'revisions.disambiguatewithin'
478 b'experimental', b'revisions.disambiguatewithin'
479 )
479 )
480 if revset:
480 if revset:
481 # Clear config to avoid infinite recursion
481 # Clear config to avoid infinite recursion
482 configoverrides = {
482 configoverrides = {
483 (b'experimental', b'revisions.disambiguatewithin'): None
483 (b'experimental', b'revisions.disambiguatewithin'): None
484 }
484 }
485 with repo.ui.configoverride(configoverrides):
485 with repo.ui.configoverride(configoverrides):
486 revs = repo.anyrevs([revset], user=True)
486 revs = repo.anyrevs([revset], user=True)
487 matches = []
487 matches = []
488 for rev in revs:
488 for rev in revs:
489 node = repo.changelog.node(rev)
489 node = repo.changelog.node(rev)
490 if hex(node).startswith(prefix):
490 if hex(node).startswith(prefix):
491 matches.append(node)
491 matches.append(node)
492 if len(matches) == 1:
492 if len(matches) == 1:
493 return matches[0]
493 return matches[0]
494 raise
494 raise
495 if node is None:
495 if node is None:
496 return
496 return
497 repo.changelog.rev(node) # make sure node isn't filtered
497 repo.changelog.rev(node) # make sure node isn't filtered
498 return node
498 return node
499
499
500
500
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # A pure int starting with zero cannot be confused with a rev, nor
        # can an int larger than the value of the tip rev. We still need to
        # disambiguate if prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False

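# Worked examples (hypothetical repo with 100 revisions):
#
#     mayberevnum(repo, b'42')   # True: a plausible revnum
#     mayberevnum(repo, b'042')  # False: leading zero, never a revnum
#     mayberevnum(repo, b'999')  # False: beyond the tip rev
#     mayberevnum(repo, b'abc')  # False: not an integer at all
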
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. So we look for hash collisions in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the Rust wrapper needs to give access to its
                        # internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()

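# Sketch of the 'x' marker (assuming experimental.revisions.prefixhexnode is
# enabled): a shortest prefix that could also be read as a revnum, say
# b'1234', is returned as b'x1234'; resolvehexnodeidprefix() above strips the
# leading b'x' again on lookup.
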
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 2 * repo.nodeconstants.nodelen:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)

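# Hedged usage sketch (hypothetical symbols):
#
#     revsymbol(repo, b'tip')       # symbolic name
#     revsymbol(repo, b'1234')      # revision number
#     revsymbol(repo, b'deadbeef')  # nodeid prefix
#
# Passing a non-bytes symbol (e.g. a str) raises ProgrammingError, per the
# isinstance check above.
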
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (e.g. evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.InputError(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.InputError(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.InputError(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]

def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)

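# Hedged usage sketch: integers and preformatted revset strings may be mixed,
# and the results are unioned into one smartset.
#
#     revs = revrange(repo, [0, b'draft()'])
#     for rev in revs:
#         pass  # process each revision number
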
def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2

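# With the defaults this yields 8, 16, 32, ..., 512 and then repeats 512
# forever; callers stop pulling window sizes once their input is exhausted.
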
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()

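# Hedged usage sketch (prepare and makefilematcher are caller-supplied):
#
#     def prepare(ctx, fmatch):
#         pass  # gather per-revision data in forward order
#
#     for ctx in walkchangerevs(repo, revs, makefilematcher, prepare):
#         pass  # display ctx in the order given by revs
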
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents

def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (i.e. cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath

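# Hedged usage sketch, mirroring the call sites further below:
#
#     uipathfn = getuipathfn(repo, legacyrelativevalue=True)
#     repo.ui.status(_(b'adding %s\n') % uipathfn(f))
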
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude, were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))

def expandpats(pats):
    """Expand bare globs when running on Windows.
    On POSIX we assume it has already been done by the shell."""
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

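# Hypothetical illustration: on Windows (util.expandglobs is true), a bare
# pattern such as b'*.py' is expanded via glob.glob(), while a pattern with an
# explicit kind like b'glob:*.py' is passed through unchanged for the matcher.
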
def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]

def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch the user-defined path from the config file: [ui] origbackuppath = <path>
    Fall back to the default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

    # Remove any files that conflict with the backup file's path
    for f in reversed(list(pathutil.finddirs(filepath))):
        if origvfs.isfileorlink(f):
            ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
            origvfs.unlink(f)
            break

    origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)

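# Hedged behavior sketch (paths hypothetical): with ui.origbackuppath unset,
# backuppath(ui, repo, b'dir/f') returns the working copy path b'dir/f.orig';
# with it set to b'.origbackups', the backup lands under that directory
# instead, after any conflicting files or directories are cleared away.
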
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))

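# Minimal sketch: wrapping a rev container lets callers test membership by
# node, as cleanupnodes() does below.
#
#     deletenodes = _containsnode(repo, deleterevs)
#     node in deletenodes  # True iff repo.changelog.rev(node) is in deleterevs
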
def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non-tuple "source" keys to tuples for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with the biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = repo.nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )

def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.InputError(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.InputError(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean."""
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in walkresults.items():
        entry = dirstate.get_entry(abs)
        if (not entry.any_tracked) and audit_path.check(abs):
            unknown.append(abs)
        elif (not entry.removed) and not st:
            deleted.append(abs)
        elif entry.removed and st:
            forgotten.append(abs)
        # for finding renames
        elif entry.removed and not st:
            removed.append(abs)
        elif entry.added:
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames

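# Note: the returned mapping is keyed by the *new* name ({new: old}), which is
# the shape _markchanges() below expects when recording copies via
# wctx.copy(old, new).
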
def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.items():
            wctx.copy(old, new)

1390 def getrenamedfn(repo, endrev=None):
1390 def getrenamedfn(repo, endrev=None):
1391 if copiesmod.usechangesetcentricalgo(repo):
1391 if copiesmod.usechangesetcentricalgo(repo):
1392
1392
1393 def getrenamed(fn, rev):
1393 def getrenamed(fn, rev):
1394 ctx = repo[rev]
1394 ctx = repo[rev]
1395 p1copies = ctx.p1copies()
1395 p1copies = ctx.p1copies()
1396 if fn in p1copies:
1396 if fn in p1copies:
1397 return p1copies[fn]
1397 return p1copies[fn]
1398 p2copies = ctx.p2copies()
1398 p2copies = ctx.p2copies()
1399 if fn in p2copies:
1399 if fn in p2copies:
1400 return p2copies[fn]
1400 return p2copies[fn]
1401 return None
1401 return None
1402
1402
1403 return getrenamed
1403 return getrenamed
1404
1404
1405 rcache = {}
1405 rcache = {}
1406 if endrev is None:
1406 if endrev is None:
1407 endrev = len(repo)
1407 endrev = len(repo)
1408
1408
1409 def getrenamed(fn, rev):
1409 def getrenamed(fn, rev):
1410 """looks up all renames for a file (up to endrev) the first
1410 """looks up all renames for a file (up to endrev) the first
1411 time the file is given. It indexes on the changerev and only
1411 time the file is given. It indexes on the changerev and only
1412 parses the manifest if linkrev != changerev.
1412 parses the manifest if linkrev != changerev.
1413 Returns rename info for fn at changerev rev."""
1413 Returns rename info for fn at changerev rev."""
1414 if fn not in rcache:
1414 if fn not in rcache:
1415 rcache[fn] = {}
1415 rcache[fn] = {}
1416 fl = repo.file(fn)
1416 fl = repo.file(fn)
1417 for i in fl:
1417 for i in fl:
1418 lr = fl.linkrev(i)
1418 lr = fl.linkrev(i)
1419 renamed = fl.renamed(fl.node(i))
1419 renamed = fl.renamed(fl.node(i))
1420 rcache[fn][lr] = renamed and renamed[0]
1420 rcache[fn][lr] = renamed and renamed[0]
1421 if lr >= endrev:
1421 if lr >= endrev:
1422 break
1422 break
1423 if rev in rcache[fn]:
1423 if rev in rcache[fn]:
1424 return rcache[fn][rev]
1424 return rcache[fn][rev]
1425
1425
1426 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1426 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1427 # filectx logic.
1427 # filectx logic.
1428 try:
1428 try:
1429 return repo[rev][fn].copysource()
1429 return repo[rev][fn].copysource()
1430 except error.LookupError:
1430 except error.LookupError:
1431 return None
1431 return None
1432
1432
1433 return getrenamed
1433 return getrenamed


def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        entry = repo.dirstate.get_entry(dst)
        if (entry.added or not entry.tracked) and not dryrun:
            repo.dirstate.set_tracked(dst)
    else:
        if repo.dirstate.get_entry(origsrc).added and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if not repo.dirstate.get_entry(dst).tracked and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), repo.nullid)
    s = newctx.status(oldctx, match=match)

    for f in s.modified:
        ds.update_file_p1(f, p1_tracked=True)

    for f in s.added:
        ds.update_file_p1(f, p1_tracked=False)

    for f in s.removed:
        ds.update_file_p1(f, p1_tracked=True)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {dst: oldcopies.get(src, src) for dst, src in oldcopies.items()}
    # Adjust the dirstate copies
    for dst, src in copies.items():
        if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: requirements which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None
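

# Illustrative sketch (commented out): with share-safe enabled, requirements
# that describe the working copy are split from store requirements. The
# requirement names below are examples only; membership in
# requirementsmod.WORKING_DIR_REQUIREMENTS decides the split.
#
#     reqs = {b'share-safe', b'store', b'revlogv1', b'dirstate-v2'}
#     wcreq, storereq = filterrequirements(reqs)
#     # wcreq would hold the working-dir side (e.g. b'share-safe',
#     # b'dirstate-v2'), storereq the rest (b'store', b'revlogv1').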


def istreemanifest(repo):
    """returns whether the repository is using treemanifest or not"""
    return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements


def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled and which requirements are wdir
    requirements and which are store requirements
    """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
    elif repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)


class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def tracked_paths(self, obj):
        return [self.join(obj, path) for path in self.paths]

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = self.tracked_paths(obj)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modifies the file between the time we read and stat it
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # a function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = self.tracked_paths(obj)
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
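

# Illustrative sketch (commented out, names hypothetical): a subclass supplies
# join() so tracked paths resolve against an object's vfs, and the decorated
# method is recomputed only when the stat data of those paths changes.
#
#     class myfilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)  # assumes obj carries a 'vfs'
#
#     class fakerepo(object):
#         def __init__(self, vfs):
#             self.vfs = vfs
#             self._filecache = {}  # required by the descriptor above
#
#         @myfilecache(b'bookmarks')
#         def bookmarks(self):
#             return self.vfs.read(b'bookmarks')
#
# Repeated attribute reads reuse the cached value; after delattr(repo,
# 'bookmarks'), the next access re-stats the file and recomputes if needed.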


def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
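

# Illustrative sketch (commented out): an [extdata] source maps revisions to
# freeform values. 'bugid' is a hypothetical source name and the shell
# command merely echoes "<revision> <value>" records, one per line.
#
#     [extdata]
#     bugid = shell:cat extdata.txt
#
#     data = extdatasource(repo, b'bugid')
#     # data is {rev: b'value', ...} for every record whose revision
#     # specifier resolved locally; unknown revisions were skipped.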


class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
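

# Illustrative sketch (commented out): the class is a context manager, so
# complete() runs even when the loop raises. Callers normally obtain an
# instance via ui.makeprogress() rather than constructing one directly;
# 'updatebar', 'files' and 'check' below are placeholders.
#
#     with progress(ui, updatebar, b'checking', unit=b'files',
#                   total=len(files)) as prog:
#         for f in files:
#             check(f)
#             prog.increment(item=f)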


def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
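

# Illustrative sketch (commented out): round-tripping a mapping through the
# format. 'vfs' stands for any vfs rooted where the file should live; the
# path, keys and values are examples only.
#
#     skv = simplekeyvaluefile(vfs, b'mystate')
#     skv.write({b'name': b'default', b'branch': b'stable'},
#               firstline=b'2')  # e.g. a format version marker
#     state = skv.read(firstlinenonkeyval=True)
#     # state == {b'__firstline': b'2', b'name': b'default',
#     #           b'branch': b'stable'}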


_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
      fetch at each revision. If any of the match elements is None, it matches
      all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)


# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
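

# Illustrative sketch (commented out): an extension can register a prefetch
# function so files are fetched in one batch before a command reads them.
# The extension name and function body below are hypothetical.
#
#     def _prefetch(repo, revmatches):
#         for rev, match in revmatches:
#             for f in repo[rev].walk(match):
#                 pass  # e.g. queue f for a batched remote fetch
#
#     fileprefetchhooks.add(b'myextension', _prefetch)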


def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

    if obsolete.isenabled(
        repo, obsolete.createmarkersopt
    ) and repo.ui.configbool(
        b'experimental', b'evolution.report-instabilities'
    ):
        instabilitytypes = [
            (b'orphan', b'orphan'),
            (b'phase-divergent', b'phasedivergent'),
            (b'content-divergent', b'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(
                    set(obsolete.getrevs(repo, revset)) - filtered
                )
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)

        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (
                    newinstabilitycounts[instability]
                    - oldinstabilitycounts[instability]
                )
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))
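

# Illustrative sketch (commented out): callers register the summary callbacks
# right after opening a transaction; which reports fire depends on whether
# txnname matches the _report*source prefixes above. The transaction name is
# an example.
#
#     with repo.transaction(b'unbundle') as tr:
#         registersummarycallback(repo, tr, txnname=b'unbundle')
#         # ... apply the incoming bundle; on close, the registered
#         # callbacks print the "added ... changesets" style summaries.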


def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(filtername)
    # possible improvement: we could restrict the check to affected branches
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not specs:
        return repo

    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)


def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark

    If the bookmarked revision isn't a head, an empty set will be returned.
    """
    return repo.revs(format_bookmark_revspec(mark))


def format_bookmark_revspec(mark):
    """Build a revset expression to select revisions reachable by a given
    bookmark"""
    mark = b'literal:' + mark
    return revsetlang.formatspec(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
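

# Illustrative sketch: for a bookmark b'feature', the formatted revset is
# roughly (quoting simplified; formatspec handles the actual escaping)
#
#     ancestors(bookmark("literal:feature"))
#       - ancestors(head() and not bookmark("literal:feature"))
#       - ancestors(bookmark() and not bookmark("literal:feature"))
#
# so bookmarkrevs(repo, b'feature') selects only the changesets exclusively
# reachable from that bookmark's head.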
@@ -1,556 +1,554
1 # templatefilters.py - common template expansion filters
1 # templatefilters.py - common template expansion filters
2 #
2 #
3 # Copyright 2005-2008 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2008 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import os
9 import os
10 import re
10 import re
11 import time
11 import time
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import hex
14 from .node import hex
15 from . import (
15 from . import (
16 encoding,
16 encoding,
17 error,
17 error,
18 pycompat,
18 pycompat,
19 registrar,
19 registrar,
20 smartset,
20 smartset,
21 templateutil,
21 templateutil,
22 url,
22 url,
23 util,
23 util,
24 )
24 )
25 from .utils import (
25 from .utils import (
26 cborutil,
26 cborutil,
27 dateutil,
27 dateutil,
28 stringutil,
28 stringutil,
29 )
29 )
30
30
31 urlerr = util.urlerr
31 urlerr = util.urlerr
32 urlreq = util.urlreq
32 urlreq = util.urlreq
33
33
34 # filters are callables like:
34 # filters are callables like:
35 # fn(obj)
35 # fn(obj)
36 # with:
36 # with:
37 # obj - object to be filtered (text, date, list and so on)
37 # obj - object to be filtered (text, date, list and so on)
38 filters = {}
38 filters = {}
39
39
40 templatefilter = registrar.templatefilter(filters)
40 templatefilter = registrar.templatefilter(filters)
41
41
42
42
43 @templatefilter(b'addbreaks', intype=bytes)
43 @templatefilter(b'addbreaks', intype=bytes)
44 def addbreaks(text):
44 def addbreaks(text):
45 """Any text. Add an XHTML "<br />" tag before the end of
45 """Any text. Add an XHTML "<br />" tag before the end of
46 every line except the last.
46 every line except the last.
47 """
47 """
48 return text.replace(b'\n', b'<br/>\n')
48 return text.replace(b'\n', b'<br/>\n')
49
49
50
50
51 agescales = [
51 agescales = [
52 (b"year", 3600 * 24 * 365, b'Y'),
52 (b"year", 3600 * 24 * 365, b'Y'),
53 (b"month", 3600 * 24 * 30, b'M'),
53 (b"month", 3600 * 24 * 30, b'M'),
54 (b"week", 3600 * 24 * 7, b'W'),
54 (b"week", 3600 * 24 * 7, b'W'),
55 (b"day", 3600 * 24, b'd'),
55 (b"day", 3600 * 24, b'd'),
56 (b"hour", 3600, b'h'),
56 (b"hour", 3600, b'h'),
57 (b"minute", 60, b'm'),
57 (b"minute", 60, b'm'),
58 (b"second", 1, b's'),
58 (b"second", 1, b's'),
59 ]
59 ]
60
60
61
61
62 @templatefilter(b'age', intype=templateutil.date)
62 @templatefilter(b'age', intype=templateutil.date)
63 def age(date, abbrev=False):
63 def age(date, abbrev=False):
64 """Date. Returns a human-readable date/time difference between the
64 """Date. Returns a human-readable date/time difference between the
65 given date/time and the current date/time.
65 given date/time and the current date/time.
66 """
66 """
67
67
68 def plural(t, c):
68 def plural(t, c):
69 if c == 1:
69 if c == 1:
70 return t
70 return t
71 return t + b"s"
71 return t + b"s"
72
72
73 def fmt(t, c, a):
73 def fmt(t, c, a):
74 if abbrev:
74 if abbrev:
75 return b"%d%s" % (c, a)
75 return b"%d%s" % (c, a)
76 return b"%d %s" % (c, plural(t, c))
76 return b"%d %s" % (c, plural(t, c))
77
77
78 now = time.time()
78 now = time.time()
79 then = date[0]
79 then = date[0]
80 future = False
80 future = False
81 if then > now:
81 if then > now:
82 future = True
82 future = True
83 delta = max(1, int(then - now))
83 delta = max(1, int(then - now))
84 if delta > agescales[0][1] * 30:
84 if delta > agescales[0][1] * 30:
85 return b'in the distant future'
85 return b'in the distant future'
86 else:
86 else:
87 delta = max(1, int(now - then))
87 delta = max(1, int(now - then))
88 if delta > agescales[0][1] * 2:
88 if delta > agescales[0][1] * 2:
89 return dateutil.shortdate(date)
89 return dateutil.shortdate(date)
90
90
91 for t, s, a in agescales:
91 for t, s, a in agescales:
92 n = delta // s
92 n = delta // s
93 if n >= 2 or s == 1:
93 if n >= 2 or s == 1:
94 if future:
94 if future:
95 return b'%s from now' % fmt(t, n, a)
95 return b'%s from now' % fmt(t, n, a)
96 return b'%s ago' % fmt(t, n, a)
96 return b'%s ago' % fmt(t, n, a)
97
97
98
98
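The scale loop above picks the largest unit in which the difference reaches at least 2, falling back to seconds, so a 90-minute difference reports minutes rather than "1 hour". A condensed standalone restatement of that loop (not the Mercurial code itself):

agescales = [("year", 3600 * 24 * 365), ("month", 3600 * 24 * 30),
             ("week", 3600 * 24 * 7), ("day", 3600 * 24),
             ("hour", 3600), ("minute", 60), ("second", 1)]

def describe(delta):
    # pick the largest unit where the count reaches 2, else keep descending
    for name, secs in agescales:
        n = delta // secs
        if n >= 2 or secs == 1:
            return "%d %s%s ago" % (n, name, "" if n == 1 else "s")

print(describe(5400))   # 90 minutes ago (1 hour is below the 2-unit cutoff)
print(describe(7200))   # 2 hours ago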
99 @templatefilter(b'basename', intype=bytes)
99 @templatefilter(b'basename', intype=bytes)
100 def basename(path):
100 def basename(path):
101 """Any text. Treats the text as a path, and returns the last
101 """Any text. Treats the text as a path, and returns the last
102 component of the path after splitting by the path separator.
102 component of the path after splitting by the path separator.
103 For example, "foo/bar/baz" becomes "baz" and "foo/bar//" becomes "".
103 For example, "foo/bar/baz" becomes "baz" and "foo/bar//" becomes "".
104 """
104 """
105 return os.path.basename(path)
105 return os.path.basename(path)
106
106
107
107
108 def _tocborencodable(obj):
108 def _tocborencodable(obj):
109 if isinstance(obj, smartset.abstractsmartset):
109 if isinstance(obj, smartset.abstractsmartset):
110 return list(obj)
110 return list(obj)
111 return obj
111 return obj
112
112
113
113
114 @templatefilter(b'cbor')
114 @templatefilter(b'cbor')
115 def cbor(obj):
115 def cbor(obj):
116 """Any object. Serializes the object to CBOR bytes."""
116 """Any object. Serializes the object to CBOR bytes."""
117 # cborutil is stricter about type than json() filter
117 # cborutil is stricter about type than json() filter
118 obj = pycompat.rapply(_tocborencodable, obj)
118 obj = pycompat.rapply(_tocborencodable, obj)
119 return b''.join(cborutil.streamencode(obj))
119 return b''.join(cborutil.streamencode(obj))
120
120
121
121
122 @templatefilter(b'commondir')
122 @templatefilter(b'commondir')
123 def commondir(filelist):
123 def commondir(filelist):
124 """List of text. Treats each list item as file name with /
124 """List of text. Treats each list item as file name with /
125 as path separator and returns the longest common directory
125 as path separator and returns the longest common directory
126 prefix shared by all list items.
126 prefix shared by all list items.
127 Returns the empty string if no common prefix exists.
127 Returns the empty string if no common prefix exists.
128
128
129 The list items are not normalized, i.e. "foo/../bar" is handled as
129 The list items are not normalized, i.e. "foo/../bar" is handled as
130 file "bar" in the directory "foo/..". Leading slashes are ignored.
130 file "bar" in the directory "foo/..". Leading slashes are ignored.
131
131
132 For example, ["foo/bar/baz", "foo/baz/bar"] becomes "foo" and
132 For example, ["foo/bar/baz", "foo/baz/bar"] becomes "foo" and
133 ["foo/bar", "baz"] becomes "".
133 ["foo/bar", "baz"] becomes "".
134 """
134 """
135
135
136 def common(a, b):
136 def common(a, b):
137 if len(a) > len(b):
137 if len(a) > len(b):
138 a = a[: len(b)]
138 a = a[: len(b)]
139 elif len(b) > len(a):
139 elif len(b) > len(a):
140 b = b[: len(a)]
140 b = b[: len(a)]
141 if a == b:
141 if a == b:
142 return a
142 return a
143 for i in pycompat.xrange(len(a)):
143 for i in pycompat.xrange(len(a)):
144 if a[i] != b[i]:
144 if a[i] != b[i]:
145 return a[:i]
145 return a[:i]
146 return a
146 return a
147
147
148 try:
148 try:
149 if not filelist:
149 if not filelist:
150 return b""
150 return b""
151 dirlist = [f.lstrip(b'/').split(b'/')[:-1] for f in filelist]
151 dirlist = [f.lstrip(b'/').split(b'/')[:-1] for f in filelist]
152 if len(dirlist) == 1:
152 if len(dirlist) == 1:
153 return b'/'.join(dirlist[0])
153 return b'/'.join(dirlist[0])
154 a = min(dirlist)
154 a = min(dirlist)
155 b = max(dirlist)
155 b = max(dirlist)
156 # The common prefix of a and b is shared with all
156 # The common prefix of a and b is shared with all
157 # elements of the list, since Python sorts lexicographically,
157 # elements of the list, since Python sorts lexicographically,
158 # placing [1, x] after [1].
158 # placing [1, x] after [1].
159 return b'/'.join(common(a, b))
159 return b'/'.join(common(a, b))
160 except TypeError:
160 except TypeError:
161 raise error.ParseError(_(b'argument is not a list of text'))
161 raise error.ParseError(_(b'argument is not a list of text'))
162
162
163
163
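The min/max trick in commondir works because list comparison is lexicographic: the directory-path lists that sort first and last bound every other entry, so any prefix those two share is shared by the whole list. A standalone demonstration (plain str paths for brevity):

paths = ["foo/bar/baz", "foo/baz/bar", "foo/bar/qux"]
dirlist = [p.lstrip('/').split('/')[:-1] for p in paths]
a, b = min(dirlist), max(dirlist)   # ['foo', 'bar'] and ['foo', 'baz']
prefix = []
for x, y in zip(a, b):
    if x != y:
        break
    prefix.append(x)
print('/'.join(prefix))  # foo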
164 @templatefilter(b'count')
164 @templatefilter(b'count')
165 def count(i):
165 def count(i):
166 """List or text. Returns the length as an integer."""
166 """List or text. Returns the length as an integer."""
167 try:
167 try:
168 return len(i)
168 return len(i)
169 except TypeError:
169 except TypeError:
170 raise error.ParseError(_(b'not countable'))
170 raise error.ParseError(_(b'not countable'))
171
171
172
172
173 @templatefilter(b'dirname', intype=bytes)
173 @templatefilter(b'dirname', intype=bytes)
174 def dirname(path):
174 def dirname(path):
175 """Any text. Treats the text as a path, and strips the last
175 """Any text. Treats the text as a path, and strips the last
176 component of the path after splitting by the path separator.
176 component of the path after splitting by the path separator.
177 """
177 """
178 return os.path.dirname(path)
178 return os.path.dirname(path)
179
179
180
180
181 @templatefilter(b'domain', intype=bytes)
181 @templatefilter(b'domain', intype=bytes)
182 def domain(author):
182 def domain(author):
183 """Any text. Finds the first string that looks like an email
183 """Any text. Finds the first string that looks like an email
184 address, and extracts just the domain component. Example: ``User
184 address, and extracts just the domain component. Example: ``User
185 <user@example.com>`` becomes ``example.com``.
185 <user@example.com>`` becomes ``example.com``.
186 """
186 """
187 f = author.find(b'@')
187 f = author.find(b'@')
188 if f == -1:
188 if f == -1:
189 return b''
189 return b''
190 author = author[f + 1 :]
190 author = author[f + 1 :]
191 f = author.find(b'>')
191 f = author.find(b'>')
192 if f >= 0:
192 if f >= 0:
193 author = author[:f]
193 author = author[:f]
194 return author
194 return author
195
195
196
196
197 @templatefilter(b'email', intype=bytes)
197 @templatefilter(b'email', intype=bytes)
198 def email(text):
198 def email(text):
199 """Any text. Extracts the first string that looks like an email
199 """Any text. Extracts the first string that looks like an email
200 address. Example: ``User <user@example.com>`` becomes
200 address. Example: ``User <user@example.com>`` becomes
201 ``user@example.com``.
201 ``user@example.com``.
202 """
202 """
203 return stringutil.email(text)
203 return stringutil.email(text)
204
204
205
205
206 @templatefilter(b'escape', intype=bytes)
206 @templatefilter(b'escape', intype=bytes)
207 def escape(text):
207 def escape(text):
208 """Any text. Replaces the special XML/XHTML characters "&", "<"
208 """Any text. Replaces the special XML/XHTML characters "&", "<"
209 and ">" with XML entities, and filters out NUL characters.
209 and ">" with XML entities, and filters out NUL characters.
210 """
210 """
211 return url.escape(text.replace(b'\0', b''), True)
211 return url.escape(text.replace(b'\0', b''), True)
212
212
213
213
214 para_re = None
214 para_re = None
215 space_re = None
215 space_re = None
216
216
217
217
218 def fill(text, width, initindent=b'', hangindent=b''):
218 def fill(text, width, initindent=b'', hangindent=b''):
219 '''fill many paragraphs with optional indentation.'''
219 '''fill many paragraphs with optional indentation.'''
220 global para_re, space_re
220 global para_re, space_re
221 if para_re is None:
221 if para_re is None:
222 para_re = re.compile(b'(\n\n|\n\\s*[-*]\\s*)', re.M)
222 para_re = re.compile(b'(\n\n|\n\\s*[-*]\\s*)', re.M)
223 space_re = re.compile(br' +')
223 space_re = re.compile(br' +')
224
224
225 def findparas():
225 def findparas():
226 start = 0
226 start = 0
227 while True:
227 while True:
228 m = para_re.search(text, start)
228 m = para_re.search(text, start)
229 if not m:
229 if not m:
230 uctext = encoding.unifromlocal(text[start:])
230 uctext = encoding.unifromlocal(text[start:])
231 w = len(uctext)
231 w = len(uctext)
232 while w > 0 and uctext[w - 1].isspace():
232 while w > 0 and uctext[w - 1].isspace():
233 w -= 1
233 w -= 1
234 yield (
234 yield (
235 encoding.unitolocal(uctext[:w]),
235 encoding.unitolocal(uctext[:w]),
236 encoding.unitolocal(uctext[w:]),
236 encoding.unitolocal(uctext[w:]),
237 )
237 )
238 break
238 break
239 yield text[start : m.start(0)], m.group(1)
239 yield text[start : m.start(0)], m.group(1)
240 start = m.end(1)
240 start = m.end(1)
241
241
242 return b"".join(
242 return b"".join(
243 [
243 [
244 stringutil.wrap(
244 stringutil.wrap(
245 space_re.sub(b' ', stringutil.wrap(para, width)),
245 space_re.sub(b' ', stringutil.wrap(para, width)),
246 width,
246 width,
247 initindent,
247 initindent,
248 hangindent,
248 hangindent,
249 )
249 )
250 + rest
250 + rest
251 for para, rest in findparas()
251 for para, rest in findparas()
252 ]
252 ]
253 )
253 )
254
254
255
255
256 @templatefilter(b'fill68', intype=bytes)
256 @templatefilter(b'fill68', intype=bytes)
257 def fill68(text):
257 def fill68(text):
258 """Any text. Wraps the text to fit in 68 columns."""
258 """Any text. Wraps the text to fit in 68 columns."""
259 return fill(text, 68)
259 return fill(text, 68)
260
260
261
261
262 @templatefilter(b'fill76', intype=bytes)
262 @templatefilter(b'fill76', intype=bytes)
263 def fill76(text):
263 def fill76(text):
264 """Any text. Wraps the text to fit in 76 columns."""
264 """Any text. Wraps the text to fit in 76 columns."""
265 return fill(text, 76)
265 return fill(text, 76)
266
266
267
267
268 @templatefilter(b'firstline', intype=bytes)
268 @templatefilter(b'firstline', intype=bytes)
269 def firstline(text):
269 def firstline(text):
270 """Any text. Returns the first line of text."""
270 """Any text. Returns the first line of text."""
271 try:
271 try:
272 return text.splitlines(True)[0].rstrip(b'\r\n')
272 return text.splitlines(True)[0].rstrip(b'\r\n')
273 except IndexError:
273 except IndexError:
274 return b''
274 return b''
275
275
276
276
277 @templatefilter(b'hex', intype=bytes)
277 @templatefilter(b'hex', intype=bytes)
278 def hexfilter(text):
278 def hexfilter(text):
279 """Any text. Convert a binary Mercurial node identifier into
279 """Any text. Convert a binary Mercurial node identifier into
280 its long hexadecimal representation.
280 its long hexadecimal representation.
281 """
281 """
282 return hex(text)
282 return hex(text)
283
283
284
284
285 @templatefilter(b'hgdate', intype=templateutil.date)
285 @templatefilter(b'hgdate', intype=templateutil.date)
286 def hgdate(text):
286 def hgdate(text):
287 """Date. Returns the date as a pair of numbers: "1157407993
287 """Date. Returns the date as a pair of numbers: "1157407993
288 25200" (Unix timestamp, timezone offset).
288 25200" (Unix timestamp, timezone offset).
289 """
289 """
290 return b"%d %d" % text
290 return b"%d %d" % text
291
291
292
292
293 @templatefilter(b'isodate', intype=templateutil.date)
293 @templatefilter(b'isodate', intype=templateutil.date)
294 def isodate(text):
294 def isodate(text):
295 """Date. Returns the date in ISO 8601 format: "2009-08-18 13:00
295 """Date. Returns the date in ISO 8601 format: "2009-08-18 13:00
296 +0200".
296 +0200".
297 """
297 """
298 return dateutil.datestr(text, b'%Y-%m-%d %H:%M %1%2')
298 return dateutil.datestr(text, b'%Y-%m-%d %H:%M %1%2')
299
299
300
300
301 @templatefilter(b'isodatesec', intype=templateutil.date)
301 @templatefilter(b'isodatesec', intype=templateutil.date)
302 def isodatesec(text):
302 def isodatesec(text):
303 """Date. Returns the date in ISO 8601 format, including
303 """Date. Returns the date in ISO 8601 format, including
304 seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date
304 seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date
305 filter.
305 filter.
306 """
306 """
307 return dateutil.datestr(text, b'%Y-%m-%d %H:%M:%S %1%2')
307 return dateutil.datestr(text, b'%Y-%m-%d %H:%M:%S %1%2')
308
308
309
309
310 def indent(text, prefix, firstline=b''):
310 def indent(text, prefix, firstline=b''):
311 '''indent each non-empty line of text after first with prefix.'''
311 '''indent each non-empty line of text after first with prefix.'''
312 lines = text.splitlines()
312 lines = text.splitlines()
313 num_lines = len(lines)
313 num_lines = len(lines)
314 endswithnewline = text[-1:] == b'\n'
314 endswithnewline = text[-1:] == b'\n'
315
315
316 def indenter():
316 def indenter():
317 for i in pycompat.xrange(num_lines):
317 for i in pycompat.xrange(num_lines):
318 l = lines[i]
318 l = lines[i]
319 if l.strip():
319 if l.strip():
320 yield prefix if i else firstline
320 yield prefix if i else firstline
321 yield l
321 yield l
322 if i < num_lines - 1 or endswithnewline:
322 if i < num_lines - 1 or endswithnewline:
323 yield b'\n'
323 yield b'\n'
324
324
325 return b"".join(indenter())
325 return b"".join(indenter())
326
326
327
327
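Two behaviors of indent() above are easy to miss: empty lines are emitted without the prefix, and a trailing newline survives the round trip. A condensed standalone copy for experimenting (same logic, restated, not the original):

def indent(text, prefix, firstline=b''):
    lines = text.splitlines()
    endswithnewline = text[-1:] == b'\n'
    out = []
    for i, l in enumerate(lines):
        if l.strip():
            out.append(prefix if i else firstline)  # first line gets firstline
        out.append(l)
        if i < len(lines) - 1 or endswithnewline:
            out.append(b'\n')
    return b''.join(out)

assert indent(b'a\n\nb\n', b'\t') == b'a\n\n\tb\n'  # blank line left bare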
328 @templatefilter(b'json')
328 @templatefilter(b'json')
329 def json(obj, paranoid=True):
329 def json(obj, paranoid=True):
330 """Any object. Serializes the object to a JSON formatted text."""
330 """Any object. Serializes the object to a JSON formatted text."""
331 if obj is None:
331 if obj is None:
332 return b'null'
332 return b'null'
333 elif obj is False:
333 elif obj is False:
334 return b'false'
334 return b'false'
335 elif obj is True:
335 elif obj is True:
336 return b'true'
336 return b'true'
337 elif isinstance(obj, (int, float)):
337 elif isinstance(obj, (int, float)):
338 return pycompat.bytestr(obj)
338 return pycompat.bytestr(obj)
339 elif isinstance(obj, bytes):
339 elif isinstance(obj, bytes):
340 return b'"%s"' % encoding.jsonescape(obj, paranoid=paranoid)
340 return b'"%s"' % encoding.jsonescape(obj, paranoid=paranoid)
341 elif isinstance(obj, type(u'')):
341 elif isinstance(obj, type(u'')):
342 raise error.ProgrammingError(
342 raise error.ProgrammingError(
343 b'Mercurial only does output with bytes: %r' % obj
343 b'Mercurial only does output with bytes: %r' % obj
344 )
344 )
345 elif util.safehasattr(obj, b'keys'):
345 elif util.safehasattr(obj, b'keys'):
346 out = [
346 out = [
347 b'"%s": %s'
347 b'"%s": %s'
348 % (encoding.jsonescape(k, paranoid=paranoid), json(v, paranoid))
348 % (encoding.jsonescape(k, paranoid=paranoid), json(v, paranoid))
349 for k, v in sorted(obj.items())
349 for k, v in sorted(obj.items())
350 ]
350 ]
351 return b'{' + b', '.join(out) + b'}'
351 return b'{' + b', '.join(out) + b'}'
352 elif util.safehasattr(obj, b'__iter__'):
352 elif util.safehasattr(obj, b'__iter__'):
353 out = [json(i, paranoid) for i in obj]
353 out = [json(i, paranoid) for i in obj]
354 return b'[' + b', '.join(out) + b']'
354 return b'[' + b', '.join(out) + b']'
355 raise error.ProgrammingError(b'cannot encode %r' % obj)
355 raise error.ProgrammingError(b'cannot encode %r' % obj)
356
356
357
357
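The dispatch above is easiest to see on a small value. Below is a minimal ASCII-only sketch of the same recursion; the real filter escapes strings through encoding.jsonescape and rejects unicode text, which this toy omits:

def tojson(obj):
    if obj is None:
        return b'null'
    if obj is True or obj is False:
        return b'true' if obj else b'false'
    if isinstance(obj, (int, float)):
        return str(obj).encode('ascii')
    if isinstance(obj, bytes):
        return b'"%s"' % obj  # the real filter escapes here
    if hasattr(obj, 'keys'):
        items = (b'"%s": %s' % (k, tojson(v)) for k, v in sorted(obj.items()))
        return b'{' + b', '.join(items) + b'}'
    return b'[' + b', '.join(tojson(i) for i in obj) + b']'

print(tojson([b'a', 1, None, {b'x': True}]))  # b'["a", 1, null, {"x": true}]'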
358 @templatefilter(b'lower', intype=bytes)
358 @templatefilter(b'lower', intype=bytes)
359 def lower(text):
359 def lower(text):
360 """Any text. Converts the text to lowercase."""
360 """Any text. Converts the text to lowercase."""
361 return encoding.lower(text)
361 return encoding.lower(text)
362
362
363
363
364 @templatefilter(b'nonempty', intype=bytes)
364 @templatefilter(b'nonempty', intype=bytes)
365 def nonempty(text):
365 def nonempty(text):
366 """Any text. Returns '(none)' if the string is empty."""
366 """Any text. Returns '(none)' if the string is empty."""
367 return text or b"(none)"
367 return text or b"(none)"
368
368
369
369
370 @templatefilter(b'obfuscate', intype=bytes)
370 @templatefilter(b'obfuscate', intype=bytes)
371 def obfuscate(text):
371 def obfuscate(text):
372 """Any text. Returns the input text rendered as a sequence of
372 """Any text. Returns the input text rendered as a sequence of
373 XML entities.
373 XML entities.
374 """
374 """
375 text = pycompat.unicode(
375 text = str(text, pycompat.sysstr(encoding.encoding), r'replace')
376 text, pycompat.sysstr(encoding.encoding), r'replace'
377 )
378 return b''.join([b'&#%d;' % ord(c) for c in text])
376 return b''.join([b'&#%d;' % ord(c) for c in text])
379
377
380
378
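This hunk is the point of the commit: on Python 3, pycompat.unicode was simply an alias for str, so collapsing the three-line call into str(...) is a pure spelling change. A standalone sketch of the rewritten function, assuming UTF-8 as the local encoding (the real code consults encoding.encoding):

def obfuscate(text):
    # decode bytes with the (assumed) local encoding, replacing bad bytes
    text = str(text, 'utf-8', 'replace')
    return b''.join([b'&#%d;' % ord(c) for c in text])

print(obfuscate(b'<a>'))  # b'&#60;&#97;&#62;'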
381 @templatefilter(b'permissions', intype=bytes)
379 @templatefilter(b'permissions', intype=bytes)
382 def permissions(flags):
380 def permissions(flags):
383 if b"l" in flags:
381 if b"l" in flags:
384 return b"lrwxrwxrwx"
382 return b"lrwxrwxrwx"
385 if b"x" in flags:
383 if b"x" in flags:
386 return b"-rwxr-xr-x"
384 return b"-rwxr-xr-x"
387 return b"-rw-r--r--"
385 return b"-rw-r--r--"
388
386
389
387
390 @templatefilter(b'person', intype=bytes)
388 @templatefilter(b'person', intype=bytes)
391 def person(author):
389 def person(author):
392 """Any text. Returns the name before an email address,
390 """Any text. Returns the name before an email address,
393 interpreting it as per RFC 5322.
391 interpreting it as per RFC 5322.
394 """
392 """
395 return stringutil.person(author)
393 return stringutil.person(author)
396
394
397
395
398 @templatefilter(b'revescape', intype=bytes)
396 @templatefilter(b'revescape', intype=bytes)
399 def revescape(text):
397 def revescape(text):
400 """Any text. Escapes all "special" characters, except @.
398 """Any text. Escapes all "special" characters, except @.
401 Forward slashes are escaped twice to prevent web servers from prematurely
399 Forward slashes are escaped twice to prevent web servers from prematurely
402 unescaping them. For example, "@foo bar/baz" becomes "@foo%20bar%252Fbaz".
400 unescaping them. For example, "@foo bar/baz" becomes "@foo%20bar%252Fbaz".
403 """
401 """
404 return urlreq.quote(text, safe=b'/@').replace(b'/', b'%252F')
402 return urlreq.quote(text, safe=b'/@').replace(b'/', b'%252F')
405
403
406
404
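The double escaping in revescape above is the subtle part: quote() leaves / alone (it is in safe), and the second replace turns each / into %252F, i.e. a percent-escaped %2F, so a web server that eagerly unescapes once still sees an escaped slash. Reproduced with the standard library (urlreq.quote wraps the same function on Python 3):

from urllib.parse import quote

text = '@foo bar/baz'
print(quote(text, safe='/@').replace('/', '%252F'))  # @foo%20bar%252Fbaz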
407 @templatefilter(b'rfc3339date', intype=templateutil.date)
405 @templatefilter(b'rfc3339date', intype=templateutil.date)
408 def rfc3339date(text):
406 def rfc3339date(text):
409 """Date. Returns a date using the Internet date format
407 """Date. Returns a date using the Internet date format
410 specified in RFC 3339: "2009-08-18T13:00:13+02:00".
408 specified in RFC 3339: "2009-08-18T13:00:13+02:00".
411 """
409 """
412 return dateutil.datestr(text, b"%Y-%m-%dT%H:%M:%S%1:%2")
410 return dateutil.datestr(text, b"%Y-%m-%dT%H:%M:%S%1:%2")
413
411
414
412
415 @templatefilter(b'rfc822date', intype=templateutil.date)
413 @templatefilter(b'rfc822date', intype=templateutil.date)
416 def rfc822date(text):
414 def rfc822date(text):
417 """Date. Returns a date using the same format used in email
415 """Date. Returns a date using the same format used in email
418 headers: "Tue, 18 Aug 2009 13:00:13 +0200".
416 headers: "Tue, 18 Aug 2009 13:00:13 +0200".
419 """
417 """
420 return dateutil.datestr(text, b"%a, %d %b %Y %H:%M:%S %1%2")
418 return dateutil.datestr(text, b"%a, %d %b %Y %H:%M:%S %1%2")
421
419
422
420
423 @templatefilter(b'short', intype=bytes)
421 @templatefilter(b'short', intype=bytes)
424 def short(text):
422 def short(text):
425 """Changeset hash. Returns the short form of a changeset hash,
423 """Changeset hash. Returns the short form of a changeset hash,
426 i.e. a 12 hexadecimal digit string.
424 i.e. a 12 hexadecimal digit string.
427 """
425 """
428 return text[:12]
426 return text[:12]
429
427
430
428
431 @templatefilter(b'shortbisect', intype=bytes)
429 @templatefilter(b'shortbisect', intype=bytes)
432 def shortbisect(label):
430 def shortbisect(label):
433 """Any text. Treats `label` as a bisection status, and
431 """Any text. Treats `label` as a bisection status, and
434 returns a single character representing the status (G: good, B: bad,
432 returns a single character representing the status (G: good, B: bad,
435 S: skipped, U: untested, I: ignored). Returns a single space if `label`
433 S: skipped, U: untested, I: ignored). Returns a single space if `label`
436 is not a valid bisection status.
434 is not a valid bisection status.
437 """
435 """
438 if label:
436 if label:
439 return label[0:1].upper()
437 return label[0:1].upper()
440 return b' '
438 return b' '
441
439
442
440
443 @templatefilter(b'shortdate', intype=templateutil.date)
441 @templatefilter(b'shortdate', intype=templateutil.date)
444 def shortdate(text):
442 def shortdate(text):
445 """Date. Returns a date like "2006-09-18"."""
443 """Date. Returns a date like "2006-09-18"."""
446 return dateutil.shortdate(text)
444 return dateutil.shortdate(text)
447
445
448
446
449 @templatefilter(b'slashpath', intype=bytes)
447 @templatefilter(b'slashpath', intype=bytes)
450 def slashpath(path):
448 def slashpath(path):
451 """Any text. Replaces the native path separator with slash."""
449 """Any text. Replaces the native path separator with slash."""
452 return util.pconvert(path)
450 return util.pconvert(path)
453
451
454
452
455 @templatefilter(b'splitlines', intype=bytes)
453 @templatefilter(b'splitlines', intype=bytes)
456 def splitlines(text):
454 def splitlines(text):
457 """Any text. Split text into a list of lines."""
455 """Any text. Split text into a list of lines."""
458 return templateutil.hybridlist(text.splitlines(), name=b'line')
456 return templateutil.hybridlist(text.splitlines(), name=b'line')
459
457
460
458
461 @templatefilter(b'stringescape', intype=bytes)
459 @templatefilter(b'stringescape', intype=bytes)
462 def stringescape(text):
460 def stringescape(text):
463 return stringutil.escapestr(text)
461 return stringutil.escapestr(text)
464
462
465
463
466 @templatefilter(b'stringify', intype=bytes)
464 @templatefilter(b'stringify', intype=bytes)
467 def stringify(thing):
465 def stringify(thing):
468 """Any type. Turns the value into text by converting values into
466 """Any type. Turns the value into text by converting values into
469 text and concatenating them.
467 text and concatenating them.
470 """
468 """
471 return thing # coerced by the intype
469 return thing # coerced by the intype
472
470
473
471
474 @templatefilter(b'stripdir', intype=bytes)
472 @templatefilter(b'stripdir', intype=bytes)
475 def stripdir(text):
473 def stripdir(text):
476 """Treat the text as path and strip a directory level, if
474 """Treat the text as path and strip a directory level, if
477 possible. For example, "foo" and "foo/bar" becomes "foo".
475 possible. For example, "foo" and "foo/bar" becomes "foo".
478 """
476 """
479 dir = os.path.dirname(text)
477 dir = os.path.dirname(text)
480 if dir == b"":
478 if dir == b"":
481 return os.path.basename(text)
479 return os.path.basename(text)
482 else:
480 else:
483 return dir
481 return dir
484
482
485
483
486 @templatefilter(b'tabindent', intype=bytes)
484 @templatefilter(b'tabindent', intype=bytes)
487 def tabindent(text):
485 def tabindent(text):
488 """Any text. Returns the text, with every non-empty line
486 """Any text. Returns the text, with every non-empty line
489 except the first starting with a tab character.
487 except the first starting with a tab character.
490 """
488 """
491 return indent(text, b'\t')
489 return indent(text, b'\t')
492
490
493
491
494 @templatefilter(b'upper', intype=bytes)
492 @templatefilter(b'upper', intype=bytes)
495 def upper(text):
493 def upper(text):
496 """Any text. Converts the text to uppercase."""
494 """Any text. Converts the text to uppercase."""
497 return encoding.upper(text)
495 return encoding.upper(text)
498
496
499
497
500 @templatefilter(b'urlescape', intype=bytes)
498 @templatefilter(b'urlescape', intype=bytes)
501 def urlescape(text):
499 def urlescape(text):
502 """Any text. Escapes all "special" characters. For example,
500 """Any text. Escapes all "special" characters. For example,
503 "foo bar" becomes "foo%20bar".
501 "foo bar" becomes "foo%20bar".
504 """
502 """
505 return urlreq.quote(text)
503 return urlreq.quote(text)
506
504
507
505
508 @templatefilter(b'user', intype=bytes)
506 @templatefilter(b'user', intype=bytes)
509 def userfilter(text):
507 def userfilter(text):
510 """Any text. Returns a short representation of a user name or email
508 """Any text. Returns a short representation of a user name or email
511 address."""
509 address."""
512 return stringutil.shortuser(text)
510 return stringutil.shortuser(text)
513
511
514
512
515 @templatefilter(b'emailuser', intype=bytes)
513 @templatefilter(b'emailuser', intype=bytes)
516 def emailuser(text):
514 def emailuser(text):
517 """Any text. Returns the user portion of an email address."""
515 """Any text. Returns the user portion of an email address."""
518 return stringutil.emailuser(text)
516 return stringutil.emailuser(text)
519
517
520
518
521 @templatefilter(b'utf8', intype=bytes)
519 @templatefilter(b'utf8', intype=bytes)
522 def utf8(text):
520 def utf8(text):
523 """Any text. Converts from the local character encoding to UTF-8."""
521 """Any text. Converts from the local character encoding to UTF-8."""
524 return encoding.fromlocal(text)
522 return encoding.fromlocal(text)
525
523
526
524
527 @templatefilter(b'xmlescape', intype=bytes)
525 @templatefilter(b'xmlescape', intype=bytes)
528 def xmlescape(text):
526 def xmlescape(text):
529 text = (
527 text = (
530 text.replace(b'&', b'&amp;')
528 text.replace(b'&', b'&amp;')
531 .replace(b'<', b'&lt;')
529 .replace(b'<', b'&lt;')
532 .replace(b'>', b'&gt;')
530 .replace(b'>', b'&gt;')
533 .replace(b'"', b'&quot;')
531 .replace(b'"', b'&quot;')
534 .replace(b"'", b'&#39;')
532 .replace(b"'", b'&#39;')
535 ) # &apos; invalid in HTML
533 ) # &apos; invalid in HTML
536 return re.sub(b'[\x00-\x08\x0B\x0C\x0E-\x1F]', b' ', text)
534 return re.sub(b'[\x00-\x08\x0B\x0C\x0E-\x1F]', b' ', text)
537
535
538
536
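Order matters in xmlescape above: '&' must be replaced first, otherwise the entities produced for the later characters would themselves be re-escaped; the final re.sub blanks control characters that XML forbids. A worked example:

import re

text = b"<a href='x'> & \x01"
text = (text.replace(b'&', b'&amp;').replace(b'<', b'&lt;')
            .replace(b'>', b'&gt;').replace(b'"', b'&quot;')
            .replace(b"'", b'&#39;'))
print(re.sub(b'[\x00-\x08\x0B\x0C\x0E-\x1F]', b' ', text))
# b'&lt;a href=&#39;x&#39;&gt; &amp;  '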
539 def websub(text, websubtable):
537 def websub(text, websubtable):
540 """:websub: Any text. Only applies to hgweb. Applies the regular
538 """:websub: Any text. Only applies to hgweb. Applies the regular
541 expression replacements defined in the websub section.
539 expression replacements defined in the websub section.
542 """
540 """
543 if websubtable:
541 if websubtable:
544 for regexp, format in websubtable:
542 for regexp, format in websubtable:
545 text = regexp.sub(format, text)
543 text = regexp.sub(format, text)
546 return text
544 return text
547
545
548
546
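A websub table is a list of (compiled_regexp, replacement) pairs; in hgweb they are built from the [websub] section of the configuration. A standalone run with one hypothetical issue-linking rule:

import re

# hypothetical rule; real tables come from the [websub] hgrc section
websubtable = [(re.compile(br'issue(\d+)'), br'<a href="/bug/\1">issue\1</a>')]

text = b'fixes issue42'
for regexp, format in websubtable:
    text = regexp.sub(format, text)
print(text)  # b'fixes <a href="/bug/42">issue42</a>'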
549 def loadfilter(ui, extname, registrarobj):
547 def loadfilter(ui, extname, registrarobj):
550 """Load template filter from specified registrarobj"""
548 """Load template filter from specified registrarobj"""
551 for name, func in registrarobj._table.items():
549 for name, func in registrarobj._table.items():
552 filters[name] = func
550 filters[name] = func
553
551
554
552
555 # tell hggettext to extract docstrings from these functions:
553 # tell hggettext to extract docstrings from these functions:
556 i18nfunctions = filters.values()
554 i18nfunctions = filters.values()