##// END OF EJS Templates
progress: create helper class for incrementing progress...
Martin von Zweigbergk -
r38364:bec1212e default
parent child Browse files
Show More
@@ -1,2247 +1,2232 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from .thirdparty import (
25 from .thirdparty import (
26 attr,
26 attr,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 error,
30 error,
31 filemerge,
31 filemerge,
32 match as matchmod,
32 match as matchmod,
33 obsutil,
33 obsutil,
34 pycompat,
34 pycompat,
35 scmutil,
35 scmutil,
36 subrepoutil,
36 subrepoutil,
37 util,
37 util,
38 worker,
38 worker,
39 )
39 )
40
40
41 _pack = struct.pack
41 _pack = struct.pack
42 _unpack = struct.unpack
42 _unpack = struct.unpack
43
43
44 def _droponode(data):
44 def _droponode(data):
45 # used for compatibility for v1
45 # used for compatibility for v1
46 bits = data.split('\0')
46 bits = data.split('\0')
47 bits = bits[:-2] + bits[-1:]
47 bits = bits[:-2] + bits[-1:]
48 return '\0'.join(bits)
48 return '\0'.join(bits)
49
49
# Merge state record types. See ``mergestate`` docs for more.
RECORD_LOCAL = b'L'
RECORD_OTHER = b'O'
RECORD_MERGED = b'F'
RECORD_CHANGEDELETE_CONFLICT = b'C'
RECORD_MERGE_DRIVER_MERGE = b'D'
RECORD_PATH_CONFLICT = b'P'
RECORD_MERGE_DRIVER_STATE = b'm'
RECORD_FILE_VALUES = b'f'
RECORD_LABELS = b'l'
RECORD_OVERRIDE = b't'
RECORD_UNSUPPORTED_MANDATORY = b'X'
RECORD_UNSUPPORTED_ADVISORY = b'x'

# Run states of the experimental external merge driver.
MERGE_DRIVER_STATE_UNMARKED = b'u'
MERGE_DRIVER_STATE_MARKED = b'm'
MERGE_DRIVER_STATE_SUCCESS = b's'

# Per-file resolution states stored in mergestate._state.
MERGE_RECORD_UNRESOLVED = b'u'
MERGE_RECORD_RESOLVED = b'r'
MERGE_RECORD_UNRESOLVED_PATH = b'pu'
MERGE_RECORD_RESOLVED_PATH = b'pr'
MERGE_RECORD_DRIVER_RESOLVED = b'd'

# Actions computed by manifestmerge and applied by applyupdates.
ACTION_FORGET = b'f'
ACTION_REMOVE = b'r'
ACTION_ADD = b'a'
ACTION_GET = b'g'
ACTION_PATH_CONFLICT = b'p'
ACTION_PATH_CONFLICT_RESOLVE = b'pr'
ACTION_ADD_MODIFIED = b'am'
ACTION_CREATED = b'c'
ACTION_DELETED_CHANGED = b'dc'
ACTION_CHANGED_DELETED = b'cd'
ACTION_MERGE = b'm'
ACTION_LOCAL_DIR_RENAME_GET = b'dg'
ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
ACTION_KEEP = b'k'
ACTION_EXEC = b'e'
ACTION_CREATED_MERGE = b'cm'
90
90
class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
    # vfs-relative paths of the version 1 and version 2 state files
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'
139
139
140 @staticmethod
140 @staticmethod
141 def clean(repo, node=None, other=None, labels=None):
141 def clean(repo, node=None, other=None, labels=None):
142 """Initialize a brand new merge state, removing any existing state on
142 """Initialize a brand new merge state, removing any existing state on
143 disk."""
143 disk."""
144 ms = mergestate(repo)
144 ms = mergestate(repo)
145 ms.reset(node, other, labels)
145 ms.reset(node, other, labels)
146 return ms
146 return ms
147
147
148 @staticmethod
148 @staticmethod
149 def read(repo):
149 def read(repo):
150 """Initialize the merge state, reading it from disk."""
150 """Initialize the merge state, reading it from disk."""
151 ms = mergestate(repo)
151 ms = mergestate(repo)
152 ms._read()
152 ms._read()
153 return ms
153 return ms
154
154
155 def __init__(self, repo):
155 def __init__(self, repo):
156 """Initialize the merge state.
156 """Initialize the merge state.
157
157
158 Do not use this directly! Instead call read() or clean()."""
158 Do not use this directly! Instead call read() or clean()."""
159 self._repo = repo
159 self._repo = repo
160 self._dirty = False
160 self._dirty = False
161 self._labels = None
161 self._labels = None
162
162
    def reset(self, node=None, other=None, labels=None):
        """Throw away all in-memory and on-disk merge state.

        node/other: the two changeset nodes of the new merge, if any.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        # drop cached localctx/otherctx property values so they get
        # recomputed from the new _local/_other
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            # a driver is configured: nothing has been run yet for this
            # (empty) merge, so record it as not-needing-to-run
            self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        else:
            self._mdstate = MERGE_DRIVER_STATE_UNMARKED
        # remove backed-up local file versions; ignore errors (True)
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False
183
183
    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        # drop cached context properties; they depend on _local/_other
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == RECORD_LOCAL:
                self._local = bin(record)
            elif rtype == RECORD_OTHER:
                self._other = bin(record)
            elif rtype == RECORD_MERGE_DRIVER_STATE:
                # payload is '<driver>\0<state>'
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in (
                    MERGE_DRIVER_STATE_UNMARKED, MERGE_DRIVER_STATE_MARKED,
                    MERGE_DRIVER_STATE_SUCCESS):
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = MERGE_DRIVER_STATE_UNMARKED

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
                           RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
                # per-file records: '<filename>\0<field>\0<field>...'
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == RECORD_FILE_VALUES:
                # '<filename>\0<key>\0<value>\0<key>\0<value>...'
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == RECORD_LABELS:
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                # uppercase record types are mandatory; collect and abort below
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)
241
241
    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append((RECORD_OTHER, mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == RECORD_MERGED:
                    # pad each 'F' record with an empty "other node" field so
                    # it matches the v2 layout expected by the rest of the code
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records
275
275
276 def _v1v2match(self, v1records, v2records):
276 def _v1v2match(self, v1records, v2records):
277 oldv2 = set() # old format version of v2 record
277 oldv2 = set() # old format version of v2 record
278 for rec in v2records:
278 for rec in v2records:
279 if rec[0] == RECORD_LOCAL:
279 if rec[0] == RECORD_LOCAL:
280 oldv2.add(rec)
280 oldv2.add(rec)
281 elif rec[0] == RECORD_MERGED:
281 elif rec[0] == RECORD_MERGED:
282 # drop the onode data (not contained in v1)
282 # drop the onode data (not contained in v1)
283 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
283 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
284 for rec in v1records:
284 for rec in v1records:
285 if rec not in oldv2:
285 if rec not in oldv2:
286 return False
286 return False
287 else:
287 else:
288 return True
288 return True
289
289
290 def _readrecordsv1(self):
290 def _readrecordsv1(self):
291 """read on disk merge state for version 1 file
291 """read on disk merge state for version 1 file
292
292
293 returns list of record [(TYPE, data), ...]
293 returns list of record [(TYPE, data), ...]
294
294
295 Note: the "F" data from this file are one entry short
295 Note: the "F" data from this file are one entry short
296 (no "other file node" entry)
296 (no "other file node" entry)
297 """
297 """
298 records = []
298 records = []
299 try:
299 try:
300 f = self._repo.vfs(self.statepathv1)
300 f = self._repo.vfs(self.statepathv1)
301 for i, l in enumerate(f):
301 for i, l in enumerate(f):
302 if i == 0:
302 if i == 0:
303 records.append((RECORD_LOCAL, l[:-1]))
303 records.append((RECORD_LOCAL, l[:-1]))
304 else:
304 else:
305 records.append((RECORD_MERGED, l[:-1]))
305 records.append((RECORD_MERGED, l[:-1]))
306 f.close()
306 f.close()
307 except IOError as err:
307 except IOError as err:
308 if err.errno != errno.ENOENT:
308 if err.errno != errno.ENOENT:
309 raise
309 raise
310 return records
310 return records
311
311
312 def _readrecordsv2(self):
312 def _readrecordsv2(self):
313 """read on disk merge state for version 2 file
313 """read on disk merge state for version 2 file
314
314
315 This format is a list of arbitrary records of the form:
315 This format is a list of arbitrary records of the form:
316
316
317 [type][length][content]
317 [type][length][content]
318
318
319 `type` is a single character, `length` is a 4 byte integer, and
319 `type` is a single character, `length` is a 4 byte integer, and
320 `content` is an arbitrary byte sequence of length `length`.
320 `content` is an arbitrary byte sequence of length `length`.
321
321
322 Mercurial versions prior to 3.7 have a bug where if there are
322 Mercurial versions prior to 3.7 have a bug where if there are
323 unsupported mandatory merge records, attempting to clear out the merge
323 unsupported mandatory merge records, attempting to clear out the merge
324 state with hg update --clean or similar aborts. The 't' record type
324 state with hg update --clean or similar aborts. The 't' record type
325 works around that by writing out what those versions treat as an
325 works around that by writing out what those versions treat as an
326 advisory record, but later versions interpret as special: the first
326 advisory record, but later versions interpret as special: the first
327 character is the 'real' record type and everything onwards is the data.
327 character is the 'real' record type and everything onwards is the data.
328
328
329 Returns list of records [(TYPE, data), ...]."""
329 Returns list of records [(TYPE, data), ...]."""
330 records = []
330 records = []
331 try:
331 try:
332 f = self._repo.vfs(self.statepathv2)
332 f = self._repo.vfs(self.statepathv2)
333 data = f.read()
333 data = f.read()
334 off = 0
334 off = 0
335 end = len(data)
335 end = len(data)
336 while off < end:
336 while off < end:
337 rtype = data[off:off + 1]
337 rtype = data[off:off + 1]
338 off += 1
338 off += 1
339 length = _unpack('>I', data[off:(off + 4)])[0]
339 length = _unpack('>I', data[off:(off + 4)])[0]
340 off += 4
340 off += 4
341 record = data[off:(off + length)]
341 record = data[off:(off + length)]
342 off += length
342 off += length
343 if rtype == RECORD_OVERRIDE:
343 if rtype == RECORD_OVERRIDE:
344 rtype, record = record[0:1], record[1:]
344 rtype, record = record[0:1], record[1:]
345 records.append((rtype, record))
345 records.append((rtype, record))
346 f.close()
346 f.close()
347 except IOError as err:
347 except IOError as err:
348 if err.errno != errno.ENOENT:
348 if err.errno != errno.ENOENT:
349 raise
349 raise
350 return records
350 return records
351
351
    @util.propertycache
    def mergedriver(self):
        """The merge driver currently configured, validated against the one
        recorded when the merge started (cached property).

        Raises ConfigError if the configured driver differs from the one
        stored in the on-disk merge state.
        """
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver
370
370
371 @util.propertycache
371 @util.propertycache
372 def localctx(self):
372 def localctx(self):
373 if self._local is None:
373 if self._local is None:
374 msg = "localctx accessed but self._local isn't set"
374 msg = "localctx accessed but self._local isn't set"
375 raise error.ProgrammingError(msg)
375 raise error.ProgrammingError(msg)
376 return self._repo[self._local]
376 return self._repo[self._local]
377
377
378 @util.propertycache
378 @util.propertycache
379 def otherctx(self):
379 def otherctx(self):
380 if self._other is None:
380 if self._other is None:
381 msg = "otherctx accessed but self._other isn't set"
381 msg = "otherctx accessed but self._other isn't set"
382 raise error.ProgrammingError(msg)
382 raise error.ProgrammingError(msg)
383 return self._repo[self._other]
383 return self._repo[self._other]
384
384
385 def active(self):
385 def active(self):
386 """Whether mergestate is active.
386 """Whether mergestate is active.
387
387
388 Returns True if there appears to be mergestate. This is a rough proxy
388 Returns True if there appears to be mergestate. This is a rough proxy
389 for "is a merge in progress."
389 for "is a merge in progress."
390 """
390 """
391 # Check local variables before looking at filesystem for performance
391 # Check local variables before looking at filesystem for performance
392 # reasons.
392 # reasons.
393 return bool(self._local) or bool(self._state) or \
393 return bool(self._local) or bool(self._state) or \
394 self._repo.vfs.exists(self.statepathv1) or \
394 self._repo.vfs.exists(self.statepathv1) or \
395 self._repo.vfs.exists(self.statepathv2)
395 self._repo.vfs.exists(self.statepathv2)
396
396
397 def commit(self):
397 def commit(self):
398 """Write current state on disk (if necessary)"""
398 """Write current state on disk (if necessary)"""
399 if self._dirty:
399 if self._dirty:
400 records = self._makerecords()
400 records = self._makerecords()
401 self._writerecords(records)
401 self._writerecords(records)
402 self._dirty = False
402 self._dirty = False
403
403
    def _makerecords(self):
        """Assemble the in-memory merge state into a list of (TYPE, data)
        records, ready to be serialized by _writerecords()."""
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        if self.mergedriver:
            records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
                self.mergedriver, self._mdstate])))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in self._state.iteritems():
            if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
                # Driver-resolved merge. These are stored in 'D' records.
                records.append((RECORD_MERGE_DRIVER_MERGE,
                                '\0'.join([filename] + v)))
            elif v[0] in (MERGE_RECORD_UNRESOLVED_PATH,
                          MERGE_RECORD_RESOLVED_PATH):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append((RECORD_PATH_CONFLICT,
                                '\0'.join([filename] + v)))
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append((RECORD_CHANGEDELETE_CONFLICT,
                                '\0'.join([filename] + v)))
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED,
                                '\0'.join([filename] + v)))
        # per-file extra values go into 'f' records, one per file
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append((RECORD_FILE_VALUES,
                            '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records
447
447
    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        # both formats are kept in sync so that older Mercurial versions
        # (which only understand v1) can still read the merge state
        self._writerecordsv1(records)
        self._writerecordsv2(records)
452
452
453 def _writerecordsv1(self, records):
453 def _writerecordsv1(self, records):
454 """Write current state on disk in a version 1 file"""
454 """Write current state on disk in a version 1 file"""
455 f = self._repo.vfs(self.statepathv1, 'wb')
455 f = self._repo.vfs(self.statepathv1, 'wb')
456 irecords = iter(records)
456 irecords = iter(records)
457 lrecords = next(irecords)
457 lrecords = next(irecords)
458 assert lrecords[0] == RECORD_LOCAL
458 assert lrecords[0] == RECORD_LOCAL
459 f.write(hex(self._local) + '\n')
459 f.write(hex(self._local) + '\n')
460 for rtype, data in irecords:
460 for rtype, data in irecords:
461 if rtype == RECORD_MERGED:
461 if rtype == RECORD_MERGED:
462 f.write('%s\n' % _droponode(data))
462 f.write('%s\n' % _droponode(data))
463 f.close()
463 f.close()
464
464
465 def _writerecordsv2(self, records):
465 def _writerecordsv2(self, records):
466 """Write current state on disk in a version 2 file
466 """Write current state on disk in a version 2 file
467
467
468 See the docstring for _readrecordsv2 for why we use 't'."""
468 See the docstring for _readrecordsv2 for why we use 't'."""
469 # these are the records that all version 2 clients can read
469 # these are the records that all version 2 clients can read
470 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
470 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
471 f = self._repo.vfs(self.statepathv2, 'wb')
471 f = self._repo.vfs(self.statepathv2, 'wb')
472 for key, data in records:
472 for key, data in records:
473 assert len(key) == 1
473 assert len(key) == 1
474 if key not in allowlist:
474 if key not in allowlist:
475 key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
475 key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
476 format = '>sI%is' % len(data)
476 format = '>sI%is' % len(data)
477 f.write(_pack(format, key, len(data), data))
477 f.write(_pack(format, key, len(data), data))
478 f.close()
478 f.close()
479
479
480 def add(self, fcl, fco, fca, fd):
480 def add(self, fcl, fco, fca, fd):
481 """add a new (potentially?) conflicting file the merge state
481 """add a new (potentially?) conflicting file the merge state
482 fcl: file context for local,
482 fcl: file context for local,
483 fco: file context for remote,
483 fco: file context for remote,
484 fca: file context for ancestors,
484 fca: file context for ancestors,
485 fd: file path of the resulting merge.
485 fd: file path of the resulting merge.
486
486
487 note: also write the local version to the `.hg/merge` directory.
487 note: also write the local version to the `.hg/merge` directory.
488 """
488 """
489 if fcl.isabsent():
489 if fcl.isabsent():
490 hash = nullhex
490 hash = nullhex
491 else:
491 else:
492 hash = hex(hashlib.sha1(fcl.path()).digest())
492 hash = hex(hashlib.sha1(fcl.path()).digest())
493 self._repo.vfs.write('merge/' + hash, fcl.data())
493 self._repo.vfs.write('merge/' + hash, fcl.data())
494 self._state[fd] = [MERGE_RECORD_UNRESOLVED, hash, fcl.path(),
494 self._state[fd] = [MERGE_RECORD_UNRESOLVED, hash, fcl.path(),
495 fca.path(), hex(fca.filenode()),
495 fca.path(), hex(fca.filenode()),
496 fco.path(), hex(fco.filenode()),
496 fco.path(), hex(fco.filenode()),
497 fcl.flags()]
497 fcl.flags()]
498 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
498 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
499 self._dirty = True
499 self._dirty = True
500
500
501 def addpath(self, path, frename, forigin):
501 def addpath(self, path, frename, forigin):
502 """add a new conflicting path to the merge state
502 """add a new conflicting path to the merge state
503 path: the path that conflicts
503 path: the path that conflicts
504 frename: the filename the conflicting file was renamed to
504 frename: the filename the conflicting file was renamed to
505 forigin: origin of the file ('l' or 'r' for local/remote)
505 forigin: origin of the file ('l' or 'r' for local/remote)
506 """
506 """
507 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
507 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
508 self._dirty = True
508 self._dirty = True
509
509
510 def __contains__(self, dfile):
510 def __contains__(self, dfile):
511 return dfile in self._state
511 return dfile in self._state
512
512
513 def __getitem__(self, dfile):
513 def __getitem__(self, dfile):
514 return self._state[dfile][0]
514 return self._state[dfile][0]
515
515
516 def __iter__(self):
516 def __iter__(self):
517 return iter(sorted(self._state))
517 return iter(sorted(self._state))
518
518
519 def files(self):
519 def files(self):
520 return self._state.keys()
520 return self._state.keys()
521
521
522 def mark(self, dfile, state):
522 def mark(self, dfile, state):
523 self._state[dfile][0] = state
523 self._state[dfile][0] = state
524 self._dirty = True
524 self._dirty = True
525
525
526 def mdstate(self):
526 def mdstate(self):
527 return self._mdstate
527 return self._mdstate
528
528
529 def unresolved(self):
529 def unresolved(self):
530 """Obtain the paths of unresolved files."""
530 """Obtain the paths of unresolved files."""
531
531
532 for f, entry in self._state.iteritems():
532 for f, entry in self._state.iteritems():
533 if entry[0] in (MERGE_RECORD_UNRESOLVED,
533 if entry[0] in (MERGE_RECORD_UNRESOLVED,
534 MERGE_RECORD_UNRESOLVED_PATH):
534 MERGE_RECORD_UNRESOLVED_PATH):
535 yield f
535 yield f
536
536
537 def driverresolved(self):
537 def driverresolved(self):
538 """Obtain the paths of driver-resolved files."""
538 """Obtain the paths of driver-resolved files."""
539
539
540 for f, entry in self._state.items():
540 for f, entry in self._state.items():
541 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
541 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
542 yield f
542 yield f
543
543
544 def extras(self, filename):
544 def extras(self, filename):
545 return self._stateextras.setdefault(filename, {})
545 return self._stateextras.setdefault(filename, {})
546
546
    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`

        preresolve: True to run the premerge step, False for the real merge
        dfile: path of the file being merged
        wctx: working context the merge result is written into

        Returns a (complete, exitcode) pair. On completion, also records
        the dirstate action for the file in self._results.
        """
        # already-resolved files need no work
        if self[dfile] in (MERGE_RECORD_RESOLVED,
                           MERGE_RECORD_DRIVER_RESOLVED):
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            # no recorded ancestor changeset; fall back to filelog lookup
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                # exec bit merge is ambiguous without a common ancestor;
                # keep the local flags and warn once (during premerge)
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                # local side did not change flags; take the remote flags
                flags = flo
        if preresolve:
            # restore local
            if hash != nullhex:
                # the local version was stashed under .hg/merge/<hash> by add()
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            # zero exit code: the merge tool succeeded
            self.mark(dfile, MERGE_RECORD_RESOLVED)

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = ACTION_FORGET
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = ACTION_REMOVE
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = ACTION_GET
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = ACTION_ADD_MODIFIED
                    else:
                        action = ACTION_ADD
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r
624
624
625 def _filectxorabsent(self, hexnode, ctx, f):
625 def _filectxorabsent(self, hexnode, ctx, f):
626 if hexnode == nullhex:
626 if hexnode == nullhex:
627 return filemerge.absentfilectx(ctx, f)
627 return filemerge.absentfilectx(ctx, f)
628 else:
628 else:
629 return ctx[f]
629 return ctx[f]
630
630
631 def preresolve(self, dfile, wctx):
631 def preresolve(self, dfile, wctx):
632 """run premerge process for dfile
632 """run premerge process for dfile
633
633
634 Returns whether the merge is complete, and the exit code."""
634 Returns whether the merge is complete, and the exit code."""
635 return self._resolve(True, dfile, wctx)
635 return self._resolve(True, dfile, wctx)
636
636
637 def resolve(self, dfile, wctx):
637 def resolve(self, dfile, wctx):
638 """run merge process (assuming premerge was run) for dfile
638 """run merge process (assuming premerge was run) for dfile
639
639
640 Returns the exit code of the merge."""
640 Returns the exit code of the merge."""
641 return self._resolve(False, dfile, wctx)[1]
641 return self._resolve(False, dfile, wctx)[1]
642
642
643 def counts(self):
643 def counts(self):
644 """return counts for updated, merged and removed files in this
644 """return counts for updated, merged and removed files in this
645 session"""
645 session"""
646 updated, merged, removed = 0, 0, 0
646 updated, merged, removed = 0, 0, 0
647 for r, action in self._results.itervalues():
647 for r, action in self._results.itervalues():
648 if r is None:
648 if r is None:
649 updated += 1
649 updated += 1
650 elif r == 0:
650 elif r == 0:
651 if action == ACTION_REMOVE:
651 if action == ACTION_REMOVE:
652 removed += 1
652 removed += 1
653 else:
653 else:
654 merged += 1
654 merged += 1
655 return updated, merged, removed
655 return updated, merged, removed
656
656
657 def unresolvedcount(self):
657 def unresolvedcount(self):
658 """get unresolved count for this merge (persistent)"""
658 """get unresolved count for this merge (persistent)"""
659 return len(list(self.unresolved()))
659 return len(list(self.unresolved()))
660
660
661 def actions(self):
661 def actions(self):
662 """return lists of actions to perform on the dirstate"""
662 """return lists of actions to perform on the dirstate"""
663 actions = {
663 actions = {
664 ACTION_REMOVE: [],
664 ACTION_REMOVE: [],
665 ACTION_FORGET: [],
665 ACTION_FORGET: [],
666 ACTION_ADD: [],
666 ACTION_ADD: [],
667 ACTION_ADD_MODIFIED: [],
667 ACTION_ADD_MODIFIED: [],
668 ACTION_GET: [],
668 ACTION_GET: [],
669 }
669 }
670 for f, (r, action) in self._results.iteritems():
670 for f, (r, action) in self._results.iteritems():
671 if action is not None:
671 if action is not None:
672 actions[action].append((f, None, "merge result"))
672 actions[action].append((f, None, "merge result"))
673 return actions
673 return actions
674
674
675 def recordactions(self):
675 def recordactions(self):
676 """record remove/add/get actions in the dirstate"""
676 """record remove/add/get actions in the dirstate"""
677 branchmerge = self._repo.dirstate.p2() != nullid
677 branchmerge = self._repo.dirstate.p2() != nullid
678 recordupdates(self._repo, self.actions(), branchmerge)
678 recordupdates(self._repo, self.actions(), branchmerge)
679
679
680 def queueremove(self, f):
680 def queueremove(self, f):
681 """queues a file to be removed from the dirstate
681 """queues a file to be removed from the dirstate
682
682
683 Meant for use by custom merge drivers."""
683 Meant for use by custom merge drivers."""
684 self._results[f] = 0, ACTION_REMOVE
684 self._results[f] = 0, ACTION_REMOVE
685
685
686 def queueadd(self, f):
686 def queueadd(self, f):
687 """queues a file to be added to the dirstate
687 """queues a file to be added to the dirstate
688
688
689 Meant for use by custom merge drivers."""
689 Meant for use by custom merge drivers."""
690 self._results[f] = 0, ACTION_ADD
690 self._results[f] = 0, ACTION_ADD
691
691
692 def queueget(self, f):
692 def queueget(self, f):
693 """queues a file to be marked modified in the dirstate
693 """queues a file to be marked modified in the dirstate
694
694
695 Meant for use by custom merge drivers."""
695 Meant for use by custom merge drivers."""
696 self._results[f] = 0, ACTION_GET
696 self._results[f] = 0, ACTION_GET
697
697
698 def _getcheckunknownconfig(repo, section, name):
698 def _getcheckunknownconfig(repo, section, name):
699 config = repo.ui.config(section, name)
699 config = repo.ui.config(section, name)
700 valid = ['abort', 'ignore', 'warn']
700 valid = ['abort', 'ignore', 'warn']
701 if config not in valid:
701 if config not in valid:
702 validstr = ', '.join(["'" + v + "'" for v in valid])
702 validstr = ', '.join(["'" + v + "'" for v in valid])
703 raise error.ConfigError(_("%s.%s not valid "
703 raise error.ConfigError(_("%s.%s not valid "
704 "('%s' is none of %s)")
704 "('%s' is none of %s)")
705 % (section, name, config, validstr))
705 % (section, name, config, validstr))
706 return config
706 return config
707
707
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """Report whether working-directory file `f` is an untracked file
    whose content differs from `f2` in `mctx` (`f2` defaults to `f`).
    """
    if wctx.isinmemory():
        # Nothing to do in IMM because nothing in the "working copy" can be an
        # unknown file.
        #
        # Note that we should bail out here, not in ``_checkunknownfiles()``,
        # because that function does other useful work.
        return False

    if f2 is None:
        f2 = f
    if not repo.wvfs.audit.check(f):
        return False
    if not repo.wvfs.isfileorlink(f):
        return False
    if repo.dirstate.normalize(f) in repo.dirstate:
        # the file is tracked, hence not "unknown"
        return False
    return mctx[f2].cmp(wctx[f])
723
723
class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """
    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        # Returns the shortest conflicting path, or a falsy value (False or
        # None) when there is no conflict for `f`.
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(util.finddirs(f))):
            if p in self._missingdircache:
                # a missing prefix means no deeper prefix can conflict either
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate):
                    # an untracked file/link sits where a directory is needed
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None
775
775
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    repo: the local repository
    wctx: the working context being updated/merged into
    mctx: the context being merged in
    force: whether the update/merge is forced
    actions: dict of file -> (action, args, msg); updated in place
    mergeforce: whether --force was passed to a merge

    Raises error.Abort when conflicts with untracked files are configured
    to abort the operation.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in actions.iteritems():
            if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, wctx, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == ACTION_LOCAL_DIR_RENAME_GET:
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = set([c for c in allconflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == ACTION_CREATED_MERGE:
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = (ACTION_GET, (fl2, False), 'remote created')
                elif mergeforce or config == 'abort':
                    # NOTE: this branch intentionally also covers
                    # config == 'abort' with mergeforce False -- see (1)
                    # above. A subsequent "elif config == 'abort':
                    # abortconflicts.add(f)" branch was unreachable because
                    # of it, and has been removed as dead code.
                    actions[f] = (ACTION_MERGE, (f, f, None, False, anc),
                                  'remote differs from untracked local')
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = (ACTION_GET, (fl2, True), 'remote created')

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_("%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_("%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_("%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in actions.iteritems():
        if m == ACTION_CREATED:
            # back up the local file when it conflicted with something unknown
            backup = (f in fileconflicts or f in pathconflicts or
                      any(p in pathconflicts for p in util.finddirs(f)))
            flags, = args
            actions[f] = (ACTION_GET, (flags, backup), msg)
874
874
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    # during a branch merge deleted files are recorded as removed;
    # during a plain update they are simply forgotten
    deletedaction = ACTION_REMOVE if branchmerge else ACTION_FORGET
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = deletedaction, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = ACTION_FORGET, None, "forget removed"

    return actions
904
904
def _checkcollision(repo, wmf, actions):
    """
    Check for case-folding collisions.

    repo: the local repository
    wmf: the working-directory manifest to check against
    actions: dict of action-type -> [(file, args, msg), ...], or falsy

    Raises error.Abort when two resulting paths (or a path and a
    directory) would collide on a case-insensitive filesystem.
    """

    # If the repo is narrowed, filter out files outside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        wmf = wmf.matches(narrowmatch)
        if actions:
            narrowactions = {}
            for m, actionsfortype in actions.iteritems():
                narrowactions[m] = []
                for (f, args, msg) in actionsfortype:
                    if narrowmatch(f):
                        narrowactions[m].append((f, args, msg))
            actions = narrowactions

    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # KEEP and EXEC are no-op
        for m in (ACTION_ADD, ACTION_ADD_MODIFIED, ACTION_FORGET, ACTION_GET,
                  ACTION_CHANGED_DELETED, ACTION_DELETED_CHANGED):
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions[ACTION_REMOVE]:
            pmmf.discard(f)
        for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
            # a directory rename moves f2 to f
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
            pmmf.add(f)
        for f, args, msg in actions[ACTION_MERGE]:
            f1, f2, fa, move, anc = args
            if move:
                # moving merge: the source file disappears
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
965
965
def driverpreprocess(repo, ms, wctx, labels=None):
    """Run the preprocess step of the merge driver, if any.

    This is an extension point; the default implementation does nothing
    and reports success."""
    return True
971
971
def driverconclude(repo, ms, wctx, labels=None):
    """Run the merge driver's conclude step, if one exists.

    Core Mercurial does not ship a merge driver; this is an extension
    point, so the stock implementation simply reports success.
    """
    return True
977
977
def _filesindirs(repo, manifest, dirs):
    """
    Yield ``(file, directory)`` pairs for every file in ``manifest`` that
    lives under one of the directories in ``dirs``.

    Each file is reported at most once, paired with the first matching
    ancestor directory that util.finddirs() produces for it.
    """
    for filename in manifest:
        # Find the first ancestor directory of this file that is in `dirs`
        # (None when the file is outside all of them).
        owner = next((d for d in util.finddirs(filename) if d in dirs), None)
        if owner is not None:
            yield filename, owner
989
989
def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.

    repo    - the local repository (used for its ui and by _filesindirs)
    wctx    - the working context (the "local" side of the merge)
    mctx    - the context being merged in (the "remote" side)
    actions - mapping of file -> (action, args, message); mutated in place
              to add ACTION_PATH_CONFLICT / ACTION_PATH_CONFLICT_RESOLVE /
              ACTION_LOCAL_DIR_RENAME_GET entries as needed

    Raises error.Abort when the remote manifest itself is inconsistent
    (the same path appears as both a file and a directory).
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    # First pass: classify every action into the sets above.
    for f, (m, args, msg) in actions.items():
        if m in (ACTION_CREATED, ACTION_DELETED_CHANGED, ACTION_MERGE,
                 ACTION_CREATED_MERGE):
            # This action may create a new local file.
            createdfiledirs.update(util.finddirs(f))
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
        # Track the names of all deleted files.
        if m == ACTION_REMOVE:
            deletedfiles.add(f)
        if m == ACTION_MERGE:
            # A merge with move=True deletes the merge source f1.
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == ACTION_DIR_RENAME_MOVE_LOCAL:
            # The directory-rename move deletes its source file f2.
            f2, flags = args
            deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        if p in actions and actions[p][0] in (ACTION_CREATED,
                                              ACTION_DELETED_CHANGED,
                                              ACTION_MERGE,
                                              ACTION_CREATED_MERGE):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip('+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            # NOTE(review): the 'l' marker appears to tag this as the
            # local side of the conflict -- confirm against the consumer
            # of ACTION_PATH_CONFLICT args.
            actions[pnew] = (ACTION_PATH_CONFLICT_RESOLVE, (p,),
                             'local path conflict')
            actions[p] = (ACTION_PATH_CONFLICT, (pnew, 'l'),
                          'path conflict')

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip('+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = (ACTION_LOCAL_DIR_RENAME_GET, (p, fl),
                                     'remote path conflict')
                actions[p] = (ACTION_PATH_CONFLICT, (pnew, ACTION_REMOVE),
                              'path conflict')
                remoteconflicts.remove(p)
                # NOTE(review): we bail out of the generator after the
                # first unresolved conflict, since remoteconflicts was
                # just mutated while _filesindirs is consulting it.
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_("%s: is both a file and a directory\n") % p)
        raise error.Abort(_("destination manifest contains path conflicts"))
1091
1091
1092 def _filternarrowactions(narrowmatch, branchmerge, actions):
1092 def _filternarrowactions(narrowmatch, branchmerge, actions):
1093 """
1093 """
1094 Filters out actions that can ignored because the repo is narrowed.
1094 Filters out actions that can ignored because the repo is narrowed.
1095
1095
1096 Raise an exception if the merge cannot be completed because the repo is
1096 Raise an exception if the merge cannot be completed because the repo is
1097 narrowed.
1097 narrowed.
1098 """
1098 """
1099 nooptypes = set(['k']) # TODO: handle with nonconflicttypes
1099 nooptypes = set(['k']) # TODO: handle with nonconflicttypes
1100 nonconflicttypes = set('a am c cm f g r e'.split())
1100 nonconflicttypes = set('a am c cm f g r e'.split())
1101 # We mutate the items in the dict during iteration, so iterate
1101 # We mutate the items in the dict during iteration, so iterate
1102 # over a copy.
1102 # over a copy.
1103 for f, action in list(actions.items()):
1103 for f, action in list(actions.items()):
1104 if narrowmatch(f):
1104 if narrowmatch(f):
1105 pass
1105 pass
1106 elif not branchmerge:
1106 elif not branchmerge:
1107 del actions[f] # just updating, ignore changes outside clone
1107 del actions[f] # just updating, ignore changes outside clone
1108 elif action[0] in nooptypes:
1108 elif action[0] in nooptypes:
1109 del actions[f] # merge does not affect file
1109 del actions[f] # merge does not affect file
1110 elif action[0] in nonconflicttypes:
1110 elif action[0] in nonconflicttypes:
1111 raise error.Abort(_('merge affects file \'%s\' outside narrow, '
1111 raise error.Abort(_('merge affects file \'%s\' outside narrow, '
1112 'which is not yet supported') % f,
1112 'which is not yet supported') % f,
1113 hint=_('merging in the other direction '
1113 hint=_('merging in the other direction '
1114 'may work'))
1114 'may work'))
1115 else:
1115 else:
1116 raise error.Abort(_('conflict in file \'%s\' is outside '
1116 raise error.Abort(_('conflict in file \'%s\' is outside '
1117 'narrow clone') % f)
1117 'narrow clone') % f)
1118
1118
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies, forcefulldiff=False):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    forcefulldiff = diff the full manifests even when a narrower diff
                    would do (used by bid merge, which needs every file)

    Returns a 3-tuple ``(actions, diverge, renamedelete)`` where
    ``actions`` maps filename -> (ACTION_*, args, message) and the other
    two come from the copy-tracing step (empty dicts when followcopies
    is false).
    """
    if matcher is not None and matcher.always():
        # An always-matcher is equivalent to no filtering at all.
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (boolbm, boolf, boolm))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    # Files that are destinations of a copy/move; their sources are
    # handled from the other manifest's side of the diff below.
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in copy.iteritems():
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    if matcher is None:
        matcher = matchmod.always('', '')

    # Walk the diff; n1/fl1 are the local node/flags, n2/fl2 the remote
    # ones (a None node means the file is absent on that side).
    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = (ACTION_MERGE, (f, f, fa, False, pa.node()),
                                  'both renamed from %s' % fa)
                else:
                    actions[f] = (ACTION_MERGE, (f, f, None, False, pa.node()),
                                  'both created')
            else:
                a = ma[f]
                fla = ma.flags(f)
                # nol: no side (nor the ancestor) marks the file as a symlink
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = (ACTION_KEEP, (), 'remote unchanged')
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
                    else:
                        actions[f] = (ACTION_GET, (fl2, False),
                                      'remote is newer')
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = (ACTION_GET, (fl1, False), 'remote is newer')
                else: # both changed something
                    actions[f] = (ACTION_MERGE, (f, f, f, False, pa.node()),
                                  'versions differ')
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = (ACTION_MERGE, (f, f2, None, True, pa.node()),
                                   'remote directory rename, both created')
                else:
                    actions[f2] = (ACTION_DIR_RENAME_MOVE_LOCAL, (f, fl1),
                                   'remote directory rename - move from %s' % f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = (ACTION_MERGE, (f, f2, f2, False, pa.node()),
                              'local copied/moved from %s' % f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = (ACTION_REMOVE, None, 'remote delete')
                    else:
                        actions[f] = (ACTION_CHANGED_DELETED,
                                      (f, None, f, False, pa.node()),
                                      'prompt changed/deleted')
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = (ACTION_FORGET, None, 'remote deleted')
                else:
                    actions[f] = (ACTION_REMOVE, None, 'other deleted')
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = (ACTION_MERGE,
                                   (f2, f, None, False, pa.node()),
                                   'local directory rename, both created')
                else:
                    actions[f2] = (ACTION_LOCAL_DIR_RENAME_GET, (f, fl2),
                                   'local directory rename - get from %s' % f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = (ACTION_MERGE, (f2, f, f2, False, pa.node()),
                                  'remote copied from %s' % f2)
                else:
                    actions[f] = (ACTION_MERGE, (f2, f, f2, True, pa.node()),
                                  'remote moved from %s' % f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |  create
                #   y         n           *      |  create
                #   y         y           n      |  create
                #   y         y           y      |  merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = (ACTION_CREATED, (fl2,), 'remote created')
                elif not branchmerge:
                    actions[f] = (ACTION_CREATED, (fl2,), 'remote created')
                else:
                    actions[f] = (ACTION_CREATED_MERGE, (fl2, pa.node()),
                                  'remote created, get or merge')
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = (ACTION_MERGE, (df, f, f, False, pa.node()),
                                   'local directory rename - respect move '
                                   'from %s' % f)
                elif acceptremote:
                    actions[f] = (ACTION_CREATED, (fl2,), 'remote recreating')
                else:
                    actions[f] = (ACTION_DELETED_CHANGED,
                                  (None, f, f, False, pa.node()),
                                  'prompt deleted/changed')

    if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
        # If we are merging, look for path conflicts.
        checkpathconflicts(repo, wctx, p2, actions)

    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        # Updates "actions" in place
        _filternarrowactions(narrowmatch, branchmerge, actions)

    return actions, diverge, renamedelete
1309
1309
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Downgrade false conflicts where the nodeid changed but the
    content stayed the same as the ancestor's."""
    # Snapshot the items up front -- ``actions`` is mutated in the loop.
    for fname, (act, args, msg) in list(actions.items()):
        # Only changed/deleted and deleted/changed prompts can be trivial.
        if act == ACTION_CHANGED_DELETED:
            sidectx = wctx
        elif act == ACTION_DELETED_CHANGED:
            sidectx = mctx
        else:
            continue
        if fname not in ancestor or sidectx[fname].cmp(ancestor[fname]):
            continue
        if act == ACTION_CHANGED_DELETED:
            # local did change but ended up with same content
            actions[fname] = (ACTION_REMOVE, None, 'prompt same')
        else:
            # remote did change but ended up with same content
            del actions[fname] # don't get = keep local deleted
1324
1324
1325 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1325 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
1326 acceptremote, followcopies, matcher=None,
1326 acceptremote, followcopies, matcher=None,
1327 mergeforce=False):
1327 mergeforce=False):
1328 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1328 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1329 # Avoid cycle.
1329 # Avoid cycle.
1330 from . import sparse
1330 from . import sparse
1331
1331
1332 if len(ancestors) == 1: # default
1332 if len(ancestors) == 1: # default
1333 actions, diverge, renamedelete = manifestmerge(
1333 actions, diverge, renamedelete = manifestmerge(
1334 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1334 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
1335 acceptremote, followcopies)
1335 acceptremote, followcopies)
1336 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1336 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1337
1337
1338 else: # only when merge.preferancestor=* - the default
1338 else: # only when merge.preferancestor=* - the default
1339 repo.ui.note(
1339 repo.ui.note(
1340 _("note: merging %s and %s using bids from ancestors %s\n") %
1340 _("note: merging %s and %s using bids from ancestors %s\n") %
1341 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1341 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
1342 for anc in ancestors)))
1342 for anc in ancestors)))
1343
1343
1344 # Call for bids
1344 # Call for bids
1345 fbids = {} # mapping filename to bids (action method to list af actions)
1345 fbids = {} # mapping filename to bids (action method to list af actions)
1346 diverge, renamedelete = None, None
1346 diverge, renamedelete = None, None
1347 for ancestor in ancestors:
1347 for ancestor in ancestors:
1348 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1348 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1349 actions, diverge1, renamedelete1 = manifestmerge(
1349 actions, diverge1, renamedelete1 = manifestmerge(
1350 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1350 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1351 acceptremote, followcopies, forcefulldiff=True)
1351 acceptremote, followcopies, forcefulldiff=True)
1352 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1352 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1353
1353
1354 # Track the shortest set of warning on the theory that bid
1354 # Track the shortest set of warning on the theory that bid
1355 # merge will correctly incorporate more information
1355 # merge will correctly incorporate more information
1356 if diverge is None or len(diverge1) < len(diverge):
1356 if diverge is None or len(diverge1) < len(diverge):
1357 diverge = diverge1
1357 diverge = diverge1
1358 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1358 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1359 renamedelete = renamedelete1
1359 renamedelete = renamedelete1
1360
1360
1361 for f, a in sorted(actions.iteritems()):
1361 for f, a in sorted(actions.iteritems()):
1362 m, args, msg = a
1362 m, args, msg = a
1363 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1363 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1364 if f in fbids:
1364 if f in fbids:
1365 d = fbids[f]
1365 d = fbids[f]
1366 if m in d:
1366 if m in d:
1367 d[m].append(a)
1367 d[m].append(a)
1368 else:
1368 else:
1369 d[m] = [a]
1369 d[m] = [a]
1370 else:
1370 else:
1371 fbids[f] = {m: [a]}
1371 fbids[f] = {m: [a]}
1372
1372
1373 # Pick the best bid for each file
1373 # Pick the best bid for each file
1374 repo.ui.note(_('\nauction for merging merge bids\n'))
1374 repo.ui.note(_('\nauction for merging merge bids\n'))
1375 actions = {}
1375 actions = {}
1376 dms = [] # filenames that have dm actions
1376 dms = [] # filenames that have dm actions
1377 for f, bids in sorted(fbids.items()):
1377 for f, bids in sorted(fbids.items()):
1378 # bids is a mapping from action method to list af actions
1378 # bids is a mapping from action method to list af actions
1379 # Consensus?
1379 # Consensus?
1380 if len(bids) == 1: # all bids are the same kind of method
1380 if len(bids) == 1: # all bids are the same kind of method
1381 m, l = list(bids.items())[0]
1381 m, l = list(bids.items())[0]
1382 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1382 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1383 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1383 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1384 actions[f] = l[0]
1384 actions[f] = l[0]
1385 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1385 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1386 dms.append(f)
1386 dms.append(f)
1387 continue
1387 continue
1388 # If keep is an option, just do it.
1388 # If keep is an option, just do it.
1389 if ACTION_KEEP in bids:
1389 if ACTION_KEEP in bids:
1390 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1390 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1391 actions[f] = bids[ACTION_KEEP][0]
1391 actions[f] = bids[ACTION_KEEP][0]
1392 continue
1392 continue
1393 # If there are gets and they all agree [how could they not?], do it.
1393 # If there are gets and they all agree [how could they not?], do it.
1394 if ACTION_GET in bids:
1394 if ACTION_GET in bids:
1395 ga0 = bids[ACTION_GET][0]
1395 ga0 = bids[ACTION_GET][0]
1396 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1396 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1397 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1397 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1398 actions[f] = ga0
1398 actions[f] = ga0
1399 continue
1399 continue
1400 # TODO: Consider other simple actions such as mode changes
1400 # TODO: Consider other simple actions such as mode changes
1401 # Handle inefficient democrazy.
1401 # Handle inefficient democrazy.
1402 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1402 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1403 for m, l in sorted(bids.items()):
1403 for m, l in sorted(bids.items()):
1404 for _f, args, msg in l:
1404 for _f, args, msg in l:
1405 repo.ui.note(' %s -> %s\n' % (msg, m))
1405 repo.ui.note(' %s -> %s\n' % (msg, m))
1406 # Pick random action. TODO: Instead, prompt user when resolving
1406 # Pick random action. TODO: Instead, prompt user when resolving
1407 m, l = list(bids.items())[0]
1407 m, l = list(bids.items())[0]
1408 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1408 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1409 (f, m))
1409 (f, m))
1410 actions[f] = l[0]
1410 actions[f] = l[0]
1411 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1411 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1412 dms.append(f)
1412 dms.append(f)
1413 continue
1413 continue
1414 # Work around 'dm' that can cause multiple actions for the same file
1414 # Work around 'dm' that can cause multiple actions for the same file
1415 for f in dms:
1415 for f in dms:
1416 dm, (f0, flags), msg = actions[f]
1416 dm, (f0, flags), msg = actions[f]
1417 assert dm == ACTION_DIR_RENAME_MOVE_LOCAL, dm
1417 assert dm == ACTION_DIR_RENAME_MOVE_LOCAL, dm
1418 if f0 in actions and actions[f0][0] == ACTION_REMOVE:
1418 if f0 in actions and actions[f0][0] == ACTION_REMOVE:
1419 # We have one bid for removing a file and another for moving it.
1419 # We have one bid for removing a file and another for moving it.
1420 # These two could be merged as first move and then delete ...
1420 # These two could be merged as first move and then delete ...
1421 # but instead drop moving and just delete.
1421 # but instead drop moving and just delete.
1422 del actions[f]
1422 del actions[f]
1423 repo.ui.note(_('end of auction\n\n'))
1423 repo.ui.note(_('end of auction\n\n'))
1424
1424
1425 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1425 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1426
1426
1427 if wctx.rev() is None:
1427 if wctx.rev() is None:
1428 fractions = _forgetremoved(wctx, mctx, branchmerge)
1428 fractions = _forgetremoved(wctx, mctx, branchmerge)
1429 actions.update(fractions)
1429 actions.update(fractions)
1430
1430
1431 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1431 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1432 actions)
1432 actions)
1433
1433
1434 return prunedactions, diverge, renamedelete
1434 return prunedactions, diverge, renamedelete
1435
1435
1436 def _getcwd():
1436 def _getcwd():
1437 try:
1437 try:
1438 return pycompat.getcwd()
1438 return pycompat.getcwd()
1439 except OSError as err:
1439 except OSError as err:
1440 if err.errno == errno.ENOENT:
1440 if err.errno == errno.ENOENT:
1441 return None
1441 return None
1442 raise
1442 raise
1443
1443
1444 def batchremove(repo, wctx, actions):
1444 def batchremove(repo, wctx, actions):
1445 """apply removes to the working directory
1445 """apply removes to the working directory
1446
1446
1447 yields tuples for progress updates
1447 yields tuples for progress updates
1448 """
1448 """
1449 verbose = repo.ui.verbose
1449 verbose = repo.ui.verbose
1450 cwd = _getcwd()
1450 cwd = _getcwd()
1451 i = 0
1451 i = 0
1452 for f, args, msg in actions:
1452 for f, args, msg in actions:
1453 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1453 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1454 if verbose:
1454 if verbose:
1455 repo.ui.note(_("removing %s\n") % f)
1455 repo.ui.note(_("removing %s\n") % f)
1456 wctx[f].audit()
1456 wctx[f].audit()
1457 try:
1457 try:
1458 wctx[f].remove(ignoremissing=True)
1458 wctx[f].remove(ignoremissing=True)
1459 except OSError as inst:
1459 except OSError as inst:
1460 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1460 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1461 (f, inst.strerror))
1461 (f, inst.strerror))
1462 if i == 100:
1462 if i == 100:
1463 yield i, f
1463 yield i, f
1464 i = 0
1464 i = 0
1465 i += 1
1465 i += 1
1466 if i > 0:
1466 if i > 0:
1467 yield i, f
1467 yield i, f
1468
1468
1469 if cwd and not _getcwd():
1469 if cwd and not _getcwd():
1470 # cwd was removed in the course of removing files; print a helpful
1470 # cwd was removed in the course of removing files; print a helpful
1471 # warning.
1471 # warning.
1472 repo.ui.warn(_("current directory was removed\n"
1472 repo.ui.warn(_("current directory was removed\n"
1473 "(consider changing to repo root: %s)\n") % repo.root)
1473 "(consider changing to repo root: %s)\n") % repo.root)
1474
1474
1475 def batchget(repo, mctx, wctx, actions):
1475 def batchget(repo, mctx, wctx, actions):
1476 """apply gets to the working directory
1476 """apply gets to the working directory
1477
1477
1478 mctx is the context to get from
1478 mctx is the context to get from
1479
1479
1480 yields tuples for progress updates
1480 yields tuples for progress updates
1481 """
1481 """
1482 verbose = repo.ui.verbose
1482 verbose = repo.ui.verbose
1483 fctx = mctx.filectx
1483 fctx = mctx.filectx
1484 ui = repo.ui
1484 ui = repo.ui
1485 i = 0
1485 i = 0
1486 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1486 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1487 for f, (flags, backup), msg in actions:
1487 for f, (flags, backup), msg in actions:
1488 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1488 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1489 if verbose:
1489 if verbose:
1490 repo.ui.note(_("getting %s\n") % f)
1490 repo.ui.note(_("getting %s\n") % f)
1491
1491
1492 if backup:
1492 if backup:
1493 # If a file or directory exists with the same name, back that
1493 # If a file or directory exists with the same name, back that
1494 # up. Otherwise, look to see if there is a file that conflicts
1494 # up. Otherwise, look to see if there is a file that conflicts
1495 # with a directory this file is in, and if so, back that up.
1495 # with a directory this file is in, and if so, back that up.
1496 absf = repo.wjoin(f)
1496 absf = repo.wjoin(f)
1497 if not repo.wvfs.lexists(f):
1497 if not repo.wvfs.lexists(f):
1498 for p in util.finddirs(f):
1498 for p in util.finddirs(f):
1499 if repo.wvfs.isfileorlink(p):
1499 if repo.wvfs.isfileorlink(p):
1500 absf = repo.wjoin(p)
1500 absf = repo.wjoin(p)
1501 break
1501 break
1502 orig = scmutil.origpath(ui, repo, absf)
1502 orig = scmutil.origpath(ui, repo, absf)
1503 if repo.wvfs.lexists(absf):
1503 if repo.wvfs.lexists(absf):
1504 util.rename(absf, orig)
1504 util.rename(absf, orig)
1505 wctx[f].clearunknown()
1505 wctx[f].clearunknown()
1506 atomictemp = ui.configbool("experimental", "update.atomic-file")
1506 atomictemp = ui.configbool("experimental", "update.atomic-file")
1507 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1507 wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
1508 atomictemp=atomictemp)
1508 atomictemp=atomictemp)
1509 if i == 100:
1509 if i == 100:
1510 yield i, f
1510 yield i, f
1511 i = 0
1511 i = 0
1512 i += 1
1512 i += 1
1513 if i > 0:
1513 if i > 0:
1514 yield i, f
1514 yield i, f
1515
1515
1516 def _prefetchfiles(repo, ctx, actions):
1516 def _prefetchfiles(repo, ctx, actions):
1517 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1517 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1518 of merge actions. ``ctx`` is the context being merged in."""
1518 of merge actions. ``ctx`` is the context being merged in."""
1519
1519
1520 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1520 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1521 # don't touch the context to be merged in. 'cd' is skipped, because
1521 # don't touch the context to be merged in. 'cd' is skipped, because
1522 # changed/deleted never resolves to something from the remote side.
1522 # changed/deleted never resolves to something from the remote side.
1523 oplist = [actions[a] for a in (ACTION_GET, ACTION_DELETED_CHANGED,
1523 oplist = [actions[a] for a in (ACTION_GET, ACTION_DELETED_CHANGED,
1524 ACTION_LOCAL_DIR_RENAME_GET, ACTION_MERGE)]
1524 ACTION_LOCAL_DIR_RENAME_GET, ACTION_MERGE)]
1525 prefetch = scmutil.prefetchfiles
1525 prefetch = scmutil.prefetchfiles
1526 matchfiles = scmutil.matchfiles
1526 matchfiles = scmutil.matchfiles
1527 prefetch(repo, [ctx.rev()],
1527 prefetch(repo, [ctx.rev()],
1528 matchfiles(repo,
1528 matchfiles(repo,
1529 [f for sublist in oplist for f, args, msg in sublist]))
1529 [f for sublist in oplist for f, args, msg in sublist]))
1530
1530
1531 @attr.s(frozen=True)
1531 @attr.s(frozen=True)
1532 class updateresult(object):
1532 class updateresult(object):
1533 updatedcount = attr.ib()
1533 updatedcount = attr.ib()
1534 mergedcount = attr.ib()
1534 mergedcount = attr.ib()
1535 removedcount = attr.ib()
1535 removedcount = attr.ib()
1536 unresolvedcount = attr.ib()
1536 unresolvedcount = attr.ib()
1537
1537
1538 def isempty(self):
1538 def isempty(self):
1539 return (not self.updatedcount and not self.mergedcount
1539 return (not self.updatedcount and not self.mergedcount
1540 and not self.removedcount and not self.unresolvedcount)
1540 and not self.removedcount and not self.unresolvedcount)
1541
1541
1542 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1542 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1543 """apply the merge action list to the working directory
1543 """apply the merge action list to the working directory
1544
1544
1545 wctx is the working copy context
1545 wctx is the working copy context
1546 mctx is the context to be merged into the working copy
1546 mctx is the context to be merged into the working copy
1547
1547
1548 Return a tuple of counts (updated, merged, removed, unresolved) that
1548 Return a tuple of counts (updated, merged, removed, unresolved) that
1549 describes how many files were affected by the update.
1549 describes how many files were affected by the update.
1550 """
1550 """
1551
1551
1552 _prefetchfiles(repo, mctx, actions)
1552 _prefetchfiles(repo, mctx, actions)
1553
1553
1554 updated, merged, removed = 0, 0, 0
1554 updated, merged, removed = 0, 0, 0
1555 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1555 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1556 moves = []
1556 moves = []
1557 for m, l in actions.items():
1557 for m, l in actions.items():
1558 l.sort()
1558 l.sort()
1559
1559
1560 # 'cd' and 'dc' actions are treated like other merge conflicts
1560 # 'cd' and 'dc' actions are treated like other merge conflicts
1561 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1561 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1562 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1562 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1563 mergeactions.extend(actions[ACTION_MERGE])
1563 mergeactions.extend(actions[ACTION_MERGE])
1564 for f, args, msg in mergeactions:
1564 for f, args, msg in mergeactions:
1565 f1, f2, fa, move, anc = args
1565 f1, f2, fa, move, anc = args
1566 if f == '.hgsubstate': # merged internally
1566 if f == '.hgsubstate': # merged internally
1567 continue
1567 continue
1568 if f1 is None:
1568 if f1 is None:
1569 fcl = filemerge.absentfilectx(wctx, fa)
1569 fcl = filemerge.absentfilectx(wctx, fa)
1570 else:
1570 else:
1571 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1571 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1572 fcl = wctx[f1]
1572 fcl = wctx[f1]
1573 if f2 is None:
1573 if f2 is None:
1574 fco = filemerge.absentfilectx(mctx, fa)
1574 fco = filemerge.absentfilectx(mctx, fa)
1575 else:
1575 else:
1576 fco = mctx[f2]
1576 fco = mctx[f2]
1577 actx = repo[anc]
1577 actx = repo[anc]
1578 if fa in actx:
1578 if fa in actx:
1579 fca = actx[fa]
1579 fca = actx[fa]
1580 else:
1580 else:
1581 # TODO: move to absentfilectx
1581 # TODO: move to absentfilectx
1582 fca = repo.filectx(f1, fileid=nullrev)
1582 fca = repo.filectx(f1, fileid=nullrev)
1583 ms.add(fcl, fco, fca, f)
1583 ms.add(fcl, fco, fca, f)
1584 if f1 != f and move:
1584 if f1 != f and move:
1585 moves.append(f1)
1585 moves.append(f1)
1586
1586
1587 _updating = _('updating')
1588 _files = _('files')
1589 progress = repo.ui.progress
1590
1591 # remove renamed files after safely stored
1587 # remove renamed files after safely stored
1592 for f in moves:
1588 for f in moves:
1593 if wctx[f].lexists():
1589 if wctx[f].lexists():
1594 repo.ui.debug("removing %s\n" % f)
1590 repo.ui.debug("removing %s\n" % f)
1595 wctx[f].audit()
1591 wctx[f].audit()
1596 wctx[f].remove()
1592 wctx[f].remove()
1597
1593
1598 numupdates = sum(len(l) for m, l in actions.items()
1594 numupdates = sum(len(l) for m, l in actions.items()
1599 if m != ACTION_KEEP)
1595 if m != ACTION_KEEP)
1600 z = 0
1596 progress = repo.ui.makeprogress(_('updating'), unit=_('files'),
1597 total=numupdates)
1601
1598
1602 if [a for a in actions[ACTION_REMOVE] if a[0] == '.hgsubstate']:
1599 if [a for a in actions[ACTION_REMOVE] if a[0] == '.hgsubstate']:
1603 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1600 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1604
1601
1605 # record path conflicts
1602 # record path conflicts
1606 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1603 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1607 f1, fo = args
1604 f1, fo = args
1608 s = repo.ui.status
1605 s = repo.ui.status
1609 s(_("%s: path conflict - a file or link has the same name as a "
1606 s(_("%s: path conflict - a file or link has the same name as a "
1610 "directory\n") % f)
1607 "directory\n") % f)
1611 if fo == 'l':
1608 if fo == 'l':
1612 s(_("the local file has been renamed to %s\n") % f1)
1609 s(_("the local file has been renamed to %s\n") % f1)
1613 else:
1610 else:
1614 s(_("the remote file has been renamed to %s\n") % f1)
1611 s(_("the remote file has been renamed to %s\n") % f1)
1615 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1612 s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
1616 ms.addpath(f, f1, fo)
1613 ms.addpath(f, f1, fo)
1617 z += 1
1614 progress.increment(item=f)
1618 progress(_updating, z, item=f, total=numupdates, unit=_files)
1619
1615
1620 # When merging in-memory, we can't support worker processes, so set the
1616 # When merging in-memory, we can't support worker processes, so set the
1621 # per-item cost at 0 in that case.
1617 # per-item cost at 0 in that case.
1622 cost = 0 if wctx.isinmemory() else 0.001
1618 cost = 0 if wctx.isinmemory() else 0.001
1623
1619
1624 # remove in parallel (must come before resolving path conflicts and getting)
1620 # remove in parallel (must come before resolving path conflicts and getting)
1625 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1621 prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
1626 actions[ACTION_REMOVE])
1622 actions[ACTION_REMOVE])
1627 for i, item in prog:
1623 for i, item in prog:
1628 z += i
1624 progress.increment(step=i, item=item)
1629 progress(_updating, z, item=item, total=numupdates, unit=_files)
1630 removed = len(actions[ACTION_REMOVE])
1625 removed = len(actions[ACTION_REMOVE])
1631
1626
1632 # resolve path conflicts (must come before getting)
1627 # resolve path conflicts (must come before getting)
1633 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1628 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1634 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1629 repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
1635 f0, = args
1630 f0, = args
1636 if wctx[f0].lexists():
1631 if wctx[f0].lexists():
1637 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1632 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1638 wctx[f].audit()
1633 wctx[f].audit()
1639 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1634 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1640 wctx[f0].remove()
1635 wctx[f0].remove()
1641 z += 1
1636 progress.increment(item=f)
1642 progress(_updating, z, item=f, total=numupdates, unit=_files)
1643
1637
1644 # get in parallel
1638 # get in parallel
1645 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1639 prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
1646 actions[ACTION_GET])
1640 actions[ACTION_GET])
1647 for i, item in prog:
1641 for i, item in prog:
1648 z += i
1642 progress.increment(step=i, item=item)
1649 progress(_updating, z, item=item, total=numupdates, unit=_files)
1650 updated = len(actions[ACTION_GET])
1643 updated = len(actions[ACTION_GET])
1651
1644
1652 if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']:
1645 if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']:
1653 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1646 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1654
1647
1655 # forget (manifest only, just log it) (must come first)
1648 # forget (manifest only, just log it) (must come first)
1656 for f, args, msg in actions[ACTION_FORGET]:
1649 for f, args, msg in actions[ACTION_FORGET]:
1657 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1650 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1658 z += 1
1651 progress.increment(item=f)
1659 progress(_updating, z, item=f, total=numupdates, unit=_files)
1660
1652
1661 # re-add (manifest only, just log it)
1653 # re-add (manifest only, just log it)
1662 for f, args, msg in actions[ACTION_ADD]:
1654 for f, args, msg in actions[ACTION_ADD]:
1663 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1655 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1664 z += 1
1656 progress.increment(item=f)
1665 progress(_updating, z, item=f, total=numupdates, unit=_files)
1666
1657
1667 # re-add/mark as modified (manifest only, just log it)
1658 # re-add/mark as modified (manifest only, just log it)
1668 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1659 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1669 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1660 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1670 z += 1
1661 progress.increment(item=f)
1671 progress(_updating, z, item=f, total=numupdates, unit=_files)
1672
1662
1673 # keep (noop, just log it)
1663 # keep (noop, just log it)
1674 for f, args, msg in actions[ACTION_KEEP]:
1664 for f, args, msg in actions[ACTION_KEEP]:
1675 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1665 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1676 # no progress
1666 # no progress
1677
1667
1678 # directory rename, move local
1668 # directory rename, move local
1679 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1669 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1680 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1670 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1681 z += 1
1671 progress.increment(item=f)
1682 progress(_updating, z, item=f, total=numupdates, unit=_files)
1683 f0, flags = args
1672 f0, flags = args
1684 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1673 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1685 wctx[f].audit()
1674 wctx[f].audit()
1686 wctx[f].write(wctx.filectx(f0).data(), flags)
1675 wctx[f].write(wctx.filectx(f0).data(), flags)
1687 wctx[f0].remove()
1676 wctx[f0].remove()
1688 updated += 1
1677 updated += 1
1689
1678
1690 # local directory rename, get
1679 # local directory rename, get
1691 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1680 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1692 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1681 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1693 z += 1
1682 progress.increment(item=f)
1694 progress(_updating, z, item=f, total=numupdates, unit=_files)
1695 f0, flags = args
1683 f0, flags = args
1696 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1684 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1697 wctx[f].write(mctx.filectx(f0).data(), flags)
1685 wctx[f].write(mctx.filectx(f0).data(), flags)
1698 updated += 1
1686 updated += 1
1699
1687
1700 # exec
1688 # exec
1701 for f, args, msg in actions[ACTION_EXEC]:
1689 for f, args, msg in actions[ACTION_EXEC]:
1702 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1690 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1703 z += 1
1691 progress.increment(item=f)
1704 progress(_updating, z, item=f, total=numupdates, unit=_files)
1705 flags, = args
1692 flags, = args
1706 wctx[f].audit()
1693 wctx[f].audit()
1707 wctx[f].setflags('l' in flags, 'x' in flags)
1694 wctx[f].setflags('l' in flags, 'x' in flags)
1708 updated += 1
1695 updated += 1
1709
1696
1710 # the ordering is important here -- ms.mergedriver will raise if the merge
1697 # the ordering is important here -- ms.mergedriver will raise if the merge
1711 # driver has changed, and we want to be able to bypass it when overwrite is
1698 # driver has changed, and we want to be able to bypass it when overwrite is
1712 # True
1699 # True
1713 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1700 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1714
1701
1715 if usemergedriver:
1702 if usemergedriver:
1716 if wctx.isinmemory():
1703 if wctx.isinmemory():
1717 raise error.InMemoryMergeConflictsError("in-memory merge does not "
1704 raise error.InMemoryMergeConflictsError("in-memory merge does not "
1718 "support mergedriver")
1705 "support mergedriver")
1719 ms.commit()
1706 ms.commit()
1720 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1707 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1721 # the driver might leave some files unresolved
1708 # the driver might leave some files unresolved
1722 unresolvedf = set(ms.unresolved())
1709 unresolvedf = set(ms.unresolved())
1723 if not proceed:
1710 if not proceed:
1724 # XXX setting unresolved to at least 1 is a hack to make sure we
1711 # XXX setting unresolved to at least 1 is a hack to make sure we
1725 # error out
1712 # error out
1726 return updateresult(updated, merged, removed,
1713 return updateresult(updated, merged, removed,
1727 max(len(unresolvedf), 1))
1714 max(len(unresolvedf), 1))
1728 newactions = []
1715 newactions = []
1729 for f, args, msg in mergeactions:
1716 for f, args, msg in mergeactions:
1730 if f in unresolvedf:
1717 if f in unresolvedf:
1731 newactions.append((f, args, msg))
1718 newactions.append((f, args, msg))
1732 mergeactions = newactions
1719 mergeactions = newactions
1733
1720
1734 try:
1721 try:
1735 # premerge
1722 # premerge
1736 tocomplete = []
1723 tocomplete = []
1737 for f, args, msg in mergeactions:
1724 for f, args, msg in mergeactions:
1738 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1725 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1739 z += 1
1726 progress.increment(item=f)
1740 progress(_updating, z, item=f, total=numupdates, unit=_files)
1741 if f == '.hgsubstate': # subrepo states need updating
1727 if f == '.hgsubstate': # subrepo states need updating
1742 subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1728 subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1743 overwrite, labels)
1729 overwrite, labels)
1744 continue
1730 continue
1745 wctx[f].audit()
1731 wctx[f].audit()
1746 complete, r = ms.preresolve(f, wctx)
1732 complete, r = ms.preresolve(f, wctx)
1747 if not complete:
1733 if not complete:
1748 numupdates += 1
1734 numupdates += 1
1749 tocomplete.append((f, args, msg))
1735 tocomplete.append((f, args, msg))
1750
1736
1751 # merge
1737 # merge
1752 for f, args, msg in tocomplete:
1738 for f, args, msg in tocomplete:
1753 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1739 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1754 z += 1
1740 progress.increment(item=f, total=numupdates)
1755 progress(_updating, z, item=f, total=numupdates, unit=_files)
1756 ms.resolve(f, wctx)
1741 ms.resolve(f, wctx)
1757
1742
1758 finally:
1743 finally:
1759 ms.commit()
1744 ms.commit()
1760
1745
1761 unresolved = ms.unresolvedcount()
1746 unresolved = ms.unresolvedcount()
1762
1747
1763 if (usemergedriver and not unresolved
1748 if (usemergedriver and not unresolved
1764 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS):
1749 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS):
1765 if not driverconclude(repo, ms, wctx, labels=labels):
1750 if not driverconclude(repo, ms, wctx, labels=labels):
1766 # XXX setting unresolved to at least 1 is a hack to make sure we
1751 # XXX setting unresolved to at least 1 is a hack to make sure we
1767 # error out
1752 # error out
1768 unresolved = max(unresolved, 1)
1753 unresolved = max(unresolved, 1)
1769
1754
1770 ms.commit()
1755 ms.commit()
1771
1756
1772 msupdated, msmerged, msremoved = ms.counts()
1757 msupdated, msmerged, msremoved = ms.counts()
1773 updated += msupdated
1758 updated += msupdated
1774 merged += msmerged
1759 merged += msmerged
1775 removed += msremoved
1760 removed += msremoved
1776
1761
1777 extraactions = ms.actions()
1762 extraactions = ms.actions()
1778 if extraactions:
1763 if extraactions:
1779 mfiles = set(a[0] for a in actions[ACTION_MERGE])
1764 mfiles = set(a[0] for a in actions[ACTION_MERGE])
1780 for k, acts in extraactions.iteritems():
1765 for k, acts in extraactions.iteritems():
1781 actions[k].extend(acts)
1766 actions[k].extend(acts)
1782 # Remove these files from actions[ACTION_MERGE] as well. This is
1767 # Remove these files from actions[ACTION_MERGE] as well. This is
1783 # important because in recordupdates, files in actions[ACTION_MERGE]
1768 # important because in recordupdates, files in actions[ACTION_MERGE]
1784 # are processed after files in other actions, and the merge driver
1769 # are processed after files in other actions, and the merge driver
1785 # might add files to those actions via extraactions above. This can
1770 # might add files to those actions via extraactions above. This can
1786 # lead to a file being recorded twice, with poor results. This is
1771 # lead to a file being recorded twice, with poor results. This is
1787 # especially problematic for actions[ACTION_REMOVE] (currently only
1772 # especially problematic for actions[ACTION_REMOVE] (currently only
1788 # possible with the merge driver in the initial merge process;
1773 # possible with the merge driver in the initial merge process;
1789 # interrupted merges don't go through this flow).
1774 # interrupted merges don't go through this flow).
1790 #
1775 #
1791 # The real fix here is to have indexes by both file and action so
1776 # The real fix here is to have indexes by both file and action so
1792 # that when the action for a file is changed it is automatically
1777 # that when the action for a file is changed it is automatically
1793 # reflected in the other action lists. But that involves a more
1778 # reflected in the other action lists. But that involves a more
1794 # complex data structure, so this will do for now.
1779 # complex data structure, so this will do for now.
1795 #
1780 #
1796 # We don't need to do the same operation for 'dc' and 'cd' because
1781 # We don't need to do the same operation for 'dc' and 'cd' because
1797 # those lists aren't consulted again.
1782 # those lists aren't consulted again.
1798 mfiles.difference_update(a[0] for a in acts)
1783 mfiles.difference_update(a[0] for a in acts)
1799
1784
1800 actions[ACTION_MERGE] = [a for a in actions[ACTION_MERGE]
1785 actions[ACTION_MERGE] = [a for a in actions[ACTION_MERGE]
1801 if a[0] in mfiles]
1786 if a[0] in mfiles]
1802
1787
1803 progress(_updating, None, total=numupdates, unit=_files)
1788 progress.update(None)
1804 return updateresult(updated, merged, removed, unresolved)
1789 return updateresult(updated, merged, removed, unresolved)
1805
1790
1806 def recordupdates(repo, actions, branchmerge):
1791 def recordupdates(repo, actions, branchmerge):
1807 "record merge actions to the dirstate"
1792 "record merge actions to the dirstate"
1808 # remove (must come first)
1793 # remove (must come first)
1809 for f, args, msg in actions.get(ACTION_REMOVE, []):
1794 for f, args, msg in actions.get(ACTION_REMOVE, []):
1810 if branchmerge:
1795 if branchmerge:
1811 repo.dirstate.remove(f)
1796 repo.dirstate.remove(f)
1812 else:
1797 else:
1813 repo.dirstate.drop(f)
1798 repo.dirstate.drop(f)
1814
1799
1815 # forget (must come first)
1800 # forget (must come first)
1816 for f, args, msg in actions.get(ACTION_FORGET, []):
1801 for f, args, msg in actions.get(ACTION_FORGET, []):
1817 repo.dirstate.drop(f)
1802 repo.dirstate.drop(f)
1818
1803
1819 # resolve path conflicts
1804 # resolve path conflicts
1820 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
1805 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
1821 f0, = args
1806 f0, = args
1822 origf0 = repo.dirstate.copied(f0) or f0
1807 origf0 = repo.dirstate.copied(f0) or f0
1823 repo.dirstate.add(f)
1808 repo.dirstate.add(f)
1824 repo.dirstate.copy(origf0, f)
1809 repo.dirstate.copy(origf0, f)
1825 if f0 == origf0:
1810 if f0 == origf0:
1826 repo.dirstate.remove(f0)
1811 repo.dirstate.remove(f0)
1827 else:
1812 else:
1828 repo.dirstate.drop(f0)
1813 repo.dirstate.drop(f0)
1829
1814
1830 # re-add
1815 # re-add
1831 for f, args, msg in actions.get(ACTION_ADD, []):
1816 for f, args, msg in actions.get(ACTION_ADD, []):
1832 repo.dirstate.add(f)
1817 repo.dirstate.add(f)
1833
1818
1834 # re-add/mark as modified
1819 # re-add/mark as modified
1835 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
1820 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
1836 if branchmerge:
1821 if branchmerge:
1837 repo.dirstate.normallookup(f)
1822 repo.dirstate.normallookup(f)
1838 else:
1823 else:
1839 repo.dirstate.add(f)
1824 repo.dirstate.add(f)
1840
1825
1841 # exec change
1826 # exec change
1842 for f, args, msg in actions.get(ACTION_EXEC, []):
1827 for f, args, msg in actions.get(ACTION_EXEC, []):
1843 repo.dirstate.normallookup(f)
1828 repo.dirstate.normallookup(f)
1844
1829
1845 # keep
1830 # keep
1846 for f, args, msg in actions.get(ACTION_KEEP, []):
1831 for f, args, msg in actions.get(ACTION_KEEP, []):
1847 pass
1832 pass
1848
1833
1849 # get
1834 # get
1850 for f, args, msg in actions.get(ACTION_GET, []):
1835 for f, args, msg in actions.get(ACTION_GET, []):
1851 if branchmerge:
1836 if branchmerge:
1852 repo.dirstate.otherparent(f)
1837 repo.dirstate.otherparent(f)
1853 else:
1838 else:
1854 repo.dirstate.normal(f)
1839 repo.dirstate.normal(f)
1855
1840
1856 # merge
1841 # merge
1857 for f, args, msg in actions.get(ACTION_MERGE, []):
1842 for f, args, msg in actions.get(ACTION_MERGE, []):
1858 f1, f2, fa, move, anc = args
1843 f1, f2, fa, move, anc = args
1859 if branchmerge:
1844 if branchmerge:
1860 # We've done a branch merge, mark this file as merged
1845 # We've done a branch merge, mark this file as merged
1861 # so that we properly record the merger later
1846 # so that we properly record the merger later
1862 repo.dirstate.merge(f)
1847 repo.dirstate.merge(f)
1863 if f1 != f2: # copy/rename
1848 if f1 != f2: # copy/rename
1864 if move:
1849 if move:
1865 repo.dirstate.remove(f1)
1850 repo.dirstate.remove(f1)
1866 if f1 != f:
1851 if f1 != f:
1867 repo.dirstate.copy(f1, f)
1852 repo.dirstate.copy(f1, f)
1868 else:
1853 else:
1869 repo.dirstate.copy(f2, f)
1854 repo.dirstate.copy(f2, f)
1870 else:
1855 else:
1871 # We've update-merged a locally modified file, so
1856 # We've update-merged a locally modified file, so
1872 # we set the dirstate to emulate a normal checkout
1857 # we set the dirstate to emulate a normal checkout
1873 # of that file some time in the past. Thus our
1858 # of that file some time in the past. Thus our
1874 # merge will appear as a normal local file
1859 # merge will appear as a normal local file
1875 # modification.
1860 # modification.
1876 if f2 == f: # file not locally copied/moved
1861 if f2 == f: # file not locally copied/moved
1877 repo.dirstate.normallookup(f)
1862 repo.dirstate.normallookup(f)
1878 if move:
1863 if move:
1879 repo.dirstate.drop(f1)
1864 repo.dirstate.drop(f1)
1880
1865
1881 # directory rename, move local
1866 # directory rename, move local
1882 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
1867 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
1883 f0, flag = args
1868 f0, flag = args
1884 if branchmerge:
1869 if branchmerge:
1885 repo.dirstate.add(f)
1870 repo.dirstate.add(f)
1886 repo.dirstate.remove(f0)
1871 repo.dirstate.remove(f0)
1887 repo.dirstate.copy(f0, f)
1872 repo.dirstate.copy(f0, f)
1888 else:
1873 else:
1889 repo.dirstate.normal(f)
1874 repo.dirstate.normal(f)
1890 repo.dirstate.drop(f0)
1875 repo.dirstate.drop(f0)
1891
1876
1892 # directory rename, get
1877 # directory rename, get
1893 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
1878 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
1894 f0, flag = args
1879 f0, flag = args
1895 if branchmerge:
1880 if branchmerge:
1896 repo.dirstate.add(f)
1881 repo.dirstate.add(f)
1897 repo.dirstate.copy(f0, f)
1882 repo.dirstate.copy(f0, f)
1898 else:
1883 else:
1899 repo.dirstate.normal(f)
1884 repo.dirstate.normal(f)
1900
1885
1901 def update(repo, node, branchmerge, force, ancestor=None,
1886 def update(repo, node, branchmerge, force, ancestor=None,
1902 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1887 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1903 updatecheck=None, wc=None):
1888 updatecheck=None, wc=None):
1904 """
1889 """
1905 Perform a merge between the working directory and the given node
1890 Perform a merge between the working directory and the given node
1906
1891
1907 node = the node to update to
1892 node = the node to update to
1908 branchmerge = whether to merge between branches
1893 branchmerge = whether to merge between branches
1909 force = whether to force branch merging or file overwriting
1894 force = whether to force branch merging or file overwriting
1910 matcher = a matcher to filter file lists (dirstate not updated)
1895 matcher = a matcher to filter file lists (dirstate not updated)
1911 mergeancestor = whether it is merging with an ancestor. If true,
1896 mergeancestor = whether it is merging with an ancestor. If true,
1912 we should accept the incoming changes for any prompts that occur.
1897 we should accept the incoming changes for any prompts that occur.
1913 If false, merging with an ancestor (fast-forward) is only allowed
1898 If false, merging with an ancestor (fast-forward) is only allowed
1914 between different named branches. This flag is used by rebase extension
1899 between different named branches. This flag is used by rebase extension
1915 as a temporary fix and should be avoided in general.
1900 as a temporary fix and should be avoided in general.
1916 labels = labels to use for base, local and other
1901 labels = labels to use for base, local and other
1917 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1902 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1918 this is True, then 'force' should be True as well.
1903 this is True, then 'force' should be True as well.
1919
1904
1920 The table below shows all the behaviors of the update command given the
1905 The table below shows all the behaviors of the update command given the
1921 -c/--check and -C/--clean or no options, whether the working directory is
1906 -c/--check and -C/--clean or no options, whether the working directory is
1922 dirty, whether a revision is specified, and the relationship of the parent
1907 dirty, whether a revision is specified, and the relationship of the parent
1923 rev to the target rev (linear or not). Match from top first. The -n
1908 rev to the target rev (linear or not). Match from top first. The -n
1924 option doesn't exist on the command line, but represents the
1909 option doesn't exist on the command line, but represents the
1925 experimental.updatecheck=noconflict option.
1910 experimental.updatecheck=noconflict option.
1926
1911
1927 This logic is tested by test-update-branches.t.
1912 This logic is tested by test-update-branches.t.
1928
1913
1929 -c -C -n -m dirty rev linear | result
1914 -c -C -n -m dirty rev linear | result
1930 y y * * * * * | (1)
1915 y y * * * * * | (1)
1931 y * y * * * * | (1)
1916 y * y * * * * | (1)
1932 y * * y * * * | (1)
1917 y * * y * * * | (1)
1933 * y y * * * * | (1)
1918 * y y * * * * | (1)
1934 * y * y * * * | (1)
1919 * y * y * * * | (1)
1935 * * y y * * * | (1)
1920 * * y y * * * | (1)
1936 * * * * * n n | x
1921 * * * * * n n | x
1937 * * * * n * * | ok
1922 * * * * n * * | ok
1938 n n n n y * y | merge
1923 n n n n y * y | merge
1939 n n n n y y n | (2)
1924 n n n n y y n | (2)
1940 n n n y y * * | merge
1925 n n n y y * * | merge
1941 n n y n y * * | merge if no conflict
1926 n n y n y * * | merge if no conflict
1942 n y n n y * * | discard
1927 n y n n y * * | discard
1943 y n n n y * * | (3)
1928 y n n n y * * | (3)
1944
1929
1945 x = can't happen
1930 x = can't happen
1946 * = don't-care
1931 * = don't-care
1947 1 = incompatible options (checked in commands.py)
1932 1 = incompatible options (checked in commands.py)
1948 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1933 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1949 3 = abort: uncommitted changes (checked in commands.py)
1934 3 = abort: uncommitted changes (checked in commands.py)
1950
1935
1951 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
1936 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
1952 to repo[None] if None is passed.
1937 to repo[None] if None is passed.
1953
1938
1954 Return the same tuple as applyupdates().
1939 Return the same tuple as applyupdates().
1955 """
1940 """
1956 # Avoid cycle.
1941 # Avoid cycle.
1957 from . import sparse
1942 from . import sparse
1958
1943
1959 # This function used to find the default destination if node was None, but
1944 # This function used to find the default destination if node was None, but
1960 # that's now in destutil.py.
1945 # that's now in destutil.py.
1961 assert node is not None
1946 assert node is not None
1962 if not branchmerge and not force:
1947 if not branchmerge and not force:
1963 # TODO: remove the default once all callers that pass branchmerge=False
1948 # TODO: remove the default once all callers that pass branchmerge=False
1964 # and force=False pass a value for updatecheck. We may want to allow
1949 # and force=False pass a value for updatecheck. We may want to allow
1965 # updatecheck='abort' to better suppport some of these callers.
1950 # updatecheck='abort' to better suppport some of these callers.
1966 if updatecheck is None:
1951 if updatecheck is None:
1967 updatecheck = 'linear'
1952 updatecheck = 'linear'
1968 assert updatecheck in ('none', 'linear', 'noconflict')
1953 assert updatecheck in ('none', 'linear', 'noconflict')
1969 # If we're doing a partial update, we need to skip updating
1954 # If we're doing a partial update, we need to skip updating
1970 # the dirstate, so make a note of any partial-ness to the
1955 # the dirstate, so make a note of any partial-ness to the
1971 # update here.
1956 # update here.
1972 if matcher is None or matcher.always():
1957 if matcher is None or matcher.always():
1973 partial = False
1958 partial = False
1974 else:
1959 else:
1975 partial = True
1960 partial = True
1976 with repo.wlock():
1961 with repo.wlock():
1977 if wc is None:
1962 if wc is None:
1978 wc = repo[None]
1963 wc = repo[None]
1979 pl = wc.parents()
1964 pl = wc.parents()
1980 p1 = pl[0]
1965 p1 = pl[0]
1981 pas = [None]
1966 pas = [None]
1982 if ancestor is not None:
1967 if ancestor is not None:
1983 pas = [repo[ancestor]]
1968 pas = [repo[ancestor]]
1984
1969
1985 overwrite = force and not branchmerge
1970 overwrite = force and not branchmerge
1986
1971
1987 p2 = repo[node]
1972 p2 = repo[node]
1988 if pas[0] is None:
1973 if pas[0] is None:
1989 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1974 if repo.ui.configlist('merge', 'preferancestor') == ['*']:
1990 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1975 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1991 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1976 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1992 else:
1977 else:
1993 pas = [p1.ancestor(p2, warn=branchmerge)]
1978 pas = [p1.ancestor(p2, warn=branchmerge)]
1994
1979
1995 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1980 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1996
1981
1997 ### check phase
1982 ### check phase
1998 if not overwrite:
1983 if not overwrite:
1999 if len(pl) > 1:
1984 if len(pl) > 1:
2000 raise error.Abort(_("outstanding uncommitted merge"))
1985 raise error.Abort(_("outstanding uncommitted merge"))
2001 ms = mergestate.read(repo)
1986 ms = mergestate.read(repo)
2002 if list(ms.unresolved()):
1987 if list(ms.unresolved()):
2003 raise error.Abort(_("outstanding merge conflicts"))
1988 raise error.Abort(_("outstanding merge conflicts"))
2004 if branchmerge:
1989 if branchmerge:
2005 if pas == [p2]:
1990 if pas == [p2]:
2006 raise error.Abort(_("merging with a working directory ancestor"
1991 raise error.Abort(_("merging with a working directory ancestor"
2007 " has no effect"))
1992 " has no effect"))
2008 elif pas == [p1]:
1993 elif pas == [p1]:
2009 if not mergeancestor and wc.branch() == p2.branch():
1994 if not mergeancestor and wc.branch() == p2.branch():
2010 raise error.Abort(_("nothing to merge"),
1995 raise error.Abort(_("nothing to merge"),
2011 hint=_("use 'hg update' "
1996 hint=_("use 'hg update' "
2012 "or check 'hg heads'"))
1997 "or check 'hg heads'"))
2013 if not force and (wc.files() or wc.deleted()):
1998 if not force and (wc.files() or wc.deleted()):
2014 raise error.Abort(_("uncommitted changes"),
1999 raise error.Abort(_("uncommitted changes"),
2015 hint=_("use 'hg status' to list changes"))
2000 hint=_("use 'hg status' to list changes"))
2016 if not wc.isinmemory():
2001 if not wc.isinmemory():
2017 for s in sorted(wc.substate):
2002 for s in sorted(wc.substate):
2018 wc.sub(s).bailifchanged()
2003 wc.sub(s).bailifchanged()
2019
2004
2020 elif not overwrite:
2005 elif not overwrite:
2021 if p1 == p2: # no-op update
2006 if p1 == p2: # no-op update
2022 # call the hooks and exit early
2007 # call the hooks and exit early
2023 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
2008 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
2024 repo.hook('update', parent1=xp2, parent2='', error=0)
2009 repo.hook('update', parent1=xp2, parent2='', error=0)
2025 return updateresult(0, 0, 0, 0)
2010 return updateresult(0, 0, 0, 0)
2026
2011
2027 if (updatecheck == 'linear' and
2012 if (updatecheck == 'linear' and
2028 pas not in ([p1], [p2])): # nonlinear
2013 pas not in ([p1], [p2])): # nonlinear
2029 dirty = wc.dirty(missing=True)
2014 dirty = wc.dirty(missing=True)
2030 if dirty:
2015 if dirty:
2031 # Branching is a bit strange to ensure we do the minimal
2016 # Branching is a bit strange to ensure we do the minimal
2032 # amount of call to obsutil.foreground.
2017 # amount of call to obsutil.foreground.
2033 foreground = obsutil.foreground(repo, [p1.node()])
2018 foreground = obsutil.foreground(repo, [p1.node()])
2034 # note: the <node> variable contains a random identifier
2019 # note: the <node> variable contains a random identifier
2035 if repo[node].node() in foreground:
2020 if repo[node].node() in foreground:
2036 pass # allow updating to successors
2021 pass # allow updating to successors
2037 else:
2022 else:
2038 msg = _("uncommitted changes")
2023 msg = _("uncommitted changes")
2039 hint = _("commit or update --clean to discard changes")
2024 hint = _("commit or update --clean to discard changes")
2040 raise error.UpdateAbort(msg, hint=hint)
2025 raise error.UpdateAbort(msg, hint=hint)
2041 else:
2026 else:
2042 # Allow jumping branches if clean and specific rev given
2027 # Allow jumping branches if clean and specific rev given
2043 pass
2028 pass
2044
2029
2045 if overwrite:
2030 if overwrite:
2046 pas = [wc]
2031 pas = [wc]
2047 elif not branchmerge:
2032 elif not branchmerge:
2048 pas = [p1]
2033 pas = [p1]
2049
2034
2050 # deprecated config: merge.followcopies
2035 # deprecated config: merge.followcopies
2051 followcopies = repo.ui.configbool('merge', 'followcopies')
2036 followcopies = repo.ui.configbool('merge', 'followcopies')
2052 if overwrite:
2037 if overwrite:
2053 followcopies = False
2038 followcopies = False
2054 elif not pas[0]:
2039 elif not pas[0]:
2055 followcopies = False
2040 followcopies = False
2056 if not branchmerge and not wc.dirty(missing=True):
2041 if not branchmerge and not wc.dirty(missing=True):
2057 followcopies = False
2042 followcopies = False
2058
2043
2059 ### calculate phase
2044 ### calculate phase
2060 actionbyfile, diverge, renamedelete = calculateupdates(
2045 actionbyfile, diverge, renamedelete = calculateupdates(
2061 repo, wc, p2, pas, branchmerge, force, mergeancestor,
2046 repo, wc, p2, pas, branchmerge, force, mergeancestor,
2062 followcopies, matcher=matcher, mergeforce=mergeforce)
2047 followcopies, matcher=matcher, mergeforce=mergeforce)
2063
2048
2064 if updatecheck == 'noconflict':
2049 if updatecheck == 'noconflict':
2065 for f, (m, args, msg) in actionbyfile.iteritems():
2050 for f, (m, args, msg) in actionbyfile.iteritems():
2066 if m not in (ACTION_GET, ACTION_KEEP, ACTION_EXEC,
2051 if m not in (ACTION_GET, ACTION_KEEP, ACTION_EXEC,
2067 ACTION_REMOVE, ACTION_PATH_CONFLICT_RESOLVE):
2052 ACTION_REMOVE, ACTION_PATH_CONFLICT_RESOLVE):
2068 msg = _("conflicting changes")
2053 msg = _("conflicting changes")
2069 hint = _("commit or update --clean to discard changes")
2054 hint = _("commit or update --clean to discard changes")
2070 raise error.Abort(msg, hint=hint)
2055 raise error.Abort(msg, hint=hint)
2071
2056
2072 # Prompt and create actions. Most of this is in the resolve phase
2057 # Prompt and create actions. Most of this is in the resolve phase
2073 # already, but we can't handle .hgsubstate in filemerge or
2058 # already, but we can't handle .hgsubstate in filemerge or
2074 # subrepoutil.submerge yet so we have to keep prompting for it.
2059 # subrepoutil.submerge yet so we have to keep prompting for it.
2075 if '.hgsubstate' in actionbyfile:
2060 if '.hgsubstate' in actionbyfile:
2076 f = '.hgsubstate'
2061 f = '.hgsubstate'
2077 m, args, msg = actionbyfile[f]
2062 m, args, msg = actionbyfile[f]
2078 prompts = filemerge.partextras(labels)
2063 prompts = filemerge.partextras(labels)
2079 prompts['f'] = f
2064 prompts['f'] = f
2080 if m == ACTION_CHANGED_DELETED:
2065 if m == ACTION_CHANGED_DELETED:
2081 if repo.ui.promptchoice(
2066 if repo.ui.promptchoice(
2082 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
2067 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
2083 "use (c)hanged version or (d)elete?"
2068 "use (c)hanged version or (d)elete?"
2084 "$$ &Changed $$ &Delete") % prompts, 0):
2069 "$$ &Changed $$ &Delete") % prompts, 0):
2085 actionbyfile[f] = (ACTION_REMOVE, None, 'prompt delete')
2070 actionbyfile[f] = (ACTION_REMOVE, None, 'prompt delete')
2086 elif f in p1:
2071 elif f in p1:
2087 actionbyfile[f] = (ACTION_ADD_MODIFIED, None, 'prompt keep')
2072 actionbyfile[f] = (ACTION_ADD_MODIFIED, None, 'prompt keep')
2088 else:
2073 else:
2089 actionbyfile[f] = (ACTION_ADD, None, 'prompt keep')
2074 actionbyfile[f] = (ACTION_ADD, None, 'prompt keep')
2090 elif m == ACTION_DELETED_CHANGED:
2075 elif m == ACTION_DELETED_CHANGED:
2091 f1, f2, fa, move, anc = args
2076 f1, f2, fa, move, anc = args
2092 flags = p2[f2].flags()
2077 flags = p2[f2].flags()
2093 if repo.ui.promptchoice(
2078 if repo.ui.promptchoice(
2094 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
2079 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
2095 "use (c)hanged version or leave (d)eleted?"
2080 "use (c)hanged version or leave (d)eleted?"
2096 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
2081 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
2097 actionbyfile[f] = (ACTION_GET, (flags, False),
2082 actionbyfile[f] = (ACTION_GET, (flags, False),
2098 'prompt recreating')
2083 'prompt recreating')
2099 else:
2084 else:
2100 del actionbyfile[f]
2085 del actionbyfile[f]
2101
2086
2102 # Convert to dictionary-of-lists format
2087 # Convert to dictionary-of-lists format
2103 actions = dict((m, [])
2088 actions = dict((m, [])
2104 for m in (
2089 for m in (
2105 ACTION_ADD,
2090 ACTION_ADD,
2106 ACTION_ADD_MODIFIED,
2091 ACTION_ADD_MODIFIED,
2107 ACTION_FORGET,
2092 ACTION_FORGET,
2108 ACTION_GET,
2093 ACTION_GET,
2109 ACTION_CHANGED_DELETED,
2094 ACTION_CHANGED_DELETED,
2110 ACTION_DELETED_CHANGED,
2095 ACTION_DELETED_CHANGED,
2111 ACTION_REMOVE,
2096 ACTION_REMOVE,
2112 ACTION_DIR_RENAME_MOVE_LOCAL,
2097 ACTION_DIR_RENAME_MOVE_LOCAL,
2113 ACTION_LOCAL_DIR_RENAME_GET,
2098 ACTION_LOCAL_DIR_RENAME_GET,
2114 ACTION_MERGE,
2099 ACTION_MERGE,
2115 ACTION_EXEC,
2100 ACTION_EXEC,
2116 ACTION_KEEP,
2101 ACTION_KEEP,
2117 ACTION_PATH_CONFLICT,
2102 ACTION_PATH_CONFLICT,
2118 ACTION_PATH_CONFLICT_RESOLVE))
2103 ACTION_PATH_CONFLICT_RESOLVE))
2119 for f, (m, args, msg) in actionbyfile.iteritems():
2104 for f, (m, args, msg) in actionbyfile.iteritems():
2120 if m not in actions:
2105 if m not in actions:
2121 actions[m] = []
2106 actions[m] = []
2122 actions[m].append((f, args, msg))
2107 actions[m].append((f, args, msg))
2123
2108
2124 if not util.fscasesensitive(repo.path):
2109 if not util.fscasesensitive(repo.path):
2125 # check collision between files only in p2 for clean update
2110 # check collision between files only in p2 for clean update
2126 if (not branchmerge and
2111 if (not branchmerge and
2127 (force or not wc.dirty(missing=True, branch=False))):
2112 (force or not wc.dirty(missing=True, branch=False))):
2128 _checkcollision(repo, p2.manifest(), None)
2113 _checkcollision(repo, p2.manifest(), None)
2129 else:
2114 else:
2130 _checkcollision(repo, wc.manifest(), actions)
2115 _checkcollision(repo, wc.manifest(), actions)
2131
2116
2132 # divergent renames
2117 # divergent renames
2133 for f, fl in sorted(diverge.iteritems()):
2118 for f, fl in sorted(diverge.iteritems()):
2134 repo.ui.warn(_("note: possible conflict - %s was renamed "
2119 repo.ui.warn(_("note: possible conflict - %s was renamed "
2135 "multiple times to:\n") % f)
2120 "multiple times to:\n") % f)
2136 for nf in fl:
2121 for nf in fl:
2137 repo.ui.warn(" %s\n" % nf)
2122 repo.ui.warn(" %s\n" % nf)
2138
2123
2139 # rename and delete
2124 # rename and delete
2140 for f, fl in sorted(renamedelete.iteritems()):
2125 for f, fl in sorted(renamedelete.iteritems()):
2141 repo.ui.warn(_("note: possible conflict - %s was deleted "
2126 repo.ui.warn(_("note: possible conflict - %s was deleted "
2142 "and renamed to:\n") % f)
2127 "and renamed to:\n") % f)
2143 for nf in fl:
2128 for nf in fl:
2144 repo.ui.warn(" %s\n" % nf)
2129 repo.ui.warn(" %s\n" % nf)
2145
2130
2146 ### apply phase
2131 ### apply phase
2147 if not branchmerge: # just jump to the new rev
2132 if not branchmerge: # just jump to the new rev
2148 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
2133 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
2149 if not partial and not wc.isinmemory():
2134 if not partial and not wc.isinmemory():
2150 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
2135 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
2151 # note that we're in the middle of an update
2136 # note that we're in the middle of an update
2152 repo.vfs.write('updatestate', p2.hex())
2137 repo.vfs.write('updatestate', p2.hex())
2153
2138
2154 # Advertise fsmonitor when its presence could be useful.
2139 # Advertise fsmonitor when its presence could be useful.
2155 #
2140 #
2156 # We only advertise when performing an update from an empty working
2141 # We only advertise when performing an update from an empty working
2157 # directory. This typically only occurs during initial clone.
2142 # directory. This typically only occurs during initial clone.
2158 #
2143 #
2159 # We give users a mechanism to disable the warning in case it is
2144 # We give users a mechanism to disable the warning in case it is
2160 # annoying.
2145 # annoying.
2161 #
2146 #
2162 # We only allow on Linux and MacOS because that's where fsmonitor is
2147 # We only allow on Linux and MacOS because that's where fsmonitor is
2163 # considered stable.
2148 # considered stable.
2164 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
2149 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
2165 fsmonitorthreshold = repo.ui.configint('fsmonitor',
2150 fsmonitorthreshold = repo.ui.configint('fsmonitor',
2166 'warn_update_file_count')
2151 'warn_update_file_count')
2167 try:
2152 try:
2168 # avoid cycle: extensions -> cmdutil -> merge
2153 # avoid cycle: extensions -> cmdutil -> merge
2169 from . import extensions
2154 from . import extensions
2170 extensions.find('fsmonitor')
2155 extensions.find('fsmonitor')
2171 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
2156 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
2172 # We intentionally don't look at whether fsmonitor has disabled
2157 # We intentionally don't look at whether fsmonitor has disabled
2173 # itself because a) fsmonitor may have already printed a warning
2158 # itself because a) fsmonitor may have already printed a warning
2174 # b) we only care about the config state here.
2159 # b) we only care about the config state here.
2175 except KeyError:
2160 except KeyError:
2176 fsmonitorenabled = False
2161 fsmonitorenabled = False
2177
2162
2178 if (fsmonitorwarning
2163 if (fsmonitorwarning
2179 and not fsmonitorenabled
2164 and not fsmonitorenabled
2180 and p1.node() == nullid
2165 and p1.node() == nullid
2181 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2166 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2182 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2167 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2183 repo.ui.warn(
2168 repo.ui.warn(
2184 _('(warning: large working directory being used without '
2169 _('(warning: large working directory being used without '
2185 'fsmonitor enabled; enable fsmonitor to improve performance; '
2170 'fsmonitor enabled; enable fsmonitor to improve performance; '
2186 'see "hg help -e fsmonitor")\n'))
2171 'see "hg help -e fsmonitor")\n'))
2187
2172
2188 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
2173 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
2189
2174
2190 if not partial and not wc.isinmemory():
2175 if not partial and not wc.isinmemory():
2191 with repo.dirstate.parentchange():
2176 with repo.dirstate.parentchange():
2192 repo.setparents(fp1, fp2)
2177 repo.setparents(fp1, fp2)
2193 recordupdates(repo, actions, branchmerge)
2178 recordupdates(repo, actions, branchmerge)
2194 # update completed, clear state
2179 # update completed, clear state
2195 util.unlink(repo.vfs.join('updatestate'))
2180 util.unlink(repo.vfs.join('updatestate'))
2196
2181
2197 if not branchmerge:
2182 if not branchmerge:
2198 repo.dirstate.setbranch(p2.branch())
2183 repo.dirstate.setbranch(p2.branch())
2199
2184
2200 # If we're updating to a location, clean up any stale temporary includes
2185 # If we're updating to a location, clean up any stale temporary includes
2201 # (ex: this happens during hg rebase --abort).
2186 # (ex: this happens during hg rebase --abort).
2202 if not branchmerge:
2187 if not branchmerge:
2203 sparse.prunetemporaryincludes(repo)
2188 sparse.prunetemporaryincludes(repo)
2204
2189
2205 if not partial:
2190 if not partial:
2206 repo.hook('update', parent1=xp1, parent2=xp2,
2191 repo.hook('update', parent1=xp1, parent2=xp2,
2207 error=stats.unresolvedcount)
2192 error=stats.unresolvedcount)
2208 return stats
2193 return stats
2209
2194
2210 def graft(repo, ctx, pctx, labels, keepparent=False):
2195 def graft(repo, ctx, pctx, labels, keepparent=False):
2211 """Do a graft-like merge.
2196 """Do a graft-like merge.
2212
2197
2213 This is a merge where the merge ancestor is chosen such that one
2198 This is a merge where the merge ancestor is chosen such that one
2214 or more changesets are grafted onto the current changeset. In
2199 or more changesets are grafted onto the current changeset. In
2215 addition to the merge, this fixes up the dirstate to include only
2200 addition to the merge, this fixes up the dirstate to include only
2216 a single parent (if keepparent is False) and tries to duplicate any
2201 a single parent (if keepparent is False) and tries to duplicate any
2217 renames/copies appropriately.
2202 renames/copies appropriately.
2218
2203
2219 ctx - changeset to rebase
2204 ctx - changeset to rebase
2220 pctx - merge base, usually ctx.p1()
2205 pctx - merge base, usually ctx.p1()
2221 labels - merge labels eg ['local', 'graft']
2206 labels - merge labels eg ['local', 'graft']
2222 keepparent - keep second parent if any
2207 keepparent - keep second parent if any
2223
2208
2224 """
2209 """
2225 # If we're grafting a descendant onto an ancestor, be sure to pass
2210 # If we're grafting a descendant onto an ancestor, be sure to pass
2226 # mergeancestor=True to update. This does two things: 1) allows the merge if
2211 # mergeancestor=True to update. This does two things: 1) allows the merge if
2227 # the destination is the same as the parent of the ctx (so we can use graft
2212 # the destination is the same as the parent of the ctx (so we can use graft
2228 # to copy commits), and 2) informs update that the incoming changes are
2213 # to copy commits), and 2) informs update that the incoming changes are
2229 # newer than the destination so it doesn't prompt about "remote changed foo
2214 # newer than the destination so it doesn't prompt about "remote changed foo
2230 # which local deleted".
2215 # which local deleted".
2231 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
2216 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
2232
2217
2233 stats = update(repo, ctx.node(), True, True, pctx.node(),
2218 stats = update(repo, ctx.node(), True, True, pctx.node(),
2234 mergeancestor=mergeancestor, labels=labels)
2219 mergeancestor=mergeancestor, labels=labels)
2235
2220
2236 pother = nullid
2221 pother = nullid
2237 parents = ctx.parents()
2222 parents = ctx.parents()
2238 if keepparent and len(parents) == 2 and pctx in parents:
2223 if keepparent and len(parents) == 2 and pctx in parents:
2239 parents.remove(pctx)
2224 parents.remove(pctx)
2240 pother = parents[0].node()
2225 pother = parents[0].node()
2241
2226
2242 with repo.dirstate.parentchange():
2227 with repo.dirstate.parentchange():
2243 repo.setparents(repo['.'].node(), pother)
2228 repo.setparents(repo['.'].node(), pother)
2244 repo.dirstate.write(repo.currenttransaction())
2229 repo.dirstate.write(repo.currenttransaction())
2245 # fix up dirstate for copies and renames
2230 # fix up dirstate for copies and renames
2246 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2231 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2247 return stats
2232 return stats
@@ -1,1615 +1,1636 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16 import subprocess
16 import subprocess
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 bin,
21 bin,
22 hex,
22 hex,
23 nullid,
23 nullid,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirrev,
26 wdirrev,
27 )
27 )
28
28
29 from . import (
29 from . import (
30 encoding,
30 encoding,
31 error,
31 error,
32 match as matchmod,
32 match as matchmod,
33 obsolete,
33 obsolete,
34 obsutil,
34 obsutil,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 revsetlang,
38 revsetlang,
39 similar,
39 similar,
40 url,
40 url,
41 util,
41 util,
42 vfs,
42 vfs,
43 )
43 )
44
44
45 from .utils import (
45 from .utils import (
46 procutil,
46 procutil,
47 stringutil,
47 stringutil,
48 )
48 )
49
49
50 if pycompat.iswindows:
50 if pycompat.iswindows:
51 from . import scmwindows as scmplatform
51 from . import scmwindows as scmplatform
52 else:
52 else:
53 from . import scmposix as scmplatform
53 from . import scmposix as scmplatform
54
54
55 termsize = scmplatform.termsize
55 termsize = scmplatform.termsize
56
56
57 class status(tuple):
57 class status(tuple):
58 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
58 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
59 and 'ignored' properties are only relevant to the working copy.
59 and 'ignored' properties are only relevant to the working copy.
60 '''
60 '''
61
61
62 __slots__ = ()
62 __slots__ = ()
63
63
64 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
64 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
65 clean):
65 clean):
66 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
66 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
67 ignored, clean))
67 ignored, clean))
68
68
69 @property
69 @property
70 def modified(self):
70 def modified(self):
71 '''files that have been modified'''
71 '''files that have been modified'''
72 return self[0]
72 return self[0]
73
73
74 @property
74 @property
75 def added(self):
75 def added(self):
76 '''files that have been added'''
76 '''files that have been added'''
77 return self[1]
77 return self[1]
78
78
79 @property
79 @property
80 def removed(self):
80 def removed(self):
81 '''files that have been removed'''
81 '''files that have been removed'''
82 return self[2]
82 return self[2]
83
83
84 @property
84 @property
85 def deleted(self):
85 def deleted(self):
86 '''files that are in the dirstate, but have been deleted from the
86 '''files that are in the dirstate, but have been deleted from the
87 working copy (aka "missing")
87 working copy (aka "missing")
88 '''
88 '''
89 return self[3]
89 return self[3]
90
90
91 @property
91 @property
92 def unknown(self):
92 def unknown(self):
93 '''files not in the dirstate that are not ignored'''
93 '''files not in the dirstate that are not ignored'''
94 return self[4]
94 return self[4]
95
95
96 @property
96 @property
97 def ignored(self):
97 def ignored(self):
98 '''files not in the dirstate that are ignored (by _dirignore())'''
98 '''files not in the dirstate that are ignored (by _dirignore())'''
99 return self[5]
99 return self[5]
100
100
101 @property
101 @property
102 def clean(self):
102 def clean(self):
103 '''files that have not been modified'''
103 '''files that have not been modified'''
104 return self[6]
104 return self[6]
105
105
106 def __repr__(self, *args, **kwargs):
106 def __repr__(self, *args, **kwargs):
107 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
107 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
108 r'unknown=%s, ignored=%s, clean=%s>') %
108 r'unknown=%s, ignored=%s, clean=%s>') %
109 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
109 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
110
110
111 def itersubrepos(ctx1, ctx2):
111 def itersubrepos(ctx1, ctx2):
112 """find subrepos in ctx1 or ctx2"""
112 """find subrepos in ctx1 or ctx2"""
113 # Create a (subpath, ctx) mapping where we prefer subpaths from
113 # Create a (subpath, ctx) mapping where we prefer subpaths from
114 # ctx1. The subpaths from ctx2 are important when the .hgsub file
114 # ctx1. The subpaths from ctx2 are important when the .hgsub file
115 # has been modified (in ctx2) but not yet committed (in ctx1).
115 # has been modified (in ctx2) but not yet committed (in ctx1).
116 subpaths = dict.fromkeys(ctx2.substate, ctx2)
116 subpaths = dict.fromkeys(ctx2.substate, ctx2)
117 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
117 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
118
118
119 missing = set()
119 missing = set()
120
120
121 for subpath in ctx2.substate:
121 for subpath in ctx2.substate:
122 if subpath not in ctx1.substate:
122 if subpath not in ctx1.substate:
123 del subpaths[subpath]
123 del subpaths[subpath]
124 missing.add(subpath)
124 missing.add(subpath)
125
125
126 for subpath, ctx in sorted(subpaths.iteritems()):
126 for subpath, ctx in sorted(subpaths.iteritems()):
127 yield subpath, ctx.sub(subpath)
127 yield subpath, ctx.sub(subpath)
128
128
129 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
129 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
130 # status and diff will have an accurate result when it does
130 # status and diff will have an accurate result when it does
131 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
131 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
132 # against itself.
132 # against itself.
133 for subpath in missing:
133 for subpath in missing:
134 yield subpath, ctx2.nullsub(subpath, ctx1)
134 yield subpath, ctx2.nullsub(subpath, ctx1)
135
135
136 def nochangesfound(ui, repo, excluded=None):
136 def nochangesfound(ui, repo, excluded=None):
137 '''Report no changes for push/pull, excluded is None or a list of
137 '''Report no changes for push/pull, excluded is None or a list of
138 nodes excluded from the push/pull.
138 nodes excluded from the push/pull.
139 '''
139 '''
140 secretlist = []
140 secretlist = []
141 if excluded:
141 if excluded:
142 for n in excluded:
142 for n in excluded:
143 ctx = repo[n]
143 ctx = repo[n]
144 if ctx.phase() >= phases.secret and not ctx.extinct():
144 if ctx.phase() >= phases.secret and not ctx.extinct():
145 secretlist.append(n)
145 secretlist.append(n)
146
146
147 if secretlist:
147 if secretlist:
148 ui.status(_("no changes found (ignored %d secret changesets)\n")
148 ui.status(_("no changes found (ignored %d secret changesets)\n")
149 % len(secretlist))
149 % len(secretlist))
150 else:
150 else:
151 ui.status(_("no changes found\n"))
151 ui.status(_("no changes found\n"))
152
152
153 def callcatch(ui, func):
153 def callcatch(ui, func):
154 """call func() with global exception handling
154 """call func() with global exception handling
155
155
156 return func() if no exception happens. otherwise do some error handling
156 return func() if no exception happens. otherwise do some error handling
157 and return an exit code accordingly. does not handle all exceptions.
157 and return an exit code accordingly. does not handle all exceptions.
158 """
158 """
159 try:
159 try:
160 try:
160 try:
161 return func()
161 return func()
162 except: # re-raises
162 except: # re-raises
163 ui.traceback()
163 ui.traceback()
164 raise
164 raise
165 # Global exception handling, alphabetically
165 # Global exception handling, alphabetically
166 # Mercurial-specific first, followed by built-in and library exceptions
166 # Mercurial-specific first, followed by built-in and library exceptions
167 except error.LockHeld as inst:
167 except error.LockHeld as inst:
168 if inst.errno == errno.ETIMEDOUT:
168 if inst.errno == errno.ETIMEDOUT:
169 reason = _('timed out waiting for lock held by %r') % inst.locker
169 reason = _('timed out waiting for lock held by %r') % inst.locker
170 else:
170 else:
171 reason = _('lock held by %r') % inst.locker
171 reason = _('lock held by %r') % inst.locker
172 ui.warn(_("abort: %s: %s\n")
172 ui.warn(_("abort: %s: %s\n")
173 % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
173 % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
174 if not inst.locker:
174 if not inst.locker:
175 ui.warn(_("(lock might be very busy)\n"))
175 ui.warn(_("(lock might be very busy)\n"))
176 except error.LockUnavailable as inst:
176 except error.LockUnavailable as inst:
177 ui.warn(_("abort: could not lock %s: %s\n") %
177 ui.warn(_("abort: could not lock %s: %s\n") %
178 (inst.desc or stringutil.forcebytestr(inst.filename),
178 (inst.desc or stringutil.forcebytestr(inst.filename),
179 encoding.strtolocal(inst.strerror)))
179 encoding.strtolocal(inst.strerror)))
180 except error.OutOfBandError as inst:
180 except error.OutOfBandError as inst:
181 if inst.args:
181 if inst.args:
182 msg = _("abort: remote error:\n")
182 msg = _("abort: remote error:\n")
183 else:
183 else:
184 msg = _("abort: remote error\n")
184 msg = _("abort: remote error\n")
185 ui.warn(msg)
185 ui.warn(msg)
186 if inst.args:
186 if inst.args:
187 ui.warn(''.join(inst.args))
187 ui.warn(''.join(inst.args))
188 if inst.hint:
188 if inst.hint:
189 ui.warn('(%s)\n' % inst.hint)
189 ui.warn('(%s)\n' % inst.hint)
190 except error.RepoError as inst:
190 except error.RepoError as inst:
191 ui.warn(_("abort: %s!\n") % inst)
191 ui.warn(_("abort: %s!\n") % inst)
192 if inst.hint:
192 if inst.hint:
193 ui.warn(_("(%s)\n") % inst.hint)
193 ui.warn(_("(%s)\n") % inst.hint)
194 except error.ResponseError as inst:
194 except error.ResponseError as inst:
195 ui.warn(_("abort: %s") % inst.args[0])
195 ui.warn(_("abort: %s") % inst.args[0])
196 msg = inst.args[1]
196 msg = inst.args[1]
197 if isinstance(msg, type(u'')):
197 if isinstance(msg, type(u'')):
198 msg = pycompat.sysbytes(msg)
198 msg = pycompat.sysbytes(msg)
199 if not isinstance(msg, bytes):
199 if not isinstance(msg, bytes):
200 ui.warn(" %r\n" % (msg,))
200 ui.warn(" %r\n" % (msg,))
201 elif not msg:
201 elif not msg:
202 ui.warn(_(" empty string\n"))
202 ui.warn(_(" empty string\n"))
203 else:
203 else:
204 ui.warn("\n%r\n" % stringutil.ellipsis(msg))
204 ui.warn("\n%r\n" % stringutil.ellipsis(msg))
205 except error.CensoredNodeError as inst:
205 except error.CensoredNodeError as inst:
206 ui.warn(_("abort: file censored %s!\n") % inst)
206 ui.warn(_("abort: file censored %s!\n") % inst)
207 except error.RevlogError as inst:
207 except error.RevlogError as inst:
208 ui.warn(_("abort: %s!\n") % inst)
208 ui.warn(_("abort: %s!\n") % inst)
209 except error.InterventionRequired as inst:
209 except error.InterventionRequired as inst:
210 ui.warn("%s\n" % inst)
210 ui.warn("%s\n" % inst)
211 if inst.hint:
211 if inst.hint:
212 ui.warn(_("(%s)\n") % inst.hint)
212 ui.warn(_("(%s)\n") % inst.hint)
213 return 1
213 return 1
214 except error.WdirUnsupported:
214 except error.WdirUnsupported:
215 ui.warn(_("abort: working directory revision cannot be specified\n"))
215 ui.warn(_("abort: working directory revision cannot be specified\n"))
216 except error.Abort as inst:
216 except error.Abort as inst:
217 ui.warn(_("abort: %s\n") % inst)
217 ui.warn(_("abort: %s\n") % inst)
218 if inst.hint:
218 if inst.hint:
219 ui.warn(_("(%s)\n") % inst.hint)
219 ui.warn(_("(%s)\n") % inst.hint)
220 except ImportError as inst:
220 except ImportError as inst:
221 ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
221 ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
222 m = stringutil.forcebytestr(inst).split()[-1]
222 m = stringutil.forcebytestr(inst).split()[-1]
223 if m in "mpatch bdiff".split():
223 if m in "mpatch bdiff".split():
224 ui.warn(_("(did you forget to compile extensions?)\n"))
224 ui.warn(_("(did you forget to compile extensions?)\n"))
225 elif m in "zlib".split():
225 elif m in "zlib".split():
226 ui.warn(_("(is your Python install correct?)\n"))
226 ui.warn(_("(is your Python install correct?)\n"))
227 except IOError as inst:
227 except IOError as inst:
228 if util.safehasattr(inst, "code"):
228 if util.safehasattr(inst, "code"):
229 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
229 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
230 elif util.safehasattr(inst, "reason"):
230 elif util.safehasattr(inst, "reason"):
231 try: # usually it is in the form (errno, strerror)
231 try: # usually it is in the form (errno, strerror)
232 reason = inst.reason.args[1]
232 reason = inst.reason.args[1]
233 except (AttributeError, IndexError):
233 except (AttributeError, IndexError):
234 # it might be anything, for example a string
234 # it might be anything, for example a string
235 reason = inst.reason
235 reason = inst.reason
236 if isinstance(reason, pycompat.unicode):
236 if isinstance(reason, pycompat.unicode):
237 # SSLError of Python 2.7.9 contains a unicode
237 # SSLError of Python 2.7.9 contains a unicode
238 reason = encoding.unitolocal(reason)
238 reason = encoding.unitolocal(reason)
239 ui.warn(_("abort: error: %s\n") % reason)
239 ui.warn(_("abort: error: %s\n") % reason)
240 elif (util.safehasattr(inst, "args")
240 elif (util.safehasattr(inst, "args")
241 and inst.args and inst.args[0] == errno.EPIPE):
241 and inst.args and inst.args[0] == errno.EPIPE):
242 pass
242 pass
243 elif getattr(inst, "strerror", None):
243 elif getattr(inst, "strerror", None):
244 if getattr(inst, "filename", None):
244 if getattr(inst, "filename", None):
245 ui.warn(_("abort: %s: %s\n") % (
245 ui.warn(_("abort: %s: %s\n") % (
246 encoding.strtolocal(inst.strerror),
246 encoding.strtolocal(inst.strerror),
247 stringutil.forcebytestr(inst.filename)))
247 stringutil.forcebytestr(inst.filename)))
248 else:
248 else:
249 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
249 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
250 else:
250 else:
251 raise
251 raise
252 except OSError as inst:
252 except OSError as inst:
253 if getattr(inst, "filename", None) is not None:
253 if getattr(inst, "filename", None) is not None:
254 ui.warn(_("abort: %s: '%s'\n") % (
254 ui.warn(_("abort: %s: '%s'\n") % (
255 encoding.strtolocal(inst.strerror),
255 encoding.strtolocal(inst.strerror),
256 stringutil.forcebytestr(inst.filename)))
256 stringutil.forcebytestr(inst.filename)))
257 else:
257 else:
258 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
258 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
259 except MemoryError:
259 except MemoryError:
260 ui.warn(_("abort: out of memory\n"))
260 ui.warn(_("abort: out of memory\n"))
261 except SystemExit as inst:
261 except SystemExit as inst:
262 # Commands shouldn't sys.exit directly, but give a return code.
262 # Commands shouldn't sys.exit directly, but give a return code.
263 # Just in case catch this and and pass exit code to caller.
263 # Just in case catch this and and pass exit code to caller.
264 return inst.code
264 return inst.code
265 except socket.error as inst:
265 except socket.error as inst:
266 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
266 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
267
267
268 return -1
268 return -1
269
269
270 def checknewlabel(repo, lbl, kind):
270 def checknewlabel(repo, lbl, kind):
271 # Do not use the "kind" parameter in ui output.
271 # Do not use the "kind" parameter in ui output.
272 # It makes strings difficult to translate.
272 # It makes strings difficult to translate.
273 if lbl in ['tip', '.', 'null']:
273 if lbl in ['tip', '.', 'null']:
274 raise error.Abort(_("the name '%s' is reserved") % lbl)
274 raise error.Abort(_("the name '%s' is reserved") % lbl)
275 for c in (':', '\0', '\n', '\r'):
275 for c in (':', '\0', '\n', '\r'):
276 if c in lbl:
276 if c in lbl:
277 raise error.Abort(
277 raise error.Abort(
278 _("%r cannot be used in a name") % pycompat.bytestr(c))
278 _("%r cannot be used in a name") % pycompat.bytestr(c))
279 try:
279 try:
280 int(lbl)
280 int(lbl)
281 raise error.Abort(_("cannot use an integer as a name"))
281 raise error.Abort(_("cannot use an integer as a name"))
282 except ValueError:
282 except ValueError:
283 pass
283 pass
284 if lbl.strip() != lbl:
284 if lbl.strip() != lbl:
285 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
285 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
286
286
287 def checkfilename(f):
287 def checkfilename(f):
288 '''Check that the filename f is an acceptable filename for a tracked file'''
288 '''Check that the filename f is an acceptable filename for a tracked file'''
289 if '\r' in f or '\n' in f:
289 if '\r' in f or '\n' in f:
290 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
290 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
291 % pycompat.bytestr(f))
291 % pycompat.bytestr(f))
292
292
293 def checkportable(ui, f):
293 def checkportable(ui, f):
294 '''Check if filename f is portable and warn or abort depending on config'''
294 '''Check if filename f is portable and warn or abort depending on config'''
295 checkfilename(f)
295 checkfilename(f)
296 abort, warn = checkportabilityalert(ui)
296 abort, warn = checkportabilityalert(ui)
297 if abort or warn:
297 if abort or warn:
298 msg = util.checkwinfilename(f)
298 msg = util.checkwinfilename(f)
299 if msg:
299 if msg:
300 msg = "%s: %s" % (msg, procutil.shellquote(f))
300 msg = "%s: %s" % (msg, procutil.shellquote(f))
301 if abort:
301 if abort:
302 raise error.Abort(msg)
302 raise error.Abort(msg)
303 ui.warn(_("warning: %s\n") % msg)
303 ui.warn(_("warning: %s\n") % msg)
304
304
305 def checkportabilityalert(ui):
305 def checkportabilityalert(ui):
306 '''check if the user's config requests nothing, a warning, or abort for
306 '''check if the user's config requests nothing, a warning, or abort for
307 non-portable filenames'''
307 non-portable filenames'''
308 val = ui.config('ui', 'portablefilenames')
308 val = ui.config('ui', 'portablefilenames')
309 lval = val.lower()
309 lval = val.lower()
310 bval = stringutil.parsebool(val)
310 bval = stringutil.parsebool(val)
311 abort = pycompat.iswindows or lval == 'abort'
311 abort = pycompat.iswindows or lval == 'abort'
312 warn = bval or lval == 'warn'
312 warn = bval or lval == 'warn'
313 if bval is None and not (warn or abort or lval == 'ignore'):
313 if bval is None and not (warn or abort or lval == 'ignore'):
314 raise error.ConfigError(
314 raise error.ConfigError(
315 _("ui.portablefilenames value is invalid ('%s')") % val)
315 _("ui.portablefilenames value is invalid ('%s')") % val)
316 return abort, warn
316 return abort, warn
317
317
318 class casecollisionauditor(object):
318 class casecollisionauditor(object):
319 def __init__(self, ui, abort, dirstate):
319 def __init__(self, ui, abort, dirstate):
320 self._ui = ui
320 self._ui = ui
321 self._abort = abort
321 self._abort = abort
322 allfiles = '\0'.join(dirstate._map)
322 allfiles = '\0'.join(dirstate._map)
323 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
323 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
324 self._dirstate = dirstate
324 self._dirstate = dirstate
325 # The purpose of _newfiles is so that we don't complain about
325 # The purpose of _newfiles is so that we don't complain about
326 # case collisions if someone were to call this object with the
326 # case collisions if someone were to call this object with the
327 # same filename twice.
327 # same filename twice.
328 self._newfiles = set()
328 self._newfiles = set()
329
329
330 def __call__(self, f):
330 def __call__(self, f):
331 if f in self._newfiles:
331 if f in self._newfiles:
332 return
332 return
333 fl = encoding.lower(f)
333 fl = encoding.lower(f)
334 if fl in self._loweredfiles and f not in self._dirstate:
334 if fl in self._loweredfiles and f not in self._dirstate:
335 msg = _('possible case-folding collision for %s') % f
335 msg = _('possible case-folding collision for %s') % f
336 if self._abort:
336 if self._abort:
337 raise error.Abort(msg)
337 raise error.Abort(msg)
338 self._ui.warn(_("warning: %s\n") % msg)
338 self._ui.warn(_("warning: %s\n") % msg)
339 self._loweredfiles.add(fl)
339 self._loweredfiles.add(fl)
340 self._newfiles.add(f)
340 self._newfiles.add(f)
341
341
342 def filteredhash(repo, maxrev):
342 def filteredhash(repo, maxrev):
343 """build hash of filtered revisions in the current repoview.
343 """build hash of filtered revisions in the current repoview.
344
344
345 Multiple caches perform up-to-date validation by checking that the
345 Multiple caches perform up-to-date validation by checking that the
346 tiprev and tipnode stored in the cache file match the current repository.
346 tiprev and tipnode stored in the cache file match the current repository.
347 However, this is not sufficient for validating repoviews because the set
347 However, this is not sufficient for validating repoviews because the set
348 of revisions in the view may change without the repository tiprev and
348 of revisions in the view may change without the repository tiprev and
349 tipnode changing.
349 tipnode changing.
350
350
351 This function hashes all the revs filtered from the view and returns
351 This function hashes all the revs filtered from the view and returns
352 that SHA-1 digest.
352 that SHA-1 digest.
353 """
353 """
354 cl = repo.changelog
354 cl = repo.changelog
355 if not cl.filteredrevs:
355 if not cl.filteredrevs:
356 return None
356 return None
357 key = None
357 key = None
358 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
358 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
359 if revs:
359 if revs:
360 s = hashlib.sha1()
360 s = hashlib.sha1()
361 for rev in revs:
361 for rev in revs:
362 s.update('%d;' % rev)
362 s.update('%d;' % rev)
363 key = s.digest()
363 key = s.digest()
364 return key
364 return key
365
365
366 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
366 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
367 '''yield every hg repository under path, always recursively.
367 '''yield every hg repository under path, always recursively.
368 The recurse flag will only control recursion into repo working dirs'''
368 The recurse flag will only control recursion into repo working dirs'''
369 def errhandler(err):
369 def errhandler(err):
370 if err.filename == path:
370 if err.filename == path:
371 raise err
371 raise err
372 samestat = getattr(os.path, 'samestat', None)
372 samestat = getattr(os.path, 'samestat', None)
373 if followsym and samestat is not None:
373 if followsym and samestat is not None:
374 def adddir(dirlst, dirname):
374 def adddir(dirlst, dirname):
375 dirstat = os.stat(dirname)
375 dirstat = os.stat(dirname)
376 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
376 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
377 if not match:
377 if not match:
378 dirlst.append(dirstat)
378 dirlst.append(dirstat)
379 return not match
379 return not match
380 else:
380 else:
381 followsym = False
381 followsym = False
382
382
383 if (seen_dirs is None) and followsym:
383 if (seen_dirs is None) and followsym:
384 seen_dirs = []
384 seen_dirs = []
385 adddir(seen_dirs, path)
385 adddir(seen_dirs, path)
386 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
386 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
387 dirs.sort()
387 dirs.sort()
388 if '.hg' in dirs:
388 if '.hg' in dirs:
389 yield root # found a repository
389 yield root # found a repository
390 qroot = os.path.join(root, '.hg', 'patches')
390 qroot = os.path.join(root, '.hg', 'patches')
391 if os.path.isdir(os.path.join(qroot, '.hg')):
391 if os.path.isdir(os.path.join(qroot, '.hg')):
392 yield qroot # we have a patch queue repo here
392 yield qroot # we have a patch queue repo here
393 if recurse:
393 if recurse:
394 # avoid recursing inside the .hg directory
394 # avoid recursing inside the .hg directory
395 dirs.remove('.hg')
395 dirs.remove('.hg')
396 else:
396 else:
397 dirs[:] = [] # don't descend further
397 dirs[:] = [] # don't descend further
398 elif followsym:
398 elif followsym:
399 newdirs = []
399 newdirs = []
400 for d in dirs:
400 for d in dirs:
401 fname = os.path.join(root, d)
401 fname = os.path.join(root, d)
402 if adddir(seen_dirs, fname):
402 if adddir(seen_dirs, fname):
403 if os.path.islink(fname):
403 if os.path.islink(fname):
404 for hgname in walkrepos(fname, True, seen_dirs):
404 for hgname in walkrepos(fname, True, seen_dirs):
405 yield hgname
405 yield hgname
406 else:
406 else:
407 newdirs.append(d)
407 newdirs.append(d)
408 dirs[:] = newdirs
408 dirs[:] = newdirs
409
409
410 def binnode(ctx):
410 def binnode(ctx):
411 """Return binary node id for a given basectx"""
411 """Return binary node id for a given basectx"""
412 node = ctx.node()
412 node = ctx.node()
413 if node is None:
413 if node is None:
414 return wdirid
414 return wdirid
415 return node
415 return node
416
416
417 def intrev(ctx):
417 def intrev(ctx):
418 """Return integer for a given basectx that can be used in comparison or
418 """Return integer for a given basectx that can be used in comparison or
419 arithmetic operation"""
419 arithmetic operation"""
420 rev = ctx.rev()
420 rev = ctx.rev()
421 if rev is None:
421 if rev is None:
422 return wdirrev
422 return wdirrev
423 return rev
423 return rev
424
424
425 def formatchangeid(ctx):
425 def formatchangeid(ctx):
426 """Format changectx as '{rev}:{node|formatnode}', which is the default
426 """Format changectx as '{rev}:{node|formatnode}', which is the default
427 template provided by logcmdutil.changesettemplater"""
427 template provided by logcmdutil.changesettemplater"""
428 repo = ctx.repo()
428 repo = ctx.repo()
429 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
429 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
430
430
431 def formatrevnode(ui, rev, node):
431 def formatrevnode(ui, rev, node):
432 """Format given revision and node depending on the current verbosity"""
432 """Format given revision and node depending on the current verbosity"""
433 if ui.debugflag:
433 if ui.debugflag:
434 hexfunc = hex
434 hexfunc = hex
435 else:
435 else:
436 hexfunc = short
436 hexfunc = short
437 return '%d:%s' % (rev, hexfunc(node))
437 return '%d:%s' % (rev, hexfunc(node))
438
438
439 def resolvehexnodeidprefix(repo, prefix):
439 def resolvehexnodeidprefix(repo, prefix):
440 # Uses unfiltered repo because it's faster when prefix is ambiguous/
440 # Uses unfiltered repo because it's faster when prefix is ambiguous/
441 # This matches the shortesthexnodeidprefix() function below.
441 # This matches the shortesthexnodeidprefix() function below.
442 node = repo.unfiltered().changelog._partialmatch(prefix)
442 node = repo.unfiltered().changelog._partialmatch(prefix)
443 if node is None:
443 if node is None:
444 return
444 return
445 repo.changelog.rev(node) # make sure node isn't filtered
445 repo.changelog.rev(node) # make sure node isn't filtered
446 return node
446 return node
447
447
448 def shortesthexnodeidprefix(repo, node, minlength=1):
448 def shortesthexnodeidprefix(repo, node, minlength=1):
449 """Find the shortest unambiguous prefix that matches hexnode."""
449 """Find the shortest unambiguous prefix that matches hexnode."""
450 # _partialmatch() of filtered changelog could take O(len(repo)) time,
450 # _partialmatch() of filtered changelog could take O(len(repo)) time,
451 # which would be unacceptably slow. so we look for hash collision in
451 # which would be unacceptably slow. so we look for hash collision in
452 # unfiltered space, which means some hashes may be slightly longer.
452 # unfiltered space, which means some hashes may be slightly longer.
453 cl = repo.unfiltered().changelog
453 cl = repo.unfiltered().changelog
454
454
455 def isrev(prefix):
455 def isrev(prefix):
456 try:
456 try:
457 i = int(prefix)
457 i = int(prefix)
458 # if we are a pure int, then starting with zero will not be
458 # if we are a pure int, then starting with zero will not be
459 # confused as a rev; or, obviously, if the int is larger
459 # confused as a rev; or, obviously, if the int is larger
460 # than the value of the tip rev
460 # than the value of the tip rev
461 if prefix[0] == '0' or i > len(cl):
461 if prefix[0] == '0' or i > len(cl):
462 return False
462 return False
463 return True
463 return True
464 except ValueError:
464 except ValueError:
465 return False
465 return False
466
466
467 def disambiguate(prefix):
467 def disambiguate(prefix):
468 """Disambiguate against revnums."""
468 """Disambiguate against revnums."""
469 hexnode = hex(node)
469 hexnode = hex(node)
470 for length in range(len(prefix), len(hexnode) + 1):
470 for length in range(len(prefix), len(hexnode) + 1):
471 prefix = hexnode[:length]
471 prefix = hexnode[:length]
472 if not isrev(prefix):
472 if not isrev(prefix):
473 return prefix
473 return prefix
474
474
475 try:
475 try:
476 return disambiguate(cl.shortest(node, minlength))
476 return disambiguate(cl.shortest(node, minlength))
477 except error.LookupError:
477 except error.LookupError:
478 raise error.RepoLookupError()
478 raise error.RepoLookupError()
479
479
480 def isrevsymbol(repo, symbol):
480 def isrevsymbol(repo, symbol):
481 """Checks if a symbol exists in the repo.
481 """Checks if a symbol exists in the repo.
482
482
483 See revsymbol() for details. Raises error.LookupError if the symbol is an
483 See revsymbol() for details. Raises error.LookupError if the symbol is an
484 ambiguous nodeid prefix.
484 ambiguous nodeid prefix.
485 """
485 """
486 try:
486 try:
487 revsymbol(repo, symbol)
487 revsymbol(repo, symbol)
488 return True
488 return True
489 except error.RepoLookupError:
489 except error.RepoLookupError:
490 return False
490 return False
491
491
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        # Fast path for the most common special names.
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        # A bare integer is a revision number, possibly negative (counting
        # back from tip).
        try:
            revnum = int(symbol)
            if '%d' % revnum != symbol:
                # e.g. "010" or "+5" round-trip differently: not a plain rev
                raise ValueError
            nrevs = len(repo.changelog)
            if revnum < 0:
                revnum += nrevs
            if revnum < 0 or revnum >= nrevs and revnum != wdirrev:
                raise ValueError
            return repo[revnum]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # A full 40-character string may be a binary nodeid in hex.
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # Finally, try to resolve an unambiguous hex nodeid prefix.
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        # the symbol named the working directory pseudo-revision
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        raise _filterederror(repo, symbol)
552
552
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith('visible'):
        # generic filter: just name the subset the revision is missing from
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # The changeset is hidden; check in the unfiltered repo whether it is
    # obsolete so we can enrich the message with the reason it is not
    # visible.
    unfilteredrepo = repo.unfiltered()
    ctx = revsymbol(unfilteredrepo, changeid)
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _("hidden revision '%s'") % changeid
    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
577
577
def revsingle(repo, revspec, default='.', localalias=None):
    """Return the last context matched by `revspec` (a single revset string).

    Falls back to `default` when `revspec` is empty (the integer 0 is a
    valid revision, not "empty"). Aborts if the revset matches nothing.
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
586
586
def _pairspec(revspec):
    # True when the top-level revset operator is a range form, which must
    # always produce a pair of revisions.
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
590
590
def revpair(repo, revs):
    """Resolve `revs` (a list of revset strings) into a pair of contexts.

    With no revs, returns (working copy parent, working copy). A top-level
    range expression always yields a true pair; a single non-range revision
    is paired with the working copy context.
    """
    if not revs:
        return repo['.'], repo[None]

    matched = revrange(repo, revs)

    if not matched:
        first = second = None
    elif matched.isascending():
        # min/max are cheap on an ordered smartset
        first, second = matched.min(), matched.max()
    elif matched.isdescending():
        first, second = matched.max(), matched.min()
    else:
        first, second = matched.first(), matched.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        # one of several specs matched nothing even though the union didn't
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
620
620
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using
    user-specified config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface
    over integer revisions.
    """
    # integers are shorthand for a 'rev(N)' revset
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
648
648
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # a real merge: both parents are always meaningful
        return parents
    if repo.ui.debugflag:
        # debug output shows both parent slots, padding with null
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx) - 1:
        # parent immediately precedes this revision: implied, not shown
        return []
    return parents
664
664
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    # (docstring fix: original read "it already has already been done")
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # an explicit kind (e.g. 're:', 'glob:') disables expansion
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            # a pattern glob cannot translate: treat it as a literal name
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # nothing matched on disk: keep the pattern as given
            expanded.append(kindpat)
    return expanded
683
683
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        # expand bare globs (no-op on posix, see expandpats())
        pats = expandpats(pats or [])

    def bad(f, msg):
        # `m` is bound below; the closure is only invoked after that
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # an "always" matcher means no patterns effectively constrained it
        pats = []
    return m, pats
708
708
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
713
713
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
717
717
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
721
721
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # a plain path: normalize it against the repo root
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # a pattern: it must resolve to exactly one file at `rev`
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
735
735
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        # default: keep the backup next to the file itself
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        # a directory is squatting on the backup file's own path
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
771
771
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        # bind the two lookups once; membership tests then need no repo access
        self._node2rev = repo.changelog.rev
        self._hasrev = revcontainer.__contains__

    def __contains__(self, node):
        return self._hasrev(self._node2rev(node))
781
781
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from
    'replacements', but 'moves' can be used to override that. Also, 'moves'
    may include additional bookmark moves that should not have associated
    obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do
    not have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms (a bare iterable of nodes means
    # "replaced by nothing")
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            # caller supplied an explicit destination; keep it
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (util.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
875
875
def addremove(repo, matcher, prefix, opts=None):
    """Add new files and forget missing ones, guessing renames by similarity.

    Recurses into subrepos when requested. Returns 0 on success, 1 when a
    subrepo failed or a file explicitly named by the matcher was rejected.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only warn for files the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        # exact matches are only echoed in verbose mode
        if not (repo.ui.verbose or not m.exact(abs)):
            continue
        if abs in unknownset:
            repo.ui.status(_('adding %s\n') % m.uipath(abs))
        else:
            repo.ui.status(_('removing %s\n') % m.uipath(abs))

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return ret
935
935
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                repo.ui.status(_('adding %s\n') % abs)
            else:
                repo.ui.status(_('removing %s\n') % abs)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return 0
964
964
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        # dirstate codes: '?' untracked, 'r' removed, 'a' added
        entry = dirstate[abs]
        if entry == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif entry != 'r' and not st:
            # tracked but gone from disk
            deleted.append(abs)
        elif entry == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif entry == 'r' and not st:
            removed.append(abs)
        elif entry == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
993
993
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # rename detection disabled
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
1008
1008
1009 def _markchanges(repo, unknown, deleted, renames):
1009 def _markchanges(repo, unknown, deleted, renames):
1010 '''Marks the files in unknown as added, the files in deleted as removed,
1010 '''Marks the files in unknown as added, the files in deleted as removed,
1011 and the files in renames as copied.'''
1011 and the files in renames as copied.'''
1012 wctx = repo[None]
1012 wctx = repo[None]
1013 with repo.wlock():
1013 with repo.wlock():
1014 wctx.forget(deleted)
1014 wctx.forget(deleted)
1015 wctx.add(unknown)
1015 wctx.add(unknown)
1016 for new, old in renames.iteritems():
1016 for new, old in renames.iteritems():
1017 wctx.copy(old, new)
1017 wctx.copy(old, new)
1018
1018
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain: if src was itself copied, record the
    # original source instead
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            # dst is neither modified nor normal; have the dirstate
            # re-check it on the next status run
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the copy source was only added in the working directory, so
            # there is no committed revision to attach copy data to
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                        "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                # dst is unknown or marked removed; just (re-)add it
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1037
1037
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for entry in requirements:
        if entry in supported:
            continue
        # an empty or non-alphanumeric entry means the file itself is bad,
        # not merely that this Mercurial is too old
        if not entry or not entry[0:1].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(entry)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1056
1056
def writerequires(opener, requirements):
    """Write the given requirements, one per line, to the 'requires' file."""
    with opener('requires', 'w') as fp:
        for name in sorted(requirements):
            fp.write("%s\n" % name)
1061
1061
class filecachesubentry(object):
    """Tracks change detection for a single file path via cached stat data."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True/False once determined, None while still unknown
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-stat the file, unless stat data is known to be unusable."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether stat data can be trusted for this path.

        While undetermined, optimistically assume it can."""
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        """True if the file changed, or if it cannot be cached at all."""
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # the fresh stat may settle cacheability we did not know before
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None if it does not exist."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1116
1116
class filecacheentry(object):
    """Aggregates several filecachesubentry objects into one cache entry."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
1133
1133
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: remember the wrapped function plus both the
        # native-str attribute name (sname) and its byte-string form
        # (name, via pycompat.sysbytes), used as the key into obj._filecache
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # fast path: a value cached on the instance is returned as-is
        if self.sname in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.sname]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # underlying file(s) changed: recompute the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # also cache on the instance so the fast path above is hit next time
        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            # (stat=False: the assigned value did not come from the files,
            # so don't record their current stat)
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # only the instance-level cached copy is dropped; _filecache keeps
        # its entry, matching the assert in __get__
        try:
            del obj.__dict__[self.sname]
        except KeyError:
            raise AttributeError(self.sname)
1213
1213
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # each record is "<revspec>[ <value>]"
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the subprocess and close the stream, even on error
        if proc:
            proc.communicate()
        if src:
            src.close()
    # the exit status is only known after communicate(); report failures
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1268
1268
1269 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1269 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1270 if lock is None:
1270 if lock is None:
1271 raise error.LockInheritanceContractViolation(
1271 raise error.LockInheritanceContractViolation(
1272 'lock can only be inherited while held')
1272 'lock can only be inherited while held')
1273 if environ is None:
1273 if environ is None:
1274 environ = {}
1274 environ = {}
1275 with lock.inherit() as locker:
1275 with lock.inherit() as locker:
1276 environ[envvar] = locker
1276 environ[envvar] = locker
1277 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1277 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1278
1278
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1287
1287
class progress(object):
    """Tracks a position within a topic and reports it through ui.progress."""

    def __init__(self, ui, topic, unit="", total=None):
        self.topic = topic
        self.unit = unit
        self.total = total
        self.pos = 0
        self.ui = ui

    def update(self, pos, item="", total=None):
        """Move to an absolute position, optionally adjusting the total.

        Note: a falsy total (None or 0) leaves the stored total unchanged."""
        if total:
            self.total = total
        self.pos = pos
        self._print(item)

    def increment(self, step=1, item="", total=None):
        """Advance the current position by step (default 1)."""
        self.update(self.pos + step, item, total)

    def _print(self, item):
        self.ui.progress(self.topic, self.pos, item, self.unit, self.total)
1308
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1314 or ui.configbool('format', 'usegeneraldelta'))
1294
1315
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    generaldelta = ui.configbool('format', 'generaldelta')
    return generaldelta
1300
1321
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or reuturned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # 'if line.strip()' skips lines containing only a newline,
            # which a bare 'if line' would let through
            pairs = (line[:-1].split('=', 1) for line in lines
                     if line.strip())
            updatedict = dict(pairs)
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                raise error.ProgrammingError(
                    "key name '%s' is reserved" % self.firstlinekey)
            if not k[0:1].isalpha():
                raise error.ProgrammingError(
                    "keys must start with a letter in a key-value file")
            if not k.isalnum():
                raise error.ProgrammingError(
                    "invalid key name in a simple key-value file")
            if '\n' in v:
                raise error.ProgrammingError(
                    "invalid value in a simple key-value file")
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1369
1390
# transaction names whose summary callback should report the number of
# changesets obsoleted by the transaction (see registersummarycallback)
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction names whose summary callback should report the range of
# new changesets and phase changes (see registersummarycallback)
_reportnewcssource = [
    'pull',
    'unbundle',
]
1382
1403
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
1395
1416
# a list of (repo, revs, match) prefetch functions, invoked by
# prefetchfiles() above
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1401
1422
1402 def registersummarycallback(repo, otr, txnname=''):
1423 def registersummarycallback(repo, otr, txnname=''):
1403 """register a callback to issue a summary after the transaction is closed
1424 """register a callback to issue a summary after the transaction is closed
1404 """
1425 """
1405 def txmatch(sources):
1426 def txmatch(sources):
1406 return any(txnname.startswith(source) for source in sources)
1427 return any(txnname.startswith(source) for source in sources)
1407
1428
1408 categories = []
1429 categories = []
1409
1430
1410 def reportsummary(func):
1431 def reportsummary(func):
1411 """decorator for report callbacks."""
1432 """decorator for report callbacks."""
1412 # The repoview life cycle is shorter than the one of the actual
1433 # The repoview life cycle is shorter than the one of the actual
1413 # underlying repository. So the filtered object can die before the
1434 # underlying repository. So the filtered object can die before the
1414 # weakref is used leading to troubles. We keep a reference to the
1435 # weakref is used leading to troubles. We keep a reference to the
1415 # unfiltered object and restore the filtering when retrieving the
1436 # unfiltered object and restore the filtering when retrieving the
1416 # repository through the weakref.
1437 # repository through the weakref.
1417 filtername = repo.filtername
1438 filtername = repo.filtername
1418 reporef = weakref.ref(repo.unfiltered())
1439 reporef = weakref.ref(repo.unfiltered())
1419 def wrapped(tr):
1440 def wrapped(tr):
1420 repo = reporef()
1441 repo = reporef()
1421 if filtername:
1442 if filtername:
1422 repo = repo.filtered(filtername)
1443 repo = repo.filtered(filtername)
1423 func(repo, tr)
1444 func(repo, tr)
1424 newcat = '%02i-txnreport' % len(categories)
1445 newcat = '%02i-txnreport' % len(categories)
1425 otr.addpostclose(newcat, wrapped)
1446 otr.addpostclose(newcat, wrapped)
1426 categories.append(newcat)
1447 categories.append(newcat)
1427 return wrapped
1448 return wrapped
1428
1449
1429 if txmatch(_reportobsoletedsource):
1450 if txmatch(_reportobsoletedsource):
1430 @reportsummary
1451 @reportsummary
1431 def reportobsoleted(repo, tr):
1452 def reportobsoleted(repo, tr):
1432 obsoleted = obsutil.getobsoleted(repo, tr)
1453 obsoleted = obsutil.getobsoleted(repo, tr)
1433 if obsoleted:
1454 if obsoleted:
1434 repo.ui.status(_('obsoleted %i changesets\n')
1455 repo.ui.status(_('obsoleted %i changesets\n')
1435 % len(obsoleted))
1456 % len(obsoleted))
1436
1457
1437 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1458 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1438 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1459 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1439 instabilitytypes = [
1460 instabilitytypes = [
1440 ('orphan', 'orphan'),
1461 ('orphan', 'orphan'),
1441 ('phase-divergent', 'phasedivergent'),
1462 ('phase-divergent', 'phasedivergent'),
1442 ('content-divergent', 'contentdivergent'),
1463 ('content-divergent', 'contentdivergent'),
1443 ]
1464 ]
1444
1465
1445 def getinstabilitycounts(repo):
1466 def getinstabilitycounts(repo):
1446 filtered = repo.changelog.filteredrevs
1467 filtered = repo.changelog.filteredrevs
1447 counts = {}
1468 counts = {}
1448 for instability, revset in instabilitytypes:
1469 for instability, revset in instabilitytypes:
1449 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1470 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1450 filtered)
1471 filtered)
1451 return counts
1472 return counts
1452
1473
1453 oldinstabilitycounts = getinstabilitycounts(repo)
1474 oldinstabilitycounts = getinstabilitycounts(repo)
1454 @reportsummary
1475 @reportsummary
1455 def reportnewinstabilities(repo, tr):
1476 def reportnewinstabilities(repo, tr):
1456 newinstabilitycounts = getinstabilitycounts(repo)
1477 newinstabilitycounts = getinstabilitycounts(repo)
1457 for instability, revset in instabilitytypes:
1478 for instability, revset in instabilitytypes:
1458 delta = (newinstabilitycounts[instability] -
1479 delta = (newinstabilitycounts[instability] -
1459 oldinstabilitycounts[instability])
1480 oldinstabilitycounts[instability])
1460 if delta > 0:
1481 if delta > 0:
1461 repo.ui.warn(_('%i new %s changesets\n') %
1482 repo.ui.warn(_('%i new %s changesets\n') %
1462 (delta, instability))
1483 (delta, instability))
1463
1484
1464 if txmatch(_reportnewcssource):
1485 if txmatch(_reportnewcssource):
1465 @reportsummary
1486 @reportsummary
1466 def reportnewcs(repo, tr):
1487 def reportnewcs(repo, tr):
1467 """Report the range of new revisions pulled/unbundled."""
1488 """Report the range of new revisions pulled/unbundled."""
1468 newrevs = tr.changes.get('revs', xrange(0, 0))
1489 newrevs = tr.changes.get('revs', xrange(0, 0))
1469 if not newrevs:
1490 if not newrevs:
1470 return
1491 return
1471
1492
1472 # Compute the bounds of new revisions' range, excluding obsoletes.
1493 # Compute the bounds of new revisions' range, excluding obsoletes.
1473 unfi = repo.unfiltered()
1494 unfi = repo.unfiltered()
1474 revs = unfi.revs('%ld and not obsolete()', newrevs)
1495 revs = unfi.revs('%ld and not obsolete()', newrevs)
1475 if not revs:
1496 if not revs:
1476 # Got only obsoletes.
1497 # Got only obsoletes.
1477 return
1498 return
1478 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1499 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1479
1500
1480 if minrev == maxrev:
1501 if minrev == maxrev:
1481 revrange = minrev
1502 revrange = minrev
1482 else:
1503 else:
1483 revrange = '%s:%s' % (minrev, maxrev)
1504 revrange = '%s:%s' % (minrev, maxrev)
1484 repo.ui.status(_('new changesets %s\n') % revrange)
1505 repo.ui.status(_('new changesets %s\n') % revrange)
1485
1506
1486 @reportsummary
1507 @reportsummary
1487 def reportphasechanges(repo, tr):
1508 def reportphasechanges(repo, tr):
1488 """Report statistics of phase changes for changesets pre-existing
1509 """Report statistics of phase changes for changesets pre-existing
1489 pull/unbundle.
1510 pull/unbundle.
1490 """
1511 """
1491 newrevs = tr.changes.get('revs', xrange(0, 0))
1512 newrevs = tr.changes.get('revs', xrange(0, 0))
1492 phasetracking = tr.changes.get('phases', {})
1513 phasetracking = tr.changes.get('phases', {})
1493 if not phasetracking:
1514 if not phasetracking:
1494 return
1515 return
1495 published = [
1516 published = [
1496 rev for rev, (old, new) in phasetracking.iteritems()
1517 rev for rev, (old, new) in phasetracking.iteritems()
1497 if new == phases.public and rev not in newrevs
1518 if new == phases.public and rev not in newrevs
1498 ]
1519 ]
1499 if not published:
1520 if not published:
1500 return
1521 return
1501 repo.ui.status(_('%d local changesets published\n')
1522 repo.ui.status(_('%d local changesets published\n')
1502 % len(published))
1523 % len(published))
1503
1524
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line summary of the given node list.

    Short lists (or verbose mode) show every node; longer lists show the
    first ``maxnumnodes`` entries followed by a count of the remainder.
    """
    if repo.ui.verbose or len(nodes) <= maxnumnodes:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1509
1530
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    # strip/repair transactions legitimately leave extra heads behind
    if desc in ('strip', 'repair'):
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1524
1545
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # Default implementation is the identity; extensions wrap this function.
    return sink
1530
1551
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # Feature must be explicitly enabled, and only makes sense on a
    # visibility-filtered repo.
    if not repo.filtername:
        return repo
    if not repo.ui.configbool('experimental', 'directaccess'):
        return repo
    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    # Collect every hash-like symbol mentioned in the user's revsets.
    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:
            # will be reported by scmutil.revrange()
            continue
        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)
    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable those caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1573
1594
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        # First interpret the symbol as a plain revision number, if possible.
        try:
            n = int(s)
        except ValueError:
            n = None
        if n is not None and n <= tiprev:
            if not allowrevnums:
                continue
            # Revision exists in the unfiltered repo but not in the
            # filtered one: it is hidden.
            if n not in cl:
                revs.add(n)
            continue

        # Otherwise treat it as a (possibly abbreviated) hex node id.
        try:
            node = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            node = None

        if node is not None:
            rev = unficl.rev(node)
            if rev not in cl:
                revs.add(rev)

    return revs
1607
1628
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    # Ancestors of the bookmark, minus anything reachable only through
    # other heads or other bookmarks.
    query = ("ancestors(bookmark(%s)) - "
             "ancestors(head() and not bookmark(%s)) - "
             "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(query, mark, mark, mark)
@@ -1,1869 +1,1873 b''
1 # ui.py - user interface bits for mercurial
1 # ui.py - user interface bits for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import getpass
13 import getpass
14 import inspect
14 import inspect
15 import os
15 import os
16 import re
16 import re
17 import signal
17 import signal
18 import socket
18 import socket
19 import subprocess
19 import subprocess
20 import sys
20 import sys
21 import traceback
21 import traceback
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import hex
24 from .node import hex
25
25
26 from . import (
26 from . import (
27 color,
27 color,
28 config,
28 config,
29 configitems,
29 configitems,
30 encoding,
30 encoding,
31 error,
31 error,
32 formatter,
32 formatter,
33 progress,
33 progress,
34 pycompat,
34 pycompat,
35 rcutil,
35 rcutil,
36 scmutil,
36 scmutil,
37 util,
37 util,
38 )
38 )
39 from .utils import (
39 from .utils import (
40 dateutil,
40 dateutil,
41 procutil,
41 procutil,
42 stringutil,
42 stringutil,
43 )
43 )
44
44
45 urlreq = util.urlreq
45 urlreq = util.urlreq
46
46
47 # for use with str.translate(None, _keepalnum), to keep just alphanumerics
47 # for use with str.translate(None, _keepalnum), to keep just alphanumerics
48 _keepalnum = ''.join(c for c in map(pycompat.bytechr, range(256))
48 _keepalnum = ''.join(c for c in map(pycompat.bytechr, range(256))
49 if not c.isalnum())
49 if not c.isalnum())
50
50
51 # The config knobs that will be altered (if unset) by ui.tweakdefaults.
51 # The config knobs that will be altered (if unset) by ui.tweakdefaults.
52 tweakrc = b"""
52 tweakrc = b"""
53 [ui]
53 [ui]
54 # The rollback command is dangerous. As a rule, don't use it.
54 # The rollback command is dangerous. As a rule, don't use it.
55 rollback = False
55 rollback = False
56 # Make `hg status` report copy information
56 # Make `hg status` report copy information
57 statuscopies = yes
57 statuscopies = yes
58 # Prefer curses UIs when available. Revert to plain-text with `text`.
58 # Prefer curses UIs when available. Revert to plain-text with `text`.
59 interface = curses
59 interface = curses
60
60
61 [commands]
61 [commands]
62 # Make `hg status` emit cwd-relative paths by default.
62 # Make `hg status` emit cwd-relative paths by default.
63 status.relative = yes
63 status.relative = yes
64 # Refuse to perform an `hg update` that would cause a file content merge
64 # Refuse to perform an `hg update` that would cause a file content merge
65 update.check = noconflict
65 update.check = noconflict
66 # Show conflicts information in `hg status`
66 # Show conflicts information in `hg status`
67 status.verbose = True
67 status.verbose = True
68 # Collapse entire directories that contain only unknown files
68 # Collapse entire directories that contain only unknown files
69 status.terse = u
69 status.terse = u
70
70
71 [diff]
71 [diff]
72 git = 1
72 git = 1
73 showfunc = 1
73 showfunc = 1
74 """
74 """
75
75
76 samplehgrcs = {
76 samplehgrcs = {
77 'user':
77 'user':
78 b"""# example user config (see 'hg help config' for more info)
78 b"""# example user config (see 'hg help config' for more info)
79 [ui]
79 [ui]
80 # name and email, e.g.
80 # name and email, e.g.
81 # username = Jane Doe <jdoe@example.com>
81 # username = Jane Doe <jdoe@example.com>
82 username =
82 username =
83
83
84 # We recommend enabling tweakdefaults to get slight improvements to
84 # We recommend enabling tweakdefaults to get slight improvements to
85 # the UI over time. Make sure to set HGPLAIN in the environment when
85 # the UI over time. Make sure to set HGPLAIN in the environment when
86 # writing scripts!
86 # writing scripts!
87 # tweakdefaults = True
87 # tweakdefaults = True
88
88
89 # uncomment to disable color in command output
89 # uncomment to disable color in command output
90 # (see 'hg help color' for details)
90 # (see 'hg help color' for details)
91 # color = never
91 # color = never
92
92
93 # uncomment to disable command output pagination
93 # uncomment to disable command output pagination
94 # (see 'hg help pager' for details)
94 # (see 'hg help pager' for details)
95 # paginate = never
95 # paginate = never
96
96
97 [extensions]
97 [extensions]
98 # uncomment these lines to enable some popular extensions
98 # uncomment these lines to enable some popular extensions
99 # (see 'hg help extensions' for more info)
99 # (see 'hg help extensions' for more info)
100 #
100 #
101 # churn =
101 # churn =
102 """,
102 """,
103
103
104 'cloned':
104 'cloned':
105 b"""# example repository config (see 'hg help config' for more info)
105 b"""# example repository config (see 'hg help config' for more info)
106 [paths]
106 [paths]
107 default = %s
107 default = %s
108
108
109 # path aliases to other clones of this repo in URLs or filesystem paths
109 # path aliases to other clones of this repo in URLs or filesystem paths
110 # (see 'hg help config.paths' for more info)
110 # (see 'hg help config.paths' for more info)
111 #
111 #
112 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
112 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
113 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
113 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
114 # my-clone = /home/jdoe/jdoes-clone
114 # my-clone = /home/jdoe/jdoes-clone
115
115
116 [ui]
116 [ui]
117 # name and email (local to this repository, optional), e.g.
117 # name and email (local to this repository, optional), e.g.
118 # username = Jane Doe <jdoe@example.com>
118 # username = Jane Doe <jdoe@example.com>
119 """,
119 """,
120
120
121 'local':
121 'local':
122 b"""# example repository config (see 'hg help config' for more info)
122 b"""# example repository config (see 'hg help config' for more info)
123 [paths]
123 [paths]
124 # path aliases to other clones of this repo in URLs or filesystem paths
124 # path aliases to other clones of this repo in URLs or filesystem paths
125 # (see 'hg help config.paths' for more info)
125 # (see 'hg help config.paths' for more info)
126 #
126 #
127 # default = http://example.com/hg/example-repo
127 # default = http://example.com/hg/example-repo
128 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
128 # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
129 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
129 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
130 # my-clone = /home/jdoe/jdoes-clone
130 # my-clone = /home/jdoe/jdoes-clone
131
131
132 [ui]
132 [ui]
133 # name and email (local to this repository, optional), e.g.
133 # name and email (local to this repository, optional), e.g.
134 # username = Jane Doe <jdoe@example.com>
134 # username = Jane Doe <jdoe@example.com>
135 """,
135 """,
136
136
137 'global':
137 'global':
138 b"""# example system-wide hg config (see 'hg help config' for more info)
138 b"""# example system-wide hg config (see 'hg help config' for more info)
139
139
140 [ui]
140 [ui]
141 # uncomment to disable color in command output
141 # uncomment to disable color in command output
142 # (see 'hg help color' for details)
142 # (see 'hg help color' for details)
143 # color = never
143 # color = never
144
144
145 # uncomment to disable command output pagination
145 # uncomment to disable command output pagination
146 # (see 'hg help pager' for details)
146 # (see 'hg help pager' for details)
147 # paginate = never
147 # paginate = never
148
148
149 [extensions]
149 [extensions]
150 # uncomment these lines to enable some popular extensions
150 # uncomment these lines to enable some popular extensions
151 # (see 'hg help extensions' for more info)
151 # (see 'hg help extensions' for more info)
152 #
152 #
153 # blackbox =
153 # blackbox =
154 # churn =
154 # churn =
155 """,
155 """,
156 }
156 }
157
157
def _maybestrurl(maybebytes):
    """Recursively convert bytes (possibly inside containers) to str URLs."""
    return util.rapply(pycompat.strurl, maybebytes)
160
160
def _maybebytesurl(maybestr):
    """Recursively convert str (possibly inside containers) to bytes URLs."""
    return util.rapply(pycompat.bytesurl, maybestr)
163
163
class httppasswordmgrdbproxy(object):
    """Delays loading urllib2 until it's needed."""

    def __init__(self):
        # The real password manager is created lazily by _get_mgr().
        self._mgr = None

    def _get_mgr(self):
        mgr = self._mgr
        if mgr is None:
            mgr = self._mgr = urlreq.httppasswordmgrwithdefaultrealm()
        return mgr

    def add_password(self, realm, uris, user, passwd):
        # urllib wants native str; convert bytes arguments at the boundary.
        mgr = self._get_mgr()
        return mgr.add_password(_maybestrurl(realm), _maybestrurl(uris),
                                _maybestrurl(user), _maybestrurl(passwd))

    def find_user_password(self, realm, uri):
        found = self._get_mgr().find_user_password(_maybestrurl(realm),
                                                   _maybestrurl(uri))
        # Convert back to bytes for the rest of Mercurial.
        return _maybebytesurl(found)
183
183
def _catchterm(*args):
    """Signal handler that converts a termination signal into an exception."""
    raise error.SignalInterrupt
186
186
187 # unique object used to detect no default value has been provided when
187 # unique object used to detect no default value has been provided when
188 # retrieving configuration value.
188 # retrieving configuration value.
189 _unset = object()
189 _unset = object()
190
190
191 # _reqexithandlers: callbacks run at the end of a request
191 # _reqexithandlers: callbacks run at the end of a request
192 _reqexithandlers = []
192 _reqexithandlers = []
193
193
194 class ui(object):
194 class ui(object):
195 def __init__(self, src=None):
195 def __init__(self, src=None):
196 """Create a fresh new ui object if no src given
196 """Create a fresh new ui object if no src given
197
197
198 Use uimod.ui.load() to create a ui which knows global and user configs.
198 Use uimod.ui.load() to create a ui which knows global and user configs.
199 In most cases, you should use ui.copy() to create a copy of an existing
199 In most cases, you should use ui.copy() to create a copy of an existing
200 ui object.
200 ui object.
201 """
201 """
202 # _buffers: used for temporary capture of output
202 # _buffers: used for temporary capture of output
203 self._buffers = []
203 self._buffers = []
204 # 3-tuple describing how each buffer in the stack behaves.
204 # 3-tuple describing how each buffer in the stack behaves.
205 # Values are (capture stderr, capture subprocesses, apply labels).
205 # Values are (capture stderr, capture subprocesses, apply labels).
206 self._bufferstates = []
206 self._bufferstates = []
207 # When a buffer is active, defines whether we are expanding labels.
207 # When a buffer is active, defines whether we are expanding labels.
208 # This exists to prevent an extra list lookup.
208 # This exists to prevent an extra list lookup.
209 self._bufferapplylabels = None
209 self._bufferapplylabels = None
210 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
210 self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
211 self._reportuntrusted = True
211 self._reportuntrusted = True
212 self._knownconfig = configitems.coreitems
212 self._knownconfig = configitems.coreitems
213 self._ocfg = config.config() # overlay
213 self._ocfg = config.config() # overlay
214 self._tcfg = config.config() # trusted
214 self._tcfg = config.config() # trusted
215 self._ucfg = config.config() # untrusted
215 self._ucfg = config.config() # untrusted
216 self._trustusers = set()
216 self._trustusers = set()
217 self._trustgroups = set()
217 self._trustgroups = set()
218 self.callhooks = True
218 self.callhooks = True
219 # Insecure server connections requested.
219 # Insecure server connections requested.
220 self.insecureconnections = False
220 self.insecureconnections = False
221 # Blocked time
221 # Blocked time
222 self.logblockedtimes = False
222 self.logblockedtimes = False
223 # color mode: see mercurial/color.py for possible value
223 # color mode: see mercurial/color.py for possible value
224 self._colormode = None
224 self._colormode = None
225 self._terminfoparams = {}
225 self._terminfoparams = {}
226 self._styles = {}
226 self._styles = {}
227
227
228 if src:
228 if src:
229 self.fout = src.fout
229 self.fout = src.fout
230 self.ferr = src.ferr
230 self.ferr = src.ferr
231 self.fin = src.fin
231 self.fin = src.fin
232 self.pageractive = src.pageractive
232 self.pageractive = src.pageractive
233 self._disablepager = src._disablepager
233 self._disablepager = src._disablepager
234 self._tweaked = src._tweaked
234 self._tweaked = src._tweaked
235
235
236 self._tcfg = src._tcfg.copy()
236 self._tcfg = src._tcfg.copy()
237 self._ucfg = src._ucfg.copy()
237 self._ucfg = src._ucfg.copy()
238 self._ocfg = src._ocfg.copy()
238 self._ocfg = src._ocfg.copy()
239 self._trustusers = src._trustusers.copy()
239 self._trustusers = src._trustusers.copy()
240 self._trustgroups = src._trustgroups.copy()
240 self._trustgroups = src._trustgroups.copy()
241 self.environ = src.environ
241 self.environ = src.environ
242 self.callhooks = src.callhooks
242 self.callhooks = src.callhooks
243 self.insecureconnections = src.insecureconnections
243 self.insecureconnections = src.insecureconnections
244 self._colormode = src._colormode
244 self._colormode = src._colormode
245 self._terminfoparams = src._terminfoparams.copy()
245 self._terminfoparams = src._terminfoparams.copy()
246 self._styles = src._styles.copy()
246 self._styles = src._styles.copy()
247
247
248 self.fixconfig()
248 self.fixconfig()
249
249
250 self.httppasswordmgrdb = src.httppasswordmgrdb
250 self.httppasswordmgrdb = src.httppasswordmgrdb
251 self._blockedtimes = src._blockedtimes
251 self._blockedtimes = src._blockedtimes
252 else:
252 else:
253 self.fout = procutil.stdout
253 self.fout = procutil.stdout
254 self.ferr = procutil.stderr
254 self.ferr = procutil.stderr
255 self.fin = procutil.stdin
255 self.fin = procutil.stdin
256 self.pageractive = False
256 self.pageractive = False
257 self._disablepager = False
257 self._disablepager = False
258 self._tweaked = False
258 self._tweaked = False
259
259
260 # shared read-only environment
260 # shared read-only environment
261 self.environ = encoding.environ
261 self.environ = encoding.environ
262
262
263 self.httppasswordmgrdb = httppasswordmgrdbproxy()
263 self.httppasswordmgrdb = httppasswordmgrdbproxy()
264 self._blockedtimes = collections.defaultdict(int)
264 self._blockedtimes = collections.defaultdict(int)
265
265
266 allowed = self.configlist('experimental', 'exportableenviron')
266 allowed = self.configlist('experimental', 'exportableenviron')
267 if '*' in allowed:
267 if '*' in allowed:
268 self._exportableenviron = self.environ
268 self._exportableenviron = self.environ
269 else:
269 else:
270 self._exportableenviron = {}
270 self._exportableenviron = {}
271 for k in allowed:
271 for k in allowed:
272 if k in self.environ:
272 if k in self.environ:
273 self._exportableenviron[k] = self.environ[k]
273 self._exportableenviron[k] = self.environ[k]
274
274
275 @classmethod
275 @classmethod
276 def load(cls):
276 def load(cls):
277 """Create a ui and load global and user configs"""
277 """Create a ui and load global and user configs"""
278 u = cls()
278 u = cls()
279 # we always trust global config files and environment variables
279 # we always trust global config files and environment variables
280 for t, f in rcutil.rccomponents():
280 for t, f in rcutil.rccomponents():
281 if t == 'path':
281 if t == 'path':
282 u.readconfig(f, trust=True)
282 u.readconfig(f, trust=True)
283 elif t == 'items':
283 elif t == 'items':
284 sections = set()
284 sections = set()
285 for section, name, value, source in f:
285 for section, name, value, source in f:
286 # do not set u._ocfg
286 # do not set u._ocfg
287 # XXX clean this up once immutable config object is a thing
287 # XXX clean this up once immutable config object is a thing
288 u._tcfg.set(section, name, value, source)
288 u._tcfg.set(section, name, value, source)
289 u._ucfg.set(section, name, value, source)
289 u._ucfg.set(section, name, value, source)
290 sections.add(section)
290 sections.add(section)
291 for section in sections:
291 for section in sections:
292 u.fixconfig(section=section)
292 u.fixconfig(section=section)
293 else:
293 else:
294 raise error.ProgrammingError('unknown rctype: %s' % t)
294 raise error.ProgrammingError('unknown rctype: %s' % t)
295 u._maybetweakdefaults()
295 u._maybetweakdefaults()
296 return u
296 return u
297
297
298 def _maybetweakdefaults(self):
298 def _maybetweakdefaults(self):
299 if not self.configbool('ui', 'tweakdefaults'):
299 if not self.configbool('ui', 'tweakdefaults'):
300 return
300 return
301 if self._tweaked or self.plain('tweakdefaults'):
301 if self._tweaked or self.plain('tweakdefaults'):
302 return
302 return
303
303
304 # Note: it is SUPER IMPORTANT that you set self._tweaked to
304 # Note: it is SUPER IMPORTANT that you set self._tweaked to
305 # True *before* any calls to setconfig(), otherwise you'll get
305 # True *before* any calls to setconfig(), otherwise you'll get
306 # infinite recursion between setconfig and this method.
306 # infinite recursion between setconfig and this method.
307 #
307 #
308 # TODO: We should extract an inner method in setconfig() to
308 # TODO: We should extract an inner method in setconfig() to
309 # avoid this weirdness.
309 # avoid this weirdness.
310 self._tweaked = True
310 self._tweaked = True
311 tmpcfg = config.config()
311 tmpcfg = config.config()
312 tmpcfg.parse('<tweakdefaults>', tweakrc)
312 tmpcfg.parse('<tweakdefaults>', tweakrc)
313 for section in tmpcfg:
313 for section in tmpcfg:
314 for name, value in tmpcfg.items(section):
314 for name, value in tmpcfg.items(section):
315 if not self.hasconfig(section, name):
315 if not self.hasconfig(section, name):
316 self.setconfig(section, name, value, "<tweakdefaults>")
316 self.setconfig(section, name, value, "<tweakdefaults>")
317
317
318 def copy(self):
318 def copy(self):
319 return self.__class__(self)
319 return self.__class__(self)
320
320
321 def resetstate(self):
321 def resetstate(self):
322 """Clear internal state that shouldn't persist across commands"""
322 """Clear internal state that shouldn't persist across commands"""
323 if self._progbar:
323 if self._progbar:
324 self._progbar.resetstate() # reset last-print time of progress bar
324 self._progbar.resetstate() # reset last-print time of progress bar
325 self.httppasswordmgrdb = httppasswordmgrdbproxy()
325 self.httppasswordmgrdb = httppasswordmgrdbproxy()
326
326
327 @contextlib.contextmanager
327 @contextlib.contextmanager
328 def timeblockedsection(self, key):
328 def timeblockedsection(self, key):
329 # this is open-coded below - search for timeblockedsection to find them
329 # this is open-coded below - search for timeblockedsection to find them
330 starttime = util.timer()
330 starttime = util.timer()
331 try:
331 try:
332 yield
332 yield
333 finally:
333 finally:
334 self._blockedtimes[key + '_blocked'] += \
334 self._blockedtimes[key + '_blocked'] += \
335 (util.timer() - starttime) * 1000
335 (util.timer() - starttime) * 1000
336
336
337 def formatter(self, topic, opts):
337 def formatter(self, topic, opts):
338 return formatter.formatter(self, self, topic, opts)
338 return formatter.formatter(self, self, topic, opts)
339
339
340 def _trusted(self, fp, f):
340 def _trusted(self, fp, f):
341 st = util.fstat(fp)
341 st = util.fstat(fp)
342 if util.isowner(st):
342 if util.isowner(st):
343 return True
343 return True
344
344
345 tusers, tgroups = self._trustusers, self._trustgroups
345 tusers, tgroups = self._trustusers, self._trustgroups
346 if '*' in tusers or '*' in tgroups:
346 if '*' in tusers or '*' in tgroups:
347 return True
347 return True
348
348
349 user = util.username(st.st_uid)
349 user = util.username(st.st_uid)
350 group = util.groupname(st.st_gid)
350 group = util.groupname(st.st_gid)
351 if user in tusers or group in tgroups or user == util.username():
351 if user in tusers or group in tgroups or user == util.username():
352 return True
352 return True
353
353
354 if self._reportuntrusted:
354 if self._reportuntrusted:
355 self.warn(_('not trusting file %s from untrusted '
355 self.warn(_('not trusting file %s from untrusted '
356 'user %s, group %s\n') % (f, user, group))
356 'user %s, group %s\n') % (f, user, group))
357 return False
357 return False
358
358
359 def readconfig(self, filename, root=None, trust=False,
359 def readconfig(self, filename, root=None, trust=False,
360 sections=None, remap=None):
360 sections=None, remap=None):
361 try:
361 try:
362 fp = open(filename, u'rb')
362 fp = open(filename, u'rb')
363 except IOError:
363 except IOError:
364 if not sections: # ignore unless we were looking for something
364 if not sections: # ignore unless we were looking for something
365 return
365 return
366 raise
366 raise
367
367
368 cfg = config.config()
368 cfg = config.config()
369 trusted = sections or trust or self._trusted(fp, filename)
369 trusted = sections or trust or self._trusted(fp, filename)
370
370
371 try:
371 try:
372 cfg.read(filename, fp, sections=sections, remap=remap)
372 cfg.read(filename, fp, sections=sections, remap=remap)
373 fp.close()
373 fp.close()
374 except error.ConfigError as inst:
374 except error.ConfigError as inst:
375 if trusted:
375 if trusted:
376 raise
376 raise
377 self.warn(_("ignored: %s\n") % stringutil.forcebytestr(inst))
377 self.warn(_("ignored: %s\n") % stringutil.forcebytestr(inst))
378
378
379 if self.plain():
379 if self.plain():
380 for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
380 for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
381 'logtemplate', 'statuscopies', 'style',
381 'logtemplate', 'statuscopies', 'style',
382 'traceback', 'verbose'):
382 'traceback', 'verbose'):
383 if k in cfg['ui']:
383 if k in cfg['ui']:
384 del cfg['ui'][k]
384 del cfg['ui'][k]
385 for k, v in cfg.items('defaults'):
385 for k, v in cfg.items('defaults'):
386 del cfg['defaults'][k]
386 del cfg['defaults'][k]
387 for k, v in cfg.items('commands'):
387 for k, v in cfg.items('commands'):
388 del cfg['commands'][k]
388 del cfg['commands'][k]
389 # Don't remove aliases from the configuration if in the exceptionlist
389 # Don't remove aliases from the configuration if in the exceptionlist
390 if self.plain('alias'):
390 if self.plain('alias'):
391 for k, v in cfg.items('alias'):
391 for k, v in cfg.items('alias'):
392 del cfg['alias'][k]
392 del cfg['alias'][k]
393 if self.plain('revsetalias'):
393 if self.plain('revsetalias'):
394 for k, v in cfg.items('revsetalias'):
394 for k, v in cfg.items('revsetalias'):
395 del cfg['revsetalias'][k]
395 del cfg['revsetalias'][k]
396 if self.plain('templatealias'):
396 if self.plain('templatealias'):
397 for k, v in cfg.items('templatealias'):
397 for k, v in cfg.items('templatealias'):
398 del cfg['templatealias'][k]
398 del cfg['templatealias'][k]
399
399
400 if trusted:
400 if trusted:
401 self._tcfg.update(cfg)
401 self._tcfg.update(cfg)
402 self._tcfg.update(self._ocfg)
402 self._tcfg.update(self._ocfg)
403 self._ucfg.update(cfg)
403 self._ucfg.update(cfg)
404 self._ucfg.update(self._ocfg)
404 self._ucfg.update(self._ocfg)
405
405
406 if root is None:
406 if root is None:
407 root = os.path.expanduser('~')
407 root = os.path.expanduser('~')
408 self.fixconfig(root=root)
408 self.fixconfig(root=root)
409
409
410 def fixconfig(self, root=None, section=None):
410 def fixconfig(self, root=None, section=None):
411 if section in (None, 'paths'):
411 if section in (None, 'paths'):
412 # expand vars and ~
412 # expand vars and ~
413 # translate paths relative to root (or home) into absolute paths
413 # translate paths relative to root (or home) into absolute paths
414 root = root or pycompat.getcwd()
414 root = root or pycompat.getcwd()
415 for c in self._tcfg, self._ucfg, self._ocfg:
415 for c in self._tcfg, self._ucfg, self._ocfg:
416 for n, p in c.items('paths'):
416 for n, p in c.items('paths'):
417 # Ignore sub-options.
417 # Ignore sub-options.
418 if ':' in n:
418 if ':' in n:
419 continue
419 continue
420 if not p:
420 if not p:
421 continue
421 continue
422 if '%%' in p:
422 if '%%' in p:
423 s = self.configsource('paths', n) or 'none'
423 s = self.configsource('paths', n) or 'none'
424 self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
424 self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
425 % (n, p, s))
425 % (n, p, s))
426 p = p.replace('%%', '%')
426 p = p.replace('%%', '%')
427 p = util.expandpath(p)
427 p = util.expandpath(p)
428 if not util.hasscheme(p) and not os.path.isabs(p):
428 if not util.hasscheme(p) and not os.path.isabs(p):
429 p = os.path.normpath(os.path.join(root, p))
429 p = os.path.normpath(os.path.join(root, p))
430 c.set("paths", n, p)
430 c.set("paths", n, p)
431
431
432 if section in (None, 'ui'):
432 if section in (None, 'ui'):
433 # update ui options
433 # update ui options
434 self.debugflag = self.configbool('ui', 'debug')
434 self.debugflag = self.configbool('ui', 'debug')
435 self.verbose = self.debugflag or self.configbool('ui', 'verbose')
435 self.verbose = self.debugflag or self.configbool('ui', 'verbose')
436 self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
436 self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
437 if self.verbose and self.quiet:
437 if self.verbose and self.quiet:
438 self.quiet = self.verbose = False
438 self.quiet = self.verbose = False
439 self._reportuntrusted = self.debugflag or self.configbool("ui",
439 self._reportuntrusted = self.debugflag or self.configbool("ui",
440 "report_untrusted")
440 "report_untrusted")
441 self.tracebackflag = self.configbool('ui', 'traceback')
441 self.tracebackflag = self.configbool('ui', 'traceback')
442 self.logblockedtimes = self.configbool('ui', 'logblockedtimes')
442 self.logblockedtimes = self.configbool('ui', 'logblockedtimes')
443
443
444 if section in (None, 'trusted'):
444 if section in (None, 'trusted'):
445 # update trust information
445 # update trust information
446 self._trustusers.update(self.configlist('trusted', 'users'))
446 self._trustusers.update(self.configlist('trusted', 'users'))
447 self._trustgroups.update(self.configlist('trusted', 'groups'))
447 self._trustgroups.update(self.configlist('trusted', 'groups'))
448
448
449 def backupconfig(self, section, item):
449 def backupconfig(self, section, item):
450 return (self._ocfg.backup(section, item),
450 return (self._ocfg.backup(section, item),
451 self._tcfg.backup(section, item),
451 self._tcfg.backup(section, item),
452 self._ucfg.backup(section, item),)
452 self._ucfg.backup(section, item),)
453 def restoreconfig(self, data):
453 def restoreconfig(self, data):
454 self._ocfg.restore(data[0])
454 self._ocfg.restore(data[0])
455 self._tcfg.restore(data[1])
455 self._tcfg.restore(data[1])
456 self._ucfg.restore(data[2])
456 self._ucfg.restore(data[2])
457
457
458 def setconfig(self, section, name, value, source=''):
458 def setconfig(self, section, name, value, source=''):
459 for cfg in (self._ocfg, self._tcfg, self._ucfg):
459 for cfg in (self._ocfg, self._tcfg, self._ucfg):
460 cfg.set(section, name, value, source)
460 cfg.set(section, name, value, source)
461 self.fixconfig(section=section)
461 self.fixconfig(section=section)
462 self._maybetweakdefaults()
462 self._maybetweakdefaults()
463
463
464 def _data(self, untrusted):
464 def _data(self, untrusted):
465 return untrusted and self._ucfg or self._tcfg
465 return untrusted and self._ucfg or self._tcfg
466
466
467 def configsource(self, section, name, untrusted=False):
467 def configsource(self, section, name, untrusted=False):
468 return self._data(untrusted).source(section, name)
468 return self._data(untrusted).source(section, name)
469
469
470 def config(self, section, name, default=_unset, untrusted=False):
470 def config(self, section, name, default=_unset, untrusted=False):
471 """return the plain string version of a config"""
471 """return the plain string version of a config"""
472 value = self._config(section, name, default=default,
472 value = self._config(section, name, default=default,
473 untrusted=untrusted)
473 untrusted=untrusted)
474 if value is _unset:
474 if value is _unset:
475 return None
475 return None
476 return value
476 return value
477
477
478 def _config(self, section, name, default=_unset, untrusted=False):
478 def _config(self, section, name, default=_unset, untrusted=False):
479 value = itemdefault = default
479 value = itemdefault = default
480 item = self._knownconfig.get(section, {}).get(name)
480 item = self._knownconfig.get(section, {}).get(name)
481 alternates = [(section, name)]
481 alternates = [(section, name)]
482
482
483 if item is not None:
483 if item is not None:
484 alternates.extend(item.alias)
484 alternates.extend(item.alias)
485 if callable(item.default):
485 if callable(item.default):
486 itemdefault = item.default()
486 itemdefault = item.default()
487 else:
487 else:
488 itemdefault = item.default
488 itemdefault = item.default
489 else:
489 else:
490 msg = ("accessing unregistered config item: '%s.%s'")
490 msg = ("accessing unregistered config item: '%s.%s'")
491 msg %= (section, name)
491 msg %= (section, name)
492 self.develwarn(msg, 2, 'warn-config-unknown')
492 self.develwarn(msg, 2, 'warn-config-unknown')
493
493
494 if default is _unset:
494 if default is _unset:
495 if item is None:
495 if item is None:
496 value = default
496 value = default
497 elif item.default is configitems.dynamicdefault:
497 elif item.default is configitems.dynamicdefault:
498 value = None
498 value = None
499 msg = "config item requires an explicit default value: '%s.%s'"
499 msg = "config item requires an explicit default value: '%s.%s'"
500 msg %= (section, name)
500 msg %= (section, name)
501 self.develwarn(msg, 2, 'warn-config-default')
501 self.develwarn(msg, 2, 'warn-config-default')
502 else:
502 else:
503 value = itemdefault
503 value = itemdefault
504 elif (item is not None
504 elif (item is not None
505 and item.default is not configitems.dynamicdefault
505 and item.default is not configitems.dynamicdefault
506 and default != itemdefault):
506 and default != itemdefault):
507 msg = ("specifying a mismatched default value for a registered "
507 msg = ("specifying a mismatched default value for a registered "
508 "config item: '%s.%s' '%s'")
508 "config item: '%s.%s' '%s'")
509 msg %= (section, name, pycompat.bytestr(default))
509 msg %= (section, name, pycompat.bytestr(default))
510 self.develwarn(msg, 2, 'warn-config-default')
510 self.develwarn(msg, 2, 'warn-config-default')
511
511
512 for s, n in alternates:
512 for s, n in alternates:
513 candidate = self._data(untrusted).get(s, n, None)
513 candidate = self._data(untrusted).get(s, n, None)
514 if candidate is not None:
514 if candidate is not None:
515 value = candidate
515 value = candidate
516 section = s
516 section = s
517 name = n
517 name = n
518 break
518 break
519
519
520 if self.debugflag and not untrusted and self._reportuntrusted:
520 if self.debugflag and not untrusted and self._reportuntrusted:
521 for s, n in alternates:
521 for s, n in alternates:
522 uvalue = self._ucfg.get(s, n)
522 uvalue = self._ucfg.get(s, n)
523 if uvalue is not None and uvalue != value:
523 if uvalue is not None and uvalue != value:
524 self.debug("ignoring untrusted configuration option "
524 self.debug("ignoring untrusted configuration option "
525 "%s.%s = %s\n" % (s, n, uvalue))
525 "%s.%s = %s\n" % (s, n, uvalue))
526 return value
526 return value
527
527
528 def configsuboptions(self, section, name, default=_unset, untrusted=False):
528 def configsuboptions(self, section, name, default=_unset, untrusted=False):
529 """Get a config option and all sub-options.
529 """Get a config option and all sub-options.
530
530
531 Some config options have sub-options that are declared with the
531 Some config options have sub-options that are declared with the
532 format "key:opt = value". This method is used to return the main
532 format "key:opt = value". This method is used to return the main
533 option and all its declared sub-options.
533 option and all its declared sub-options.
534
534
535 Returns a 2-tuple of ``(option, sub-options)``, where `sub-options``
535 Returns a 2-tuple of ``(option, sub-options)``, where `sub-options``
536 is a dict of defined sub-options where keys and values are strings.
536 is a dict of defined sub-options where keys and values are strings.
537 """
537 """
538 main = self.config(section, name, default, untrusted=untrusted)
538 main = self.config(section, name, default, untrusted=untrusted)
539 data = self._data(untrusted)
539 data = self._data(untrusted)
540 sub = {}
540 sub = {}
541 prefix = '%s:' % name
541 prefix = '%s:' % name
542 for k, v in data.items(section):
542 for k, v in data.items(section):
543 if k.startswith(prefix):
543 if k.startswith(prefix):
544 sub[k[len(prefix):]] = v
544 sub[k[len(prefix):]] = v
545
545
546 if self.debugflag and not untrusted and self._reportuntrusted:
546 if self.debugflag and not untrusted and self._reportuntrusted:
547 for k, v in sub.items():
547 for k, v in sub.items():
548 uvalue = self._ucfg.get(section, '%s:%s' % (name, k))
548 uvalue = self._ucfg.get(section, '%s:%s' % (name, k))
549 if uvalue is not None and uvalue != v:
549 if uvalue is not None and uvalue != v:
550 self.debug('ignoring untrusted configuration option '
550 self.debug('ignoring untrusted configuration option '
551 '%s:%s.%s = %s\n' % (section, name, k, uvalue))
551 '%s:%s.%s = %s\n' % (section, name, k, uvalue))
552
552
553 return main, sub
553 return main, sub
554
554
555 def configpath(self, section, name, default=_unset, untrusted=False):
555 def configpath(self, section, name, default=_unset, untrusted=False):
556 'get a path config item, expanded relative to repo root or config file'
556 'get a path config item, expanded relative to repo root or config file'
557 v = self.config(section, name, default, untrusted)
557 v = self.config(section, name, default, untrusted)
558 if v is None:
558 if v is None:
559 return None
559 return None
560 if not os.path.isabs(v) or "://" not in v:
560 if not os.path.isabs(v) or "://" not in v:
561 src = self.configsource(section, name, untrusted)
561 src = self.configsource(section, name, untrusted)
562 if ':' in src:
562 if ':' in src:
563 base = os.path.dirname(src.rsplit(':')[0])
563 base = os.path.dirname(src.rsplit(':')[0])
564 v = os.path.join(base, os.path.expanduser(v))
564 v = os.path.join(base, os.path.expanduser(v))
565 return v
565 return v
566
566
567 def configbool(self, section, name, default=_unset, untrusted=False):
567 def configbool(self, section, name, default=_unset, untrusted=False):
568 """parse a configuration element as a boolean
568 """parse a configuration element as a boolean
569
569
570 >>> u = ui(); s = b'foo'
570 >>> u = ui(); s = b'foo'
571 >>> u.setconfig(s, b'true', b'yes')
571 >>> u.setconfig(s, b'true', b'yes')
572 >>> u.configbool(s, b'true')
572 >>> u.configbool(s, b'true')
573 True
573 True
574 >>> u.setconfig(s, b'false', b'no')
574 >>> u.setconfig(s, b'false', b'no')
575 >>> u.configbool(s, b'false')
575 >>> u.configbool(s, b'false')
576 False
576 False
577 >>> u.configbool(s, b'unknown')
577 >>> u.configbool(s, b'unknown')
578 False
578 False
579 >>> u.configbool(s, b'unknown', True)
579 >>> u.configbool(s, b'unknown', True)
580 True
580 True
581 >>> u.setconfig(s, b'invalid', b'somevalue')
581 >>> u.setconfig(s, b'invalid', b'somevalue')
582 >>> u.configbool(s, b'invalid')
582 >>> u.configbool(s, b'invalid')
583 Traceback (most recent call last):
583 Traceback (most recent call last):
584 ...
584 ...
585 ConfigError: foo.invalid is not a boolean ('somevalue')
585 ConfigError: foo.invalid is not a boolean ('somevalue')
586 """
586 """
587
587
588 v = self._config(section, name, default, untrusted=untrusted)
588 v = self._config(section, name, default, untrusted=untrusted)
589 if v is None:
589 if v is None:
590 return v
590 return v
591 if v is _unset:
591 if v is _unset:
592 if default is _unset:
592 if default is _unset:
593 return False
593 return False
594 return default
594 return default
595 if isinstance(v, bool):
595 if isinstance(v, bool):
596 return v
596 return v
597 b = stringutil.parsebool(v)
597 b = stringutil.parsebool(v)
598 if b is None:
598 if b is None:
599 raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
599 raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
600 % (section, name, v))
600 % (section, name, v))
601 return b
601 return b
602
602
603 def configwith(self, convert, section, name, default=_unset,
603 def configwith(self, convert, section, name, default=_unset,
604 desc=None, untrusted=False):
604 desc=None, untrusted=False):
605 """parse a configuration element with a conversion function
605 """parse a configuration element with a conversion function
606
606
607 >>> u = ui(); s = b'foo'
607 >>> u = ui(); s = b'foo'
608 >>> u.setconfig(s, b'float1', b'42')
608 >>> u.setconfig(s, b'float1', b'42')
609 >>> u.configwith(float, s, b'float1')
609 >>> u.configwith(float, s, b'float1')
610 42.0
610 42.0
611 >>> u.setconfig(s, b'float2', b'-4.25')
611 >>> u.setconfig(s, b'float2', b'-4.25')
612 >>> u.configwith(float, s, b'float2')
612 >>> u.configwith(float, s, b'float2')
613 -4.25
613 -4.25
614 >>> u.configwith(float, s, b'unknown', 7)
614 >>> u.configwith(float, s, b'unknown', 7)
615 7.0
615 7.0
616 >>> u.setconfig(s, b'invalid', b'somevalue')
616 >>> u.setconfig(s, b'invalid', b'somevalue')
617 >>> u.configwith(float, s, b'invalid')
617 >>> u.configwith(float, s, b'invalid')
618 Traceback (most recent call last):
618 Traceback (most recent call last):
619 ...
619 ...
620 ConfigError: foo.invalid is not a valid float ('somevalue')
620 ConfigError: foo.invalid is not a valid float ('somevalue')
621 >>> u.configwith(float, s, b'invalid', desc=b'womble')
621 >>> u.configwith(float, s, b'invalid', desc=b'womble')
622 Traceback (most recent call last):
622 Traceback (most recent call last):
623 ...
623 ...
624 ConfigError: foo.invalid is not a valid womble ('somevalue')
624 ConfigError: foo.invalid is not a valid womble ('somevalue')
625 """
625 """
626
626
627 v = self.config(section, name, default, untrusted)
627 v = self.config(section, name, default, untrusted)
628 if v is None:
628 if v is None:
629 return v # do not attempt to convert None
629 return v # do not attempt to convert None
630 try:
630 try:
631 return convert(v)
631 return convert(v)
632 except (ValueError, error.ParseError):
632 except (ValueError, error.ParseError):
633 if desc is None:
633 if desc is None:
634 desc = pycompat.sysbytes(convert.__name__)
634 desc = pycompat.sysbytes(convert.__name__)
635 raise error.ConfigError(_("%s.%s is not a valid %s ('%s')")
635 raise error.ConfigError(_("%s.%s is not a valid %s ('%s')")
636 % (section, name, desc, v))
636 % (section, name, desc, v))
637
637
638 def configint(self, section, name, default=_unset, untrusted=False):
638 def configint(self, section, name, default=_unset, untrusted=False):
639 """parse a configuration element as an integer
639 """parse a configuration element as an integer
640
640
641 >>> u = ui(); s = b'foo'
641 >>> u = ui(); s = b'foo'
642 >>> u.setconfig(s, b'int1', b'42')
642 >>> u.setconfig(s, b'int1', b'42')
643 >>> u.configint(s, b'int1')
643 >>> u.configint(s, b'int1')
644 42
644 42
645 >>> u.setconfig(s, b'int2', b'-42')
645 >>> u.setconfig(s, b'int2', b'-42')
646 >>> u.configint(s, b'int2')
646 >>> u.configint(s, b'int2')
647 -42
647 -42
648 >>> u.configint(s, b'unknown', 7)
648 >>> u.configint(s, b'unknown', 7)
649 7
649 7
650 >>> u.setconfig(s, b'invalid', b'somevalue')
650 >>> u.setconfig(s, b'invalid', b'somevalue')
651 >>> u.configint(s, b'invalid')
651 >>> u.configint(s, b'invalid')
652 Traceback (most recent call last):
652 Traceback (most recent call last):
653 ...
653 ...
654 ConfigError: foo.invalid is not a valid integer ('somevalue')
654 ConfigError: foo.invalid is not a valid integer ('somevalue')
655 """
655 """
656
656
657 return self.configwith(int, section, name, default, 'integer',
657 return self.configwith(int, section, name, default, 'integer',
658 untrusted)
658 untrusted)
659
659
660 def configbytes(self, section, name, default=_unset, untrusted=False):
660 def configbytes(self, section, name, default=_unset, untrusted=False):
661 """parse a configuration element as a quantity in bytes
661 """parse a configuration element as a quantity in bytes
662
662
663 Units can be specified as b (bytes), k or kb (kilobytes), m or
663 Units can be specified as b (bytes), k or kb (kilobytes), m or
664 mb (megabytes), g or gb (gigabytes).
664 mb (megabytes), g or gb (gigabytes).
665
665
666 >>> u = ui(); s = b'foo'
666 >>> u = ui(); s = b'foo'
667 >>> u.setconfig(s, b'val1', b'42')
667 >>> u.setconfig(s, b'val1', b'42')
668 >>> u.configbytes(s, b'val1')
668 >>> u.configbytes(s, b'val1')
669 42
669 42
670 >>> u.setconfig(s, b'val2', b'42.5 kb')
670 >>> u.setconfig(s, b'val2', b'42.5 kb')
671 >>> u.configbytes(s, b'val2')
671 >>> u.configbytes(s, b'val2')
672 43520
672 43520
673 >>> u.configbytes(s, b'unknown', b'7 MB')
673 >>> u.configbytes(s, b'unknown', b'7 MB')
674 7340032
674 7340032
675 >>> u.setconfig(s, b'invalid', b'somevalue')
675 >>> u.setconfig(s, b'invalid', b'somevalue')
676 >>> u.configbytes(s, b'invalid')
676 >>> u.configbytes(s, b'invalid')
677 Traceback (most recent call last):
677 Traceback (most recent call last):
678 ...
678 ...
679 ConfigError: foo.invalid is not a byte quantity ('somevalue')
679 ConfigError: foo.invalid is not a byte quantity ('somevalue')
680 """
680 """
681
681
682 value = self._config(section, name, default, untrusted)
682 value = self._config(section, name, default, untrusted)
683 if value is _unset:
683 if value is _unset:
684 if default is _unset:
684 if default is _unset:
685 default = 0
685 default = 0
686 value = default
686 value = default
687 if not isinstance(value, bytes):
687 if not isinstance(value, bytes):
688 return value
688 return value
689 try:
689 try:
690 return util.sizetoint(value)
690 return util.sizetoint(value)
691 except error.ParseError:
691 except error.ParseError:
692 raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
692 raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
693 % (section, name, value))
693 % (section, name, value))
694
694
695 def configlist(self, section, name, default=_unset, untrusted=False):
695 def configlist(self, section, name, default=_unset, untrusted=False):
696 """parse a configuration element as a list of comma/space separated
696 """parse a configuration element as a list of comma/space separated
697 strings
697 strings
698
698
699 >>> u = ui(); s = b'foo'
699 >>> u = ui(); s = b'foo'
700 >>> u.setconfig(s, b'list1', b'this,is "a small" ,test')
700 >>> u.setconfig(s, b'list1', b'this,is "a small" ,test')
701 >>> u.configlist(s, b'list1')
701 >>> u.configlist(s, b'list1')
702 ['this', 'is', 'a small', 'test']
702 ['this', 'is', 'a small', 'test']
703 >>> u.setconfig(s, b'list2', b'this, is "a small" , test ')
703 >>> u.setconfig(s, b'list2', b'this, is "a small" , test ')
704 >>> u.configlist(s, b'list2')
704 >>> u.configlist(s, b'list2')
705 ['this', 'is', 'a small', 'test']
705 ['this', 'is', 'a small', 'test']
706 """
706 """
707 # default is not always a list
707 # default is not always a list
708 v = self.configwith(config.parselist, section, name, default,
708 v = self.configwith(config.parselist, section, name, default,
709 'list', untrusted)
709 'list', untrusted)
710 if isinstance(v, bytes):
710 if isinstance(v, bytes):
711 return config.parselist(v)
711 return config.parselist(v)
712 elif v is None:
712 elif v is None:
713 return []
713 return []
714 return v
714 return v
715
715
716 def configdate(self, section, name, default=_unset, untrusted=False):
716 def configdate(self, section, name, default=_unset, untrusted=False):
717 """parse a configuration element as a tuple of ints
717 """parse a configuration element as a tuple of ints
718
718
719 >>> u = ui(); s = b'foo'
719 >>> u = ui(); s = b'foo'
720 >>> u.setconfig(s, b'date', b'0 0')
720 >>> u.setconfig(s, b'date', b'0 0')
721 >>> u.configdate(s, b'date')
721 >>> u.configdate(s, b'date')
722 (0, 0)
722 (0, 0)
723 """
723 """
724 if self.config(section, name, default, untrusted):
724 if self.config(section, name, default, untrusted):
725 return self.configwith(dateutil.parsedate, section, name, default,
725 return self.configwith(dateutil.parsedate, section, name, default,
726 'date', untrusted)
726 'date', untrusted)
727 if default is _unset:
727 if default is _unset:
728 return None
728 return None
729 return default
729 return default
730
730
731 def hasconfig(self, section, name, untrusted=False):
731 def hasconfig(self, section, name, untrusted=False):
732 return self._data(untrusted).hasitem(section, name)
732 return self._data(untrusted).hasitem(section, name)
733
733
734 def has_section(self, section, untrusted=False):
734 def has_section(self, section, untrusted=False):
735 '''tell whether section exists in config.'''
735 '''tell whether section exists in config.'''
736 return section in self._data(untrusted)
736 return section in self._data(untrusted)
737
737
738 def configitems(self, section, untrusted=False, ignoresub=False):
738 def configitems(self, section, untrusted=False, ignoresub=False):
739 items = self._data(untrusted).items(section)
739 items = self._data(untrusted).items(section)
740 if ignoresub:
740 if ignoresub:
741 items = [i for i in items if ':' not in i[0]]
741 items = [i for i in items if ':' not in i[0]]
742 if self.debugflag and not untrusted and self._reportuntrusted:
742 if self.debugflag and not untrusted and self._reportuntrusted:
743 for k, v in self._ucfg.items(section):
743 for k, v in self._ucfg.items(section):
744 if self._tcfg.get(section, k) != v:
744 if self._tcfg.get(section, k) != v:
745 self.debug("ignoring untrusted configuration option "
745 self.debug("ignoring untrusted configuration option "
746 "%s.%s = %s\n" % (section, k, v))
746 "%s.%s = %s\n" % (section, k, v))
747 return items
747 return items
748
748
749 def walkconfig(self, untrusted=False):
749 def walkconfig(self, untrusted=False):
750 cfg = self._data(untrusted)
750 cfg = self._data(untrusted)
751 for section in cfg.sections():
751 for section in cfg.sections():
752 for name, value in self.configitems(section, untrusted):
752 for name, value in self.configitems(section, untrusted):
753 yield section, name, value
753 yield section, name, value
754
754
755 def plain(self, feature=None):
755 def plain(self, feature=None):
756 '''is plain mode active?
756 '''is plain mode active?
757
757
758 Plain mode means that all configuration variables which affect
758 Plain mode means that all configuration variables which affect
759 the behavior and output of Mercurial should be
759 the behavior and output of Mercurial should be
760 ignored. Additionally, the output should be stable,
760 ignored. Additionally, the output should be stable,
761 reproducible and suitable for use in scripts or applications.
761 reproducible and suitable for use in scripts or applications.
762
762
763 The only way to trigger plain mode is by setting either the
763 The only way to trigger plain mode is by setting either the
764 `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
764 `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
765
765
766 The return value can either be
766 The return value can either be
767 - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
767 - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
768 - False if feature is disabled by default and not included in HGPLAIN
768 - False if feature is disabled by default and not included in HGPLAIN
769 - True otherwise
769 - True otherwise
770 '''
770 '''
771 if ('HGPLAIN' not in encoding.environ and
771 if ('HGPLAIN' not in encoding.environ and
772 'HGPLAINEXCEPT' not in encoding.environ):
772 'HGPLAINEXCEPT' not in encoding.environ):
773 return False
773 return False
774 exceptions = encoding.environ.get('HGPLAINEXCEPT',
774 exceptions = encoding.environ.get('HGPLAINEXCEPT',
775 '').strip().split(',')
775 '').strip().split(',')
776 # TODO: add support for HGPLAIN=+feature,-feature syntax
776 # TODO: add support for HGPLAIN=+feature,-feature syntax
777 if '+strictflags' not in encoding.environ.get('HGPLAIN', '').split(','):
777 if '+strictflags' not in encoding.environ.get('HGPLAIN', '').split(','):
778 exceptions.append('strictflags')
778 exceptions.append('strictflags')
779 if feature and exceptions:
779 if feature and exceptions:
780 return feature not in exceptions
780 return feature not in exceptions
781 return True
781 return True
782
782
783 def username(self, acceptempty=False):
783 def username(self, acceptempty=False):
784 """Return default username to be used in commits.
784 """Return default username to be used in commits.
785
785
786 Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
786 Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
787 and stop searching if one of these is set.
787 and stop searching if one of these is set.
788 If not found and acceptempty is True, returns None.
788 If not found and acceptempty is True, returns None.
789 If not found and ui.askusername is True, ask the user, else use
789 If not found and ui.askusername is True, ask the user, else use
790 ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
790 ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
791 If no username could be found, raise an Abort error.
791 If no username could be found, raise an Abort error.
792 """
792 """
793 user = encoding.environ.get("HGUSER")
793 user = encoding.environ.get("HGUSER")
794 if user is None:
794 if user is None:
795 user = self.config("ui", "username")
795 user = self.config("ui", "username")
796 if user is not None:
796 if user is not None:
797 user = os.path.expandvars(user)
797 user = os.path.expandvars(user)
798 if user is None:
798 if user is None:
799 user = encoding.environ.get("EMAIL")
799 user = encoding.environ.get("EMAIL")
800 if user is None and acceptempty:
800 if user is None and acceptempty:
801 return user
801 return user
802 if user is None and self.configbool("ui", "askusername"):
802 if user is None and self.configbool("ui", "askusername"):
803 user = self.prompt(_("enter a commit username:"), default=None)
803 user = self.prompt(_("enter a commit username:"), default=None)
804 if user is None and not self.interactive():
804 if user is None and not self.interactive():
805 try:
805 try:
806 user = '%s@%s' % (procutil.getuser(),
806 user = '%s@%s' % (procutil.getuser(),
807 encoding.strtolocal(socket.getfqdn()))
807 encoding.strtolocal(socket.getfqdn()))
808 self.warn(_("no username found, using '%s' instead\n") % user)
808 self.warn(_("no username found, using '%s' instead\n") % user)
809 except KeyError:
809 except KeyError:
810 pass
810 pass
811 if not user:
811 if not user:
812 raise error.Abort(_('no username supplied'),
812 raise error.Abort(_('no username supplied'),
813 hint=_("use 'hg config --edit' "
813 hint=_("use 'hg config --edit' "
814 'to set your username'))
814 'to set your username'))
815 if "\n" in user:
815 if "\n" in user:
816 raise error.Abort(_("username %r contains a newline\n")
816 raise error.Abort(_("username %r contains a newline\n")
817 % pycompat.bytestr(user))
817 % pycompat.bytestr(user))
818 return user
818 return user
819
819
820 def shortuser(self, user):
820 def shortuser(self, user):
821 """Return a short representation of a user name or email address."""
821 """Return a short representation of a user name or email address."""
822 if not self.verbose:
822 if not self.verbose:
823 user = stringutil.shortuser(user)
823 user = stringutil.shortuser(user)
824 return user
824 return user
825
825
826 def expandpath(self, loc, default=None):
826 def expandpath(self, loc, default=None):
827 """Return repository location relative to cwd or from [paths]"""
827 """Return repository location relative to cwd or from [paths]"""
828 try:
828 try:
829 p = self.paths.getpath(loc)
829 p = self.paths.getpath(loc)
830 if p:
830 if p:
831 return p.rawloc
831 return p.rawloc
832 except error.RepoError:
832 except error.RepoError:
833 pass
833 pass
834
834
835 if default:
835 if default:
836 try:
836 try:
837 p = self.paths.getpath(default)
837 p = self.paths.getpath(default)
838 if p:
838 if p:
839 return p.rawloc
839 return p.rawloc
840 except error.RepoError:
840 except error.RepoError:
841 pass
841 pass
842
842
843 return loc
843 return loc
844
844
845 @util.propertycache
845 @util.propertycache
846 def paths(self):
846 def paths(self):
847 return paths(self)
847 return paths(self)
848
848
849 def pushbuffer(self, error=False, subproc=False, labeled=False):
849 def pushbuffer(self, error=False, subproc=False, labeled=False):
850 """install a buffer to capture standard output of the ui object
850 """install a buffer to capture standard output of the ui object
851
851
852 If error is True, the error output will be captured too.
852 If error is True, the error output will be captured too.
853
853
854 If subproc is True, output from subprocesses (typically hooks) will be
854 If subproc is True, output from subprocesses (typically hooks) will be
855 captured too.
855 captured too.
856
856
857 If labeled is True, any labels associated with buffered
857 If labeled is True, any labels associated with buffered
858 output will be handled. By default, this has no effect
858 output will be handled. By default, this has no effect
859 on the output returned, but extensions and GUI tools may
859 on the output returned, but extensions and GUI tools may
860 handle this argument and returned styled output. If output
860 handle this argument and returned styled output. If output
861 is being buffered so it can be captured and parsed or
861 is being buffered so it can be captured and parsed or
862 processed, labeled should not be set to True.
862 processed, labeled should not be set to True.
863 """
863 """
864 self._buffers.append([])
864 self._buffers.append([])
865 self._bufferstates.append((error, subproc, labeled))
865 self._bufferstates.append((error, subproc, labeled))
866 self._bufferapplylabels = labeled
866 self._bufferapplylabels = labeled
867
867
868 def popbuffer(self):
868 def popbuffer(self):
869 '''pop the last buffer and return the buffered output'''
869 '''pop the last buffer and return the buffered output'''
870 self._bufferstates.pop()
870 self._bufferstates.pop()
871 if self._bufferstates:
871 if self._bufferstates:
872 self._bufferapplylabels = self._bufferstates[-1][2]
872 self._bufferapplylabels = self._bufferstates[-1][2]
873 else:
873 else:
874 self._bufferapplylabels = None
874 self._bufferapplylabels = None
875
875
876 return "".join(self._buffers.pop())
876 return "".join(self._buffers.pop())
877
877
878 def canwritewithoutlabels(self):
878 def canwritewithoutlabels(self):
879 '''check if write skips the label'''
879 '''check if write skips the label'''
880 if self._buffers and not self._bufferapplylabels:
880 if self._buffers and not self._bufferapplylabels:
881 return True
881 return True
882 return self._colormode is None
882 return self._colormode is None
883
883
884 def canbatchlabeledwrites(self):
884 def canbatchlabeledwrites(self):
885 '''check if write calls with labels are batchable'''
885 '''check if write calls with labels are batchable'''
886 # Windows color printing is special, see ``write``.
886 # Windows color printing is special, see ``write``.
887 return self._colormode != 'win32'
887 return self._colormode != 'win32'
888
888
    def write(self, *args, **opts):
        '''write args to output

        By default, this method simply writes to the buffer or stdout.
        Color mode can be set on the UI class to have the output decorated
        with color modifier before being written to stdout.

        The color used is controlled by an optional keyword argument, "label".
        This should be a string containing label names separated by space.
        Label names take the form of "topic.type". For example, ui.debug()
        issues a label of "ui.debug".

        When labeling output for a specific command, a label of
        "cmdname.type" is recommended. For example, status issues
        a label of "status.modified" for modified files.
        '''
        if self._buffers:
            if self._bufferapplylabels:
                # the innermost pushbuffer() asked for styled output
                label = opts.get(r'label', '')
                self._buffers[-1].extend(self.label(a, label) for a in args)
            else:
                # capture raw, unstyled text
                self._buffers[-1].extend(args)
        else:
            self._writenobuf(*args, **opts)
913
913
    def _writenobuf(self, *args, **opts):
        # Write directly to the output stream, bypassing any capture buffers.
        if self._colormode == 'win32':
            # windows color printing is its own can of crab, defer to
            # the color module and that is it.
            color.win32print(self, self._write, *args, **opts)
        else:
            msgs = args
            if self._colormode is not None:
                # decorate each message with its label's color codes
                label = opts.get(r'label', '')
                msgs = [self.label(a, label) for a in args]
            self._write(*msgs, **opts)
925
925
    def _write(self, *msgs, **opts):
        # Lowest-level stdout write; clear any progress output first so the
        # messages do not interleave with a progress bar.
        self._progclear()
        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            self.fout.write(''.join(msgs))
        except IOError as err:
            raise error.StdioError(err)
        finally:
            # util.timer() is in seconds; _blockedtimes is accumulated in ms
            self._blockedtimes['stdio_blocked'] += \
                (util.timer() - starttime) * 1000
937
937
    def write_err(self, *args, **opts):
        # Write args to the error stream, or into the active capture buffer
        # when the current pushbuffer() asked to capture errors too.
        self._progclear()
        if self._bufferstates and self._bufferstates[-1][0]:
            # innermost buffer captures stderr: route through write()
            self.write(*args, **opts)
        elif self._colormode == 'win32':
            # windows color printing is its own can of crab, defer to
            # the color module and that is it.
            color.win32print(self, self._write_err, *args, **opts)
        else:
            msgs = args
            if self._colormode is not None:
                # decorate each message with its label's color codes
                label = opts.get(r'label', '')
                msgs = [self.label(a, label) for a in args]
            self._write_err(*msgs, **opts)
952
952
    def _write_err(self, *msgs, **opts):
        # Lowest-level stderr write.  stdout is flushed first so interleaved
        # out/err output appears in the order it was produced.
        try:
            with self.timeblockedsection('stdio'):
                if not getattr(self.fout, 'closed', False):
                    self.fout.flush()
                for a in msgs:
                    self.ferr.write(a)
                # stderr may be buffered under win32 when redirected to files,
                # including stdout.
                if not getattr(self.ferr, 'closed', False):
                    self.ferr.flush()
        except IOError as inst:
            # EPIPE/EIO/EBADF are expected when e.g. a pager exits early;
            # anything else is a genuine stdio failure worth surfacing
            if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                raise error.StdioError(inst)
967
967
    def flush(self):
        # Flush both output streams.  EPIPE/EIO/EBADF are swallowed (closed
        # pager etc.); other IOErrors become StdioError.  stderr is always
        # flushed even if flushing stdout raised.
        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            try:
                self.fout.flush()
            except IOError as err:
                if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                    raise error.StdioError(err)
            finally:
                try:
                    self.ferr.flush()
                except IOError as err:
                    if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                        raise error.StdioError(err)
        finally:
            # util.timer() is in seconds; _blockedtimes is accumulated in ms
            self._blockedtimes['stdio_blocked'] += \
                (util.timer() - starttime) * 1000
986
986
987 def _isatty(self, fh):
987 def _isatty(self, fh):
988 if self.configbool('ui', 'nontty'):
988 if self.configbool('ui', 'nontty'):
989 return False
989 return False
990 return procutil.isatty(fh)
990 return procutil.isatty(fh)
991
991
992 def disablepager(self):
992 def disablepager(self):
993 self._disablepager = True
993 self._disablepager = True
994
994
    def pager(self, command):
        """Start a pager for subsequent command output.

        Commands which produce a long stream of output should call
        this function to activate the user's preferred pagination
        mechanism (which may be no pager). Calling this function
        precludes any future use of interactive functionality, such as
        prompting the user or activating curses.

        Args:
            command: The full, non-aliased name of the command. That is, "log"
                not "history", "summary" not "summ", etc.
        """
        if (self._disablepager
            or self.pageractive):
            # how pager should do is already determined
            return

        if not command.startswith('internal-always-') and (
            # explicit --pager=on (= 'internal-always-' prefix) should
            # take precedence over disabling factors below
            command in self.configlist('pager', 'ignore')
            or not self.configbool('ui', 'paginate')
            or not self.configbool('pager', 'attend-' + command, True)
            # TODO: if we want to allow HGPLAINEXCEPT=pager,
            # formatted() will need some adjustment.
            or not self.formatted()
            or self.plain()
            or self._buffers
            # TODO: expose debugger-enabled on the UI object
            or '--debugger' in pycompat.sysargv):
            # We only want to paginate if the ui appears to be
            # interactive, the user didn't say HGPLAIN or
            # HGPLAINEXCEPT=pager, and the user didn't specify --debug.
            return

        pagercmd = self.config('pager', 'pager', rcutil.fallbackpager)
        if not pagercmd:
            return

        # pass pager-friendly environment defaults through only when the
        # user has not already set them in the environment
        pagerenv = {}
        for name, value in rcutil.defaultpagerenv().items():
            if name not in encoding.environ:
                pagerenv[name] = value

        self.debug('starting pager for command %r\n' % command)
        self.flush()

        wasformatted = self.formatted()
        if util.safehasattr(signal, "SIGPIPE"):
            signal.signal(signal.SIGPIPE, _catchterm)
        if self._runpager(pagercmd, pagerenv):
            self.pageractive = True
            # Preserve the formatted-ness of the UI. This is important
            # because we mess with stdout, which might confuse
            # auto-detection of things being formatted.
            self.setconfig('ui', 'formatted', wasformatted, 'pager')
            self.setconfig('ui', 'interactive', False, 'pager')

            # If pagermode differs from color.mode, reconfigure color now that
            # pageractive is set.
            cm = self._colormode
            if cm != self.config('color', 'pagermode', cm):
                color.setup(self)
        else:
            # If the pager can't be spawned in dispatch when --pager=on is
            # given, don't try again when the command runs, to avoid a duplicate
            # warning about a missing pager command.
            self.disablepager()
1064
1064
    def _runpager(self, command, env=None):
        """Actually start the pager and set up file descriptors.

        This is separate in part so that extensions (like chg) can
        override how a pager is invoked.

        Returns True when a pager process was spawned and stdout (and, when
        it is a tty, stderr) were redirected into its stdin; False otherwise.
        """
        if command == 'cat':
            # Save ourselves some work.
            return False
        # If the command doesn't contain any of these characters, we
        # assume it's a binary and exec it directly. This means for
        # simple pager command configurations, we can degrade
        # gracefully and tell the user about their broken pager.
        shell = any(c in command for c in "|&;<>()$`\\\"' \t\n*?[#~=%")

        if pycompat.iswindows and not shell:
            # Window's built-in `more` cannot be invoked with shell=False, but
            # its `more.com` can. Hide this implementation detail from the
            # user so we can also get sane bad PAGER behavior. MSYS has
            # `more.exe`, so do a cmd.exe style resolution of the executable to
            # determine which one to use.
            fullcmd = procutil.findexe(command)
            if not fullcmd:
                self.warn(_("missing pager command '%s', skipping pager\n")
                          % command)
                return False

            command = fullcmd

        try:
            pager = subprocess.Popen(
                command, shell=shell, bufsize=-1,
                close_fds=procutil.closefds, stdin=subprocess.PIPE,
                stdout=procutil.stdout, stderr=procutil.stderr,
                env=procutil.shellenviron(env))
        except OSError as e:
            # ENOENT without a shell means the pager binary is missing;
            # warn and fall back to no pager instead of aborting
            if e.errno == errno.ENOENT and not shell:
                self.warn(_("missing pager command '%s', skipping pager\n")
                          % command)
                return False
            raise

        # back up original file descriptors
        stdoutfd = os.dup(procutil.stdout.fileno())
        stderrfd = os.dup(procutil.stderr.fileno())

        os.dup2(pager.stdin.fileno(), procutil.stdout.fileno())
        if self._isatty(procutil.stderr):
            os.dup2(pager.stdin.fileno(), procutil.stderr.fileno())

        @self.atexit
        def killpager():
            # runs when the request finishes; ignore Ctrl-C while the pager
            # drains so the user can quit it cleanly
            if util.safehasattr(signal, "SIGINT"):
                signal.signal(signal.SIGINT, signal.SIG_IGN)
            # restore original fds, closing pager.stdin copies in the process
            os.dup2(stdoutfd, procutil.stdout.fileno())
            os.dup2(stderrfd, procutil.stderr.fileno())
            pager.stdin.close()
            pager.wait()

        return True
1126
1126
    @property
    def _exithandlers(self):
        # Exit handlers live in a module-level list (_reqexithandlers), so
        # they are shared process-wide rather than per-ui instance.
        return _reqexithandlers
1130
1130
1131 def atexit(self, func, *args, **kwargs):
1131 def atexit(self, func, *args, **kwargs):
1132 '''register a function to run after dispatching a request
1132 '''register a function to run after dispatching a request
1133
1133
1134 Handlers do not stay registered across request boundaries.'''
1134 Handlers do not stay registered across request boundaries.'''
1135 self._exithandlers.append((func, args, kwargs))
1135 self._exithandlers.append((func, args, kwargs))
1136 return func
1136 return func
1137
1137
    def interface(self, feature):
        """what interface to use for interactive console features?

        The interface is controlled by the value of `ui.interface` but also by
        the value of feature-specific configuration. For example:

        ui.interface.histedit = text
        ui.interface.chunkselector = curses

        Here the features are "histedit" and "chunkselector".

        The configuration above means that the default interfaces for commands
        is curses, the interface for histedit is text and the interface for
        selecting chunk is crecord (the best curses interface available).

        Consider the following example:
        ui.interface = curses
        ui.interface.histedit = text

        Then histedit will use the text interface and chunkselector will use
        the default curses interface (crecord at the moment).
        """
        alldefaults = frozenset(["text", "curses"])

        # per-feature list of interfaces the feature actually supports
        featureinterfaces = {
            "chunkselector": [
                "text",
                "curses",
            ]
        }

        # Feature-specific interface
        if feature not in featureinterfaces.keys():
            # Programming error, not user error
            raise ValueError("Unknown feature requested %s" % feature)

        availableinterfaces = frozenset(featureinterfaces[feature])
        if alldefaults > availableinterfaces:
            # Programming error, not user error. We need a use case to
            # define the right thing to do here.
            raise ValueError(
                "Feature %s does not handle all default interfaces" %
                feature)

        if self.plain():
            # HGPLAIN-style plain mode always forces the text interface
            return "text"

        # Default interface for all the features
        defaultinterface = "text"
        i = self.config("ui", "interface")
        if i in alldefaults:
            defaultinterface = i

        choseninterface = defaultinterface
        f = self.config("ui", "interface.%s" % feature)
        if f in availableinterfaces:
            choseninterface = f

        # Invalid config values are not fatal: warn the user and proceed
        # with the fallback chosen above.
        if i is not None and defaultinterface != i:
            if f is not None:
                self.warn(_("invalid value for ui.interface: %s\n") %
                          (i,))
            else:
                self.warn(_("invalid value for ui.interface: %s (using %s)\n") %
                          (i, choseninterface))
        if f is not None and choseninterface != f:
            self.warn(_("invalid value for ui.interface.%s: %s (using %s)\n") %
                      (feature, f, choseninterface))

        return choseninterface
1208
1208
1209 def interactive(self):
1209 def interactive(self):
1210 '''is interactive input allowed?
1210 '''is interactive input allowed?
1211
1211
1212 An interactive session is a session where input can be reasonably read
1212 An interactive session is a session where input can be reasonably read
1213 from `sys.stdin'. If this function returns false, any attempt to read
1213 from `sys.stdin'. If this function returns false, any attempt to read
1214 from stdin should fail with an error, unless a sensible default has been
1214 from stdin should fail with an error, unless a sensible default has been
1215 specified.
1215 specified.
1216
1216
1217 Interactiveness is triggered by the value of the `ui.interactive'
1217 Interactiveness is triggered by the value of the `ui.interactive'
1218 configuration variable or - if it is unset - when `sys.stdin' points
1218 configuration variable or - if it is unset - when `sys.stdin' points
1219 to a terminal device.
1219 to a terminal device.
1220
1220
1221 This function refers to input only; for output, see `ui.formatted()'.
1221 This function refers to input only; for output, see `ui.formatted()'.
1222 '''
1222 '''
1223 i = self.configbool("ui", "interactive")
1223 i = self.configbool("ui", "interactive")
1224 if i is None:
1224 if i is None:
1225 # some environments replace stdin without implementing isatty
1225 # some environments replace stdin without implementing isatty
1226 # usually those are non-interactive
1226 # usually those are non-interactive
1227 return self._isatty(self.fin)
1227 return self._isatty(self.fin)
1228
1228
1229 return i
1229 return i
1230
1230
    def termwidth(self):
        '''how wide is the terminal in columns?
        '''
        # A valid integer COLUMNS environment variable overrides detection;
        # a non-integer value is silently ignored.
        if 'COLUMNS' in encoding.environ:
            try:
                return int(encoding.environ['COLUMNS'])
            except ValueError:
                pass
        # fall back to querying the terminal size (width is element 0)
        return scmutil.termsize(self)[0]
1240
1240
    def formatted(self):
        '''should formatted output be used?

        It is often desirable to format the output to suite the output medium.
        Examples of this are truncating long lines or colorizing messages.
        However, this is not often not desirable when piping output into other
        utilities, e.g. `grep'.

        Formatted output is triggered by the value of the `ui.formatted'
        configuration variable or - if it is unset - when `sys.stdout' points
        to a terminal device. Please note that `ui.formatted' should be
        considered an implementation detail; it is not intended for use outside
        Mercurial or its extensions.

        This function refers to output only; for input, see `ui.interactive()'.
        This function always returns false when in plain mode, see `ui.plain()'.
        '''
        # plain mode (HGPLAIN et al.) unconditionally disables formatting
        if self.plain():
            return False

        i = self.configbool("ui", "formatted")
        if i is None:
            # some environments replace stdout without implementing isatty
            # usually those are non-interactive
            return self._isatty(self.fout)

        return i
1268
1268
    def _readline(self):
        # Read one line of user input from self.fin, with readline-based
        # line editing when stdin/stdout are the process's real ttys.
        #
        # Replacing stdin/stdout temporarily is a hard problem on Python 3
        # because they have to be text streams with *no buffering*. Instead,
        # we use rawinput() only if call_readline() will be invoked by
        # PyOS_Readline(), so no I/O will be made at Python layer.
        usereadline = (self._isatty(self.fin) and self._isatty(self.fout)
                       and procutil.isstdin(self.fin)
                       and procutil.isstdout(self.fout))
        if usereadline:
            try:
                # magically add command line editing support, where
                # available
                import readline
                # force demandimport to really load the module
                readline.read_history_file
                # windows sometimes raises something other than ImportError
            except Exception:
                usereadline = False

        # prompt ' ' must exist; otherwise readline may delete entire line
        # - http://bugs.python.org/issue12833
        with self.timeblockedsection('stdio'):
            if usereadline:
                line = encoding.strtolocal(pycompat.rawinput(r' '))
                # When stdin is in binary mode on Windows, it can cause
                # raw_input() to emit an extra trailing carriage return
                if pycompat.oslinesep == b'\r\n' and line.endswith(b'\r'):
                    line = line[:-1]
            else:
                # no readline: emit the one-space prompt manually and read
                # directly from the input stream
                self.fout.write(b' ')
                self.fout.flush()
                line = self.fin.readline()
                if not line:
                    raise EOFError
                line = line.rstrip(pycompat.oslinesep)

        return line
1306
1306
    def prompt(self, msg, default="y"):
        """Prompt user with msg, read response.
        If ui is not interactive, the default is returned.

        Raises error.ResponseExpected when input hits EOF before a line
        is read.
        """
        if not self.interactive():
            # echo the prompt together with the assumed answer
            self.write(msg, ' ', default or '', "\n")
            return default
        # write unbuffered so the prompt is visible even while buffering
        self._writenobuf(msg, label='ui.prompt')
        self.flush()
        try:
            r = self._readline()
            if not r:
                # empty response means "accept the default"
                r = default
            if self.configbool('ui', 'promptecho'):
                self.write(r, "\n")
            return r
        except EOFError:
            raise error.ResponseExpected()
1325
1325
1326 @staticmethod
1326 @staticmethod
1327 def extractchoices(prompt):
1327 def extractchoices(prompt):
1328 """Extract prompt message and list of choices from specified prompt.
1328 """Extract prompt message and list of choices from specified prompt.
1329
1329
1330 This returns tuple "(message, choices)", and "choices" is the
1330 This returns tuple "(message, choices)", and "choices" is the
1331 list of tuple "(response character, text without &)".
1331 list of tuple "(response character, text without &)".
1332
1332
1333 >>> ui.extractchoices(b"awake? $$ &Yes $$ &No")
1333 >>> ui.extractchoices(b"awake? $$ &Yes $$ &No")
1334 ('awake? ', [('y', 'Yes'), ('n', 'No')])
1334 ('awake? ', [('y', 'Yes'), ('n', 'No')])
1335 >>> ui.extractchoices(b"line\\nbreak? $$ &Yes $$ &No")
1335 >>> ui.extractchoices(b"line\\nbreak? $$ &Yes $$ &No")
1336 ('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
1336 ('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
1337 >>> ui.extractchoices(b"want lots of $$money$$?$$Ye&s$$N&o")
1337 >>> ui.extractchoices(b"want lots of $$money$$?$$Ye&s$$N&o")
1338 ('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
1338 ('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
1339 """
1339 """
1340
1340
1341 # Sadly, the prompt string may have been built with a filename
1341 # Sadly, the prompt string may have been built with a filename
1342 # containing "$$" so let's try to find the first valid-looking
1342 # containing "$$" so let's try to find the first valid-looking
1343 # prompt to start parsing. Sadly, we also can't rely on
1343 # prompt to start parsing. Sadly, we also can't rely on
1344 # choices containing spaces, ASCII, or basically anything
1344 # choices containing spaces, ASCII, or basically anything
1345 # except an ampersand followed by a character.
1345 # except an ampersand followed by a character.
1346 m = re.match(br'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
1346 m = re.match(br'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
1347 msg = m.group(1)
1347 msg = m.group(1)
1348 choices = [p.strip(' ') for p in m.group(2).split('$$')]
1348 choices = [p.strip(' ') for p in m.group(2).split('$$')]
1349 def choicetuple(s):
1349 def choicetuple(s):
1350 ampidx = s.index('&')
1350 ampidx = s.index('&')
1351 return s[ampidx + 1:ampidx + 2].lower(), s.replace('&', '', 1)
1351 return s[ampidx + 1:ampidx + 2].lower(), s.replace('&', '', 1)
1352 return (msg, [choicetuple(s) for s in choices])
1352 return (msg, [choicetuple(s) for s in choices])
1353
1353
1354 def promptchoice(self, prompt, default=0):
1354 def promptchoice(self, prompt, default=0):
1355 """Prompt user with a message, read response, and ensure it matches
1355 """Prompt user with a message, read response, and ensure it matches
1356 one of the provided choices. The prompt is formatted as follows:
1356 one of the provided choices. The prompt is formatted as follows:
1357
1357
1358 "would you like fries with that (Yn)? $$ &Yes $$ &No"
1358 "would you like fries with that (Yn)? $$ &Yes $$ &No"
1359
1359
1360 The index of the choice is returned. Responses are case
1360 The index of the choice is returned. Responses are case
1361 insensitive. If ui is not interactive, the default is
1361 insensitive. If ui is not interactive, the default is
1362 returned.
1362 returned.
1363 """
1363 """
1364
1364
1365 msg, choices = self.extractchoices(prompt)
1365 msg, choices = self.extractchoices(prompt)
1366 resps = [r for r, t in choices]
1366 resps = [r for r, t in choices]
1367 while True:
1367 while True:
1368 r = self.prompt(msg, resps[default])
1368 r = self.prompt(msg, resps[default])
1369 if r.lower() in resps:
1369 if r.lower() in resps:
1370 return resps.index(r.lower())
1370 return resps.index(r.lower())
1371 self.write(_("unrecognized response\n"))
1371 self.write(_("unrecognized response\n"))
1372
1372
1373 def getpass(self, prompt=None, default=None):
1373 def getpass(self, prompt=None, default=None):
1374 if not self.interactive():
1374 if not self.interactive():
1375 return default
1375 return default
1376 try:
1376 try:
1377 self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
1377 self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
1378 # disable getpass() only if explicitly specified. it's still valid
1378 # disable getpass() only if explicitly specified. it's still valid
1379 # to interact with tty even if fin is not a tty.
1379 # to interact with tty even if fin is not a tty.
1380 with self.timeblockedsection('stdio'):
1380 with self.timeblockedsection('stdio'):
1381 if self.configbool('ui', 'nontty'):
1381 if self.configbool('ui', 'nontty'):
1382 l = self.fin.readline()
1382 l = self.fin.readline()
1383 if not l:
1383 if not l:
1384 raise EOFError
1384 raise EOFError
1385 return l.rstrip('\n')
1385 return l.rstrip('\n')
1386 else:
1386 else:
1387 return getpass.getpass('')
1387 return getpass.getpass('')
1388 except EOFError:
1388 except EOFError:
1389 raise error.ResponseExpected()
1389 raise error.ResponseExpected()
1390 def status(self, *msg, **opts):
1390 def status(self, *msg, **opts):
1391 '''write status message to output (if ui.quiet is False)
1391 '''write status message to output (if ui.quiet is False)
1392
1392
1393 This adds an output label of "ui.status".
1393 This adds an output label of "ui.status".
1394 '''
1394 '''
1395 if not self.quiet:
1395 if not self.quiet:
1396 opts[r'label'] = opts.get(r'label', '') + ' ui.status'
1396 opts[r'label'] = opts.get(r'label', '') + ' ui.status'
1397 self.write(*msg, **opts)
1397 self.write(*msg, **opts)
1398 def warn(self, *msg, **opts):
1398 def warn(self, *msg, **opts):
1399 '''write warning message to output (stderr)
1399 '''write warning message to output (stderr)
1400
1400
1401 This adds an output label of "ui.warning".
1401 This adds an output label of "ui.warning".
1402 '''
1402 '''
1403 opts[r'label'] = opts.get(r'label', '') + ' ui.warning'
1403 opts[r'label'] = opts.get(r'label', '') + ' ui.warning'
1404 self.write_err(*msg, **opts)
1404 self.write_err(*msg, **opts)
1405 def note(self, *msg, **opts):
1405 def note(self, *msg, **opts):
1406 '''write note to output (if ui.verbose is True)
1406 '''write note to output (if ui.verbose is True)
1407
1407
1408 This adds an output label of "ui.note".
1408 This adds an output label of "ui.note".
1409 '''
1409 '''
1410 if self.verbose:
1410 if self.verbose:
1411 opts[r'label'] = opts.get(r'label', '') + ' ui.note'
1411 opts[r'label'] = opts.get(r'label', '') + ' ui.note'
1412 self.write(*msg, **opts)
1412 self.write(*msg, **opts)
1413 def debug(self, *msg, **opts):
1413 def debug(self, *msg, **opts):
1414 '''write debug message to output (if ui.debugflag is True)
1414 '''write debug message to output (if ui.debugflag is True)
1415
1415
1416 This adds an output label of "ui.debug".
1416 This adds an output label of "ui.debug".
1417 '''
1417 '''
1418 if self.debugflag:
1418 if self.debugflag:
1419 opts[r'label'] = opts.get(r'label', '') + ' ui.debug'
1419 opts[r'label'] = opts.get(r'label', '') + ' ui.debug'
1420 self.write(*msg, **opts)
1420 self.write(*msg, **opts)
1421
1421
1422 def edit(self, text, user, extra=None, editform=None, pending=None,
1422 def edit(self, text, user, extra=None, editform=None, pending=None,
1423 repopath=None, action=None):
1423 repopath=None, action=None):
1424 if action is None:
1424 if action is None:
1425 self.develwarn('action is None but will soon be a required '
1425 self.develwarn('action is None but will soon be a required '
1426 'parameter to ui.edit()')
1426 'parameter to ui.edit()')
1427 extra_defaults = {
1427 extra_defaults = {
1428 'prefix': 'editor',
1428 'prefix': 'editor',
1429 'suffix': '.txt',
1429 'suffix': '.txt',
1430 }
1430 }
1431 if extra is not None:
1431 if extra is not None:
1432 if extra.get('suffix') is not None:
1432 if extra.get('suffix') is not None:
1433 self.develwarn('extra.suffix is not None but will soon be '
1433 self.develwarn('extra.suffix is not None but will soon be '
1434 'ignored by ui.edit()')
1434 'ignored by ui.edit()')
1435 extra_defaults.update(extra)
1435 extra_defaults.update(extra)
1436 extra = extra_defaults
1436 extra = extra_defaults
1437
1437
1438 if action == 'diff':
1438 if action == 'diff':
1439 suffix = '.diff'
1439 suffix = '.diff'
1440 elif action:
1440 elif action:
1441 suffix = '.%s.hg.txt' % action
1441 suffix = '.%s.hg.txt' % action
1442 else:
1442 else:
1443 suffix = extra['suffix']
1443 suffix = extra['suffix']
1444
1444
1445 rdir = None
1445 rdir = None
1446 if self.configbool('experimental', 'editortmpinhg'):
1446 if self.configbool('experimental', 'editortmpinhg'):
1447 rdir = repopath
1447 rdir = repopath
1448 (fd, name) = pycompat.mkstemp(prefix='hg-' + extra['prefix'] + '-',
1448 (fd, name) = pycompat.mkstemp(prefix='hg-' + extra['prefix'] + '-',
1449 suffix=suffix,
1449 suffix=suffix,
1450 dir=rdir)
1450 dir=rdir)
1451 try:
1451 try:
1452 f = os.fdopen(fd, r'wb')
1452 f = os.fdopen(fd, r'wb')
1453 f.write(util.tonativeeol(text))
1453 f.write(util.tonativeeol(text))
1454 f.close()
1454 f.close()
1455
1455
1456 environ = {'HGUSER': user}
1456 environ = {'HGUSER': user}
1457 if 'transplant_source' in extra:
1457 if 'transplant_source' in extra:
1458 environ.update({'HGREVISION': hex(extra['transplant_source'])})
1458 environ.update({'HGREVISION': hex(extra['transplant_source'])})
1459 for label in ('intermediate-source', 'source', 'rebase_source'):
1459 for label in ('intermediate-source', 'source', 'rebase_source'):
1460 if label in extra:
1460 if label in extra:
1461 environ.update({'HGREVISION': extra[label]})
1461 environ.update({'HGREVISION': extra[label]})
1462 break
1462 break
1463 if editform:
1463 if editform:
1464 environ.update({'HGEDITFORM': editform})
1464 environ.update({'HGEDITFORM': editform})
1465 if pending:
1465 if pending:
1466 environ.update({'HG_PENDING': pending})
1466 environ.update({'HG_PENDING': pending})
1467
1467
1468 editor = self.geteditor()
1468 editor = self.geteditor()
1469
1469
1470 self.system("%s \"%s\"" % (editor, name),
1470 self.system("%s \"%s\"" % (editor, name),
1471 environ=environ,
1471 environ=environ,
1472 onerr=error.Abort, errprefix=_("edit failed"),
1472 onerr=error.Abort, errprefix=_("edit failed"),
1473 blockedtag='editor')
1473 blockedtag='editor')
1474
1474
1475 f = open(name, r'rb')
1475 f = open(name, r'rb')
1476 t = util.fromnativeeol(f.read())
1476 t = util.fromnativeeol(f.read())
1477 f.close()
1477 f.close()
1478 finally:
1478 finally:
1479 os.unlink(name)
1479 os.unlink(name)
1480
1480
1481 return t
1481 return t
1482
1482
1483 def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None,
1483 def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None,
1484 blockedtag=None):
1484 blockedtag=None):
1485 '''execute shell command with appropriate output stream. command
1485 '''execute shell command with appropriate output stream. command
1486 output will be redirected if fout is not stdout.
1486 output will be redirected if fout is not stdout.
1487
1487
1488 if command fails and onerr is None, return status, else raise onerr
1488 if command fails and onerr is None, return status, else raise onerr
1489 object as exception.
1489 object as exception.
1490 '''
1490 '''
1491 if blockedtag is None:
1491 if blockedtag is None:
1492 # Long cmds tend to be because of an absolute path on cmd. Keep
1492 # Long cmds tend to be because of an absolute path on cmd. Keep
1493 # the tail end instead
1493 # the tail end instead
1494 cmdsuffix = cmd.translate(None, _keepalnum)[-85:]
1494 cmdsuffix = cmd.translate(None, _keepalnum)[-85:]
1495 blockedtag = 'unknown_system_' + cmdsuffix
1495 blockedtag = 'unknown_system_' + cmdsuffix
1496 out = self.fout
1496 out = self.fout
1497 if any(s[1] for s in self._bufferstates):
1497 if any(s[1] for s in self._bufferstates):
1498 out = self
1498 out = self
1499 with self.timeblockedsection(blockedtag):
1499 with self.timeblockedsection(blockedtag):
1500 rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out)
1500 rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out)
1501 if rc and onerr:
1501 if rc and onerr:
1502 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
1502 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
1503 procutil.explainexit(rc))
1503 procutil.explainexit(rc))
1504 if errprefix:
1504 if errprefix:
1505 errmsg = '%s: %s' % (errprefix, errmsg)
1505 errmsg = '%s: %s' % (errprefix, errmsg)
1506 raise onerr(errmsg)
1506 raise onerr(errmsg)
1507 return rc
1507 return rc
1508
1508
    def _runsystem(self, cmd, environ, cwd, out):
        """actually execute the given shell command (can be overridden by
        extensions like chg)"""
        # Thin hook point: policy (buffering, blocked-time accounting,
        # error handling) stays in system(); only the exec lives here.
        return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
1513
1513
    def traceback(self, exc=None, force=False):
        '''print exception traceback if traceback printing enabled or forced.
        only to call in exception handler. returns true if traceback
        printed.'''
        if self.tracebackflag or force:
            if exc is None:
                # No explicit exception info given: use the one currently
                # being handled.
                exc = sys.exc_info()
            # 'cause' is an optional attribute on the exception object —
            # presumably an exc_info-style (type, value, tb) triple set by
            # whoever chained and re-raised it; confirm at the raise sites.
            cause = getattr(exc[1], 'cause', None)

            if cause is not None:
                causetb = traceback.format_tb(cause[2])
                exctb = traceback.format_tb(exc[2])
                exconly = traceback.format_exception_only(cause[0], cause[1])

                # exclude frame where 'exc' was chained and rethrown from exctb
                self.write_err('Traceback (most recent call last):\n',
                               ''.join(exctb[:-1]),
                               ''.join(causetb),
                               ''.join(exconly))
            else:
                output = traceback.format_exception(exc[0], exc[1], exc[2])
                self.write_err(encoding.strtolocal(r''.join(output)))
        return self.tracebackflag or force
1537
1537
1538 def geteditor(self):
1538 def geteditor(self):
1539 '''return editor to use'''
1539 '''return editor to use'''
1540 if pycompat.sysplatform == 'plan9':
1540 if pycompat.sysplatform == 'plan9':
1541 # vi is the MIPS instruction simulator on Plan 9. We
1541 # vi is the MIPS instruction simulator on Plan 9. We
1542 # instead default to E to plumb commit messages to
1542 # instead default to E to plumb commit messages to
1543 # avoid confusion.
1543 # avoid confusion.
1544 editor = 'E'
1544 editor = 'E'
1545 else:
1545 else:
1546 editor = 'vi'
1546 editor = 'vi'
1547 return (encoding.environ.get("HGEDITOR") or
1547 return (encoding.environ.get("HGEDITOR") or
1548 self.config("ui", "editor", editor))
1548 self.config("ui", "editor", editor))
1549
1549
1550 @util.propertycache
1550 @util.propertycache
1551 def _progbar(self):
1551 def _progbar(self):
1552 """setup the progbar singleton to the ui object"""
1552 """setup the progbar singleton to the ui object"""
1553 if (self.quiet or self.debugflag
1553 if (self.quiet or self.debugflag
1554 or self.configbool('progress', 'disable')
1554 or self.configbool('progress', 'disable')
1555 or not progress.shouldprint(self)):
1555 or not progress.shouldprint(self)):
1556 return None
1556 return None
1557 return getprogbar(self)
1557 return getprogbar(self)
1558
1558
1559 def _progclear(self):
1559 def _progclear(self):
1560 """clear progress bar output if any. use it before any output"""
1560 """clear progress bar output if any. use it before any output"""
1561 if not haveprogbar(): # nothing loaded yet
1561 if not haveprogbar(): # nothing loaded yet
1562 return
1562 return
1563 if self._progbar is not None and self._progbar.printed:
1563 if self._progbar is not None and self._progbar.printed:
1564 self._progbar.clear()
1564 self._progbar.clear()
1565
1565
1566 def progress(self, topic, pos, item="", unit="", total=None):
1566 def progress(self, topic, pos, item="", unit="", total=None):
1567 '''show a progress message
1567 '''show a progress message
1568
1568
1569 By default a textual progress bar will be displayed if an operation
1569 By default a textual progress bar will be displayed if an operation
1570 takes too long. 'topic' is the current operation, 'item' is a
1570 takes too long. 'topic' is the current operation, 'item' is a
1571 non-numeric marker of the current position (i.e. the currently
1571 non-numeric marker of the current position (i.e. the currently
1572 in-process file), 'pos' is the current numeric position (i.e.
1572 in-process file), 'pos' is the current numeric position (i.e.
1573 revision, bytes, etc.), unit is a corresponding unit label,
1573 revision, bytes, etc.), unit is a corresponding unit label,
1574 and total is the highest expected pos.
1574 and total is the highest expected pos.
1575
1575
1576 Multiple nested topics may be active at a time.
1576 Multiple nested topics may be active at a time.
1577
1577
1578 All topics should be marked closed by setting pos to None at
1578 All topics should be marked closed by setting pos to None at
1579 termination.
1579 termination.
1580 '''
1580 '''
1581 if self._progbar is not None:
1581 if self._progbar is not None:
1582 self._progbar.progress(topic, pos, item=item, unit=unit,
1582 self._progbar.progress(topic, pos, item=item, unit=unit,
1583 total=total)
1583 total=total)
1584 if pos is None or not self.configbool('progress', 'debug'):
1584 if pos is None or not self.configbool('progress', 'debug'):
1585 return
1585 return
1586
1586
1587 if unit:
1587 if unit:
1588 unit = ' ' + unit
1588 unit = ' ' + unit
1589 if item:
1589 if item:
1590 item = ' ' + item
1590 item = ' ' + item
1591
1591
1592 if total:
1592 if total:
1593 pct = 100.0 * pos / total
1593 pct = 100.0 * pos / total
1594 self.debug('%s:%s %d/%d%s (%4.2f%%)\n'
1594 self.debug('%s:%s %d/%d%s (%4.2f%%)\n'
1595 % (topic, item, pos, total, unit, pct))
1595 % (topic, item, pos, total, unit, pct))
1596 else:
1596 else:
1597 self.debug('%s:%s %d%s\n' % (topic, item, pos, unit))
1597 self.debug('%s:%s %d%s\n' % (topic, item, pos, unit))
1598
1598
    def makeprogress(self, topic, unit="", total=None):
        '''exists only so low-level modules won't need to import scmutil'''
        # Delegates to scmutil.progress, the helper class for incrementing
        # progress; see that class for the actual API.
        return scmutil.progress(self, topic, unit, total)
1602
    def log(self, service, *msg, **opts):
        '''hook for logging facility extensions

        service should be a readily-identifiable subsystem, which will
        allow filtering.

        *msg should be a newline-terminated format string to log, and
        then any values to %-format into that format string.

        **opts currently has no defined meanings.

        This base implementation is intentionally a no-op; logging
        extensions are expected to override it.
        '''
1610
1614
1611 def label(self, msg, label):
1615 def label(self, msg, label):
1612 '''style msg based on supplied label
1616 '''style msg based on supplied label
1613
1617
1614 If some color mode is enabled, this will add the necessary control
1618 If some color mode is enabled, this will add the necessary control
1615 characters to apply such color. In addition, 'debug' color mode adds
1619 characters to apply such color. In addition, 'debug' color mode adds
1616 markup showing which label affects a piece of text.
1620 markup showing which label affects a piece of text.
1617
1621
1618 ui.write(s, 'label') is equivalent to
1622 ui.write(s, 'label') is equivalent to
1619 ui.write(ui.label(s, 'label')).
1623 ui.write(ui.label(s, 'label')).
1620 '''
1624 '''
1621 if self._colormode is not None:
1625 if self._colormode is not None:
1622 return color.colorlabel(self, msg, label)
1626 return color.colorlabel(self, msg, label)
1623 return msg
1627 return msg
1624
1628
    def develwarn(self, msg, stacklevel=1, config=None):
        """issue a developer warning message

        Use 'stacklevel' to report the offender some layers further up in the
        stack.
        """
        # Gated by devel.all-warnings, or by the per-warning devel.<config>
        # knob when 'config' is given.
        if not self.configbool('devel', 'all-warnings'):
            if config is None or not self.configbool('devel', config):
                return
        msg = 'devel-warn: ' + msg
        stacklevel += 1 # get in develwarn
        if self.tracebackflag:
            util.debugstacktrace(msg, stacklevel, self.ferr, self.fout)
            self.log('develwarn', '%s at:\n%s' %
                     (msg, ''.join(util.getstackframes(stacklevel))))
        else:
            # Report just the offending call site: walk 'stacklevel' frames
            # up and extract (filename, line number, function name).
            curframe = inspect.currentframe()
            calframe = inspect.getouterframes(curframe, 2)
            fname, lineno, fmsg = calframe[stacklevel][1:4]
            fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg)
            self.write_err('%s at: %s:%d (%s)\n'
                           % (msg, fname, lineno, fmsg))
            self.log('develwarn', '%s at: %s:%d (%s)\n',
                     msg, fname, lineno, fmsg)
        curframe = calframe = None # avoid cycles
1650
1654
1651 def deprecwarn(self, msg, version, stacklevel=2):
1655 def deprecwarn(self, msg, version, stacklevel=2):
1652 """issue a deprecation warning
1656 """issue a deprecation warning
1653
1657
1654 - msg: message explaining what is deprecated and how to upgrade,
1658 - msg: message explaining what is deprecated and how to upgrade,
1655 - version: last version where the API will be supported,
1659 - version: last version where the API will be supported,
1656 """
1660 """
1657 if not (self.configbool('devel', 'all-warnings')
1661 if not (self.configbool('devel', 'all-warnings')
1658 or self.configbool('devel', 'deprec-warn')):
1662 or self.configbool('devel', 'deprec-warn')):
1659 return
1663 return
1660 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
1664 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
1661 " update your code.)") % version
1665 " update your code.)") % version
1662 self.develwarn(msg, stacklevel=stacklevel, config='deprec-warn')
1666 self.develwarn(msg, stacklevel=stacklevel, config='deprec-warn')
1663
1667
    def exportableenviron(self):
        """The environment variables that are safe to export, e.g. through
        hgweb.
        """
        # The filtered mapping is prepared elsewhere (not visible here);
        # this is a plain accessor.
        return self._exportableenviron
1669
1673
    @contextlib.contextmanager
    def configoverride(self, overrides, source=""):
        """Context manager for temporary config overrides
        `overrides` must be a dict of the following structure:
        {(section, name) : value}"""
        backups = {}
        try:
            for (section, name), value in overrides.items():
                backups[(section, name)] = self.backupconfig(section, name)
                self.setconfig(section, name, value, source)
            yield
        finally:
            # Only values that were actually backed up get restored, so a
            # failure partway through the loop above still unwinds cleanly.
            for __, backup in backups.items():
                self.restoreconfig(backup)
            # just restoring ui.quiet config to the previous value is not enough
            # as it does not update ui.quiet class member
            if ('ui', 'quiet') in overrides:
                self.fixconfig(section='ui')
1688
1692
class paths(dict):
    """Represents a collection of paths and their configs.

    Data is initially derived from ui instances and the config files they have
    loaded.
    """
    def __init__(self, ui):
        dict.__init__(self)

        for name, loc in ui.configitems('paths', ignoresub=True):
            # No location is the same as not existing.
            if not loc:
                continue
            # Re-read the entry with its sub-options attached.
            loc, sub = ui.configsuboptions('paths', name)
            self[name] = path(ui, name, rawloc=loc, suboptions=sub)

    def getpath(self, name, default=None):
        """Return a ``path`` from a string, falling back to default.

        ``name`` can be a named path or locations. Locations are filesystem
        paths or URIs.

        Returns None if ``name`` is not a registered path, a URI, or a local
        path to a repo.
        """
        # Only fall back to default if no path was requested.
        if name is None:
            # 'default' may be a single name or an ordered sequence of
            # candidate names; the first one registered wins.
            if not default:
                default = ()
            elif not isinstance(default, (tuple, list)):
                default = (default,)
            for k in default:
                try:
                    return self[k]
                except KeyError:
                    continue
            return None

        # Most likely empty string.
        # This may need to raise in the future.
        if not name:
            return None

        try:
            return self[name]
        except KeyError:
            # Try to resolve as a local path or URI.
            try:
                # We don't pass sub-options in, so no need to pass ui instance.
                return path(None, None, rawloc=name)
            except ValueError:
                raise error.RepoError(_('repository %s does not exist') %
                                      name)
1742
1746
# Registry of declared path sub-options: maps sub-option name to a
# (path attribute name, validation/transform function) pair, populated by
# the pathsuboption decorator below.
_pathsuboptions = {}
1744
1748
def pathsuboption(option, attr):
    """Decorator used to declare a path sub-option.

    Arguments are the sub-option name and the attribute it should set on
    ``path`` instances.

    The decorated function will receive as arguments a ``ui`` instance,
    ``path`` instance, and the string value of this option from the config.
    The function should return the value that will be set on the ``path``
    instance.

    This decorator can be used to perform additional verification of
    sub-options and to change the type of sub-options.
    """
    def decorator(fn):
        # Record (attribute name, parser) in the module-level registry so
        # the path machinery can look up every declared sub-option.
        _pathsuboptions[option] = (attr, fn)
        return fn
    return decorator
1763
1767
@pathsuboption('pushurl', 'pushloc')
def pushurlpathoption(ui, path, value):
    """Validate a paths.NAME:pushurl value; returns None when rejected."""
    pushurl = util.url(value)
    # Actually require a URL.
    if not pushurl.scheme:
        ui.warn(_('(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
        return None

    # Don't support the #foo syntax in the push URL to declare branch to
    # push.
    if pushurl.fragment:
        ui.warn(_('("#fragment" in paths.%s:pushurl not supported; '
                  'ignoring)\n') % path.name)
        pushurl.fragment = None

    return bytes(pushurl)
1780
1784
@pathsuboption('pushrev', 'pushrev')
def pushrevpathoption(ui, path, value):
    """Accept the ``pushrev`` sub-option verbatim (no validation needed)."""
    return value
1784
1788
class path(object):
    """Represents an individual path and its configuration."""

    def __init__(self, ui, name, rawloc=None, suboptions=None):
        """Construct a path from its config options.

        ``ui`` is the ``ui`` instance the path is coming from.
        ``name`` is the symbolic name of the path.
        ``rawloc`` is the raw location, as defined in the config.
        ``pushloc`` is the raw locations pushes should be made to.

        If ``name`` is not defined, we require that the location be a) a local
        filesystem path with a .hg directory or b) a URL. If not,
        ``ValueError`` is raised.
        """
        if not rawloc:
            raise ValueError('rawloc must be defined')

        # Locations may define branches via syntax <base>#<branch>; split the
        # fragment off so ``self.url`` is the bare location.
        u = util.url(rawloc)
        branch = None
        if u.fragment:
            branch = u.fragment
            u.fragment = None

        # Parsed URL object for the location.
        self.url = u
        # Branch name from the "#branch" suffix, or None.
        self.branch = branch

        # Symbolic name of this path (may be falsy for anonymous locations).
        self.name = name
        # Location exactly as written in the config.
        self.rawloc = rawloc
        # String form of the URL, with any fragment removed.
        self.loc = '%s' % u

        # When given a raw location but not a symbolic name, validate the
        # location is valid.
        if not name and not u.scheme and not self._isvalidlocalpath(self.loc):
            raise ValueError('location is not a URL or path to a local '
                             'repo: %s' % rawloc)

        suboptions = suboptions or {}

        # Now process the sub-options. If a sub-option is registered, its
        # attribute will always be present. The value will be None if there
        # was no valid sub-option.
        # NOTE(review): the loop variable ``attr`` shadows any module-level
        # ``attr`` import while iterating — harmless here but worth knowing.
        # ``iteritems`` implies this file still targets Python 2.
        for suboption, (attr, func) in _pathsuboptions.iteritems():
            if suboption not in suboptions:
                setattr(self, attr, None)
                continue

            # The registered function may transform/validate the raw value.
            value = func(ui, self, suboptions[suboption])
            setattr(self, attr, value)

    def _isvalidlocalpath(self, path):
        """Returns True if the given path is a potentially valid repository.
        This is its own function so that extensions can change the definition of
        'valid' in this case (like when pulling from a git repo into a hg
        one)."""
        # presumably ``os`` is imported at the top of this file — TODO confirm
        return os.path.isdir(os.path.join(path, '.hg'))

    @property
    def suboptions(self):
        """Return sub-options and their values for this path.

        This is intended to be used for presentation purposes.
        """
        # Collect only sub-options that were actually set (non-None).
        d = {}
        for subopt, (attr, _func) in _pathsuboptions.iteritems():
            value = getattr(self, attr)
            if value is not None:
                d[subopt] = value
        return d
1855
1859
# we instantiate one globally shared progress bar to avoid
# competing progress bars when multiple UI objects get created
_progresssingleton = None

def getprogbar(ui):
    """Return the process-wide progress bar, creating it on first call.

    Subsequent calls return the same instance regardless of the ``ui``
    passed in — only the first caller's ``ui`` is captured.
    """
    global _progresssingleton
    if _progresssingleton is None:
        # passing 'ui' object to the singleton is fishy,
        # this is how the extension used to work but feel free to rework it.
        _progresssingleton = progress.progbar(ui)
    return _progresssingleton
1867
1871
def haveprogbar():
    """Tell whether the shared progress bar singleton exists yet."""
    created = _progresssingleton is not None
    return created
General Comments 0
You need to be logged in to leave comments. Login now